diff --git a/.gitattributes b/.gitattributes index 72d49dd59eed2f7ea132ac64b30b34fac6a62751..e93f9d48b477ea695121f369ad2a7229587f5571 100644 --- a/.gitattributes +++ b/.gitattributes @@ -177,3 +177,4 @@ lib/python3.10/site-packages/babel/locale-data/blo.dat filter=lfs diff=lfs merge lib/python3.10/site-packages/babel/locale-data/ia.dat filter=lfs diff=lfs merge=lfs -text lib/python3.10/site-packages/babel/locale-data/lt.dat filter=lfs diff=lfs merge=lfs -text lib/python3.10/site-packages/babel/locale-data/lb.dat filter=lfs diff=lfs merge=lfs -text +lib/python3.10/site-packages/babel/locale-data/am.dat filter=lfs diff=lfs merge=lfs -text diff --git a/lib/python3.10/site-packages/babel/locale-data/am.dat b/lib/python3.10/site-packages/babel/locale-data/am.dat new file mode 100644 index 0000000000000000000000000000000000000000..50fd1c7221bdd4c1b8fdcf319392ef9e751bed16 --- /dev/null +++ b/lib/python3.10/site-packages/babel/locale-data/am.dat @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4dd75f79121216fce34c5a41faf1f782348ab36afc893dc261c92bae289b5d96 +size 173260 diff --git a/lib/python3.10/site-packages/torch/include/ATen/native/BatchLinearAlgebra.h b/lib/python3.10/site-packages/torch/include/ATen/native/BatchLinearAlgebra.h new file mode 100644 index 0000000000000000000000000000000000000000..6254ba47707bd5dc9c1bef8ea3528ca0be2ecd9b --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/native/BatchLinearAlgebra.h @@ -0,0 +1,321 @@ +#pragma once + +#include +#include +#include +#include + +// Forward declare TI +namespace at { +class Tensor; +struct TensorIterator; + +namespace native { +enum class TransposeType; +} + +} + +namespace at::native { + +enum class LapackLstsqDriverType : int64_t { Gels, Gelsd, Gelsy, Gelss}; + +#if AT_BUILD_WITH_LAPACK() +// Define per-batch functions to be used in the implementation of batched +// linear algebra operations + +template +void lapackCholesky(char uplo, int n, scalar_t *a, int lda, int *info); + +template +void lapackCholeskyInverse(char uplo, int n, scalar_t *a, int lda, int *info); + +template +void lapackEig(char jobvl, char jobvr, int n, scalar_t *a, int lda, scalar_t *w, scalar_t* vl, int ldvl, scalar_t *vr, int ldvr, scalar_t *work, int lwork, value_t *rwork, int *info); + +template +void lapackGeqrf(int m, int n, scalar_t *a, int lda, scalar_t *tau, scalar_t *work, int lwork, int *info); + +template +void lapackOrgqr(int m, int n, int k, scalar_t *a, int lda, scalar_t *tau, scalar_t *work, int lwork, int *info); + +template +void lapackOrmqr(char side, char trans, int m, int n, int k, scalar_t *a, int lda, scalar_t *tau, scalar_t *c, int ldc, scalar_t *work, int lwork, int *info); + +template +void lapackSyevd(char jobz, char uplo, int n, scalar_t* a, int lda, value_t* w, scalar_t* work, int lwork, value_t* rwork, int lrwork, int* iwork, int liwork, int* info); + +template +void lapackGels(char trans, int m, int n, int nrhs, + scalar_t *a, int lda, scalar_t *b, int ldb, + scalar_t *work, int lwork, int *info); + +template +void lapackGelsd(int m, int n, int nrhs, + scalar_t *a, int lda, scalar_t *b, int ldb, + value_t *s, value_t rcond, int *rank, + scalar_t* work, int lwork, + value_t *rwork, int* iwork, int *info); + +template +void lapackGelsy(int m, int n, int nrhs, + scalar_t *a, int lda, scalar_t *b, int ldb, + int *jpvt, value_t rcond, int *rank, + scalar_t *work, int lwork, value_t* rwork, int *info); + +template +void lapackGelss(int m, int n, int nrhs, + scalar_t *a, int lda, scalar_t *b, int 
ldb, + value_t *s, value_t rcond, int *rank, + scalar_t *work, int lwork, + value_t *rwork, int *info); + +template +struct lapackLstsq_impl; + +template +struct lapackLstsq_impl { + static void call( + char trans, int m, int n, int nrhs, + scalar_t *a, int lda, scalar_t *b, int ldb, + scalar_t *work, int lwork, int *info, // Gels flavor + int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor + value_t *s, // Gelss flavor + int *iwork // Gelsd flavor + ) { + lapackGels( + trans, m, n, nrhs, + a, lda, b, ldb, + work, lwork, info); + } +}; + +template +struct lapackLstsq_impl { + static void call( + char trans, int m, int n, int nrhs, + scalar_t *a, int lda, scalar_t *b, int ldb, + scalar_t *work, int lwork, int *info, // Gels flavor + int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor + value_t *s, // Gelss flavor + int *iwork // Gelsd flavor + ) { + lapackGelsy( + m, n, nrhs, + a, lda, b, ldb, + jpvt, rcond, rank, + work, lwork, rwork, info); + } +}; + +template +struct lapackLstsq_impl { + static void call( + char trans, int m, int n, int nrhs, + scalar_t *a, int lda, scalar_t *b, int ldb, + scalar_t *work, int lwork, int *info, // Gels flavor + int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor + value_t *s, // Gelss flavor + int *iwork // Gelsd flavor + ) { + lapackGelsd( + m, n, nrhs, + a, lda, b, ldb, + s, rcond, rank, + work, lwork, + rwork, iwork, info); + } +}; + +template +struct lapackLstsq_impl { + static void call( + char trans, int m, int n, int nrhs, + scalar_t *a, int lda, scalar_t *b, int ldb, + scalar_t *work, int lwork, int *info, // Gels flavor + int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor + value_t *s, // Gelss flavor + int *iwork // Gelsd flavor + ) { + lapackGelss( + m, n, nrhs, + a, lda, b, ldb, + s, rcond, rank, + work, lwork, + rwork, info); + } +}; + +template +void lapackLstsq( + char trans, int m, int n, int nrhs, + scalar_t *a, int lda, scalar_t *b, int ldb, + scalar_t *work, int lwork, int *info, // Gels flavor + int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor + value_t *s, // Gelss flavor + int *iwork // Gelsd flavor + ) { + lapackLstsq_impl::call( + trans, m, n, nrhs, + a, lda, b, ldb, + work, lwork, info, + jpvt, rcond, rank, rwork, + s, + iwork); +} + +template +void lapackLuSolve(char trans, int n, int nrhs, scalar_t *a, int lda, int *ipiv, scalar_t *b, int ldb, int *info); + +template +void lapackLu(int m, int n, scalar_t *a, int lda, int *ipiv, int *info); + +template +void lapackLdlHermitian( + char uplo, + int n, + scalar_t* a, + int lda, + int* ipiv, + scalar_t* work, + int lwork, + int* info); + +template +void lapackLdlSymmetric( + char uplo, + int n, + scalar_t* a, + int lda, + int* ipiv, + scalar_t* work, + int lwork, + int* info); + +template +void lapackLdlSolveHermitian( + char uplo, + int n, + int nrhs, + scalar_t* a, + int lda, + int* ipiv, + scalar_t* b, + int ldb, + int* info); + +template +void lapackLdlSolveSymmetric( + char uplo, + int n, + int nrhs, + scalar_t* a, + int lda, + int* ipiv, + scalar_t* b, + int ldb, + int* info); + +template +void lapackSvd(char jobz, int m, int n, scalar_t *a, int lda, value_t *s, scalar_t *u, int ldu, scalar_t *vt, int ldvt, scalar_t *work, int lwork, value_t *rwork, int *iwork, int *info); +#endif + +#if AT_BUILD_WITH_BLAS() +template +void blasTriangularSolve(char side, char uplo, char trans, char diag, int n, int nrhs, scalar_t* a, int lda, scalar_t* b, int ldb); +#endif + +using cholesky_fn = void 
(*)(const Tensor& /*input*/, const Tensor& /*info*/, bool /*upper*/); +DECLARE_DISPATCH(cholesky_fn, cholesky_stub) + +using cholesky_inverse_fn = Tensor& (*)(Tensor& /*result*/, Tensor& /*infos*/, bool /*upper*/); + +DECLARE_DISPATCH(cholesky_inverse_fn, cholesky_inverse_stub) + +using linalg_eig_fn = void (*)(Tensor& /*eigenvalues*/, Tensor& /*eigenvectors*/, Tensor& /*infos*/, const Tensor& /*input*/, bool /*compute_eigenvectors*/); + +DECLARE_DISPATCH(linalg_eig_fn, linalg_eig_stub) + +using geqrf_fn = void (*)(const Tensor& /*input*/, const Tensor& /*tau*/); +DECLARE_DISPATCH(geqrf_fn, geqrf_stub) + +using orgqr_fn = Tensor& (*)(Tensor& /*result*/, const Tensor& /*tau*/); +DECLARE_DISPATCH(orgqr_fn, orgqr_stub) + +using ormqr_fn = void (*)(const Tensor& /*input*/, const Tensor& /*tau*/, const Tensor& /*other*/, bool /*left*/, bool /*transpose*/); +DECLARE_DISPATCH(ormqr_fn, ormqr_stub) + +using linalg_eigh_fn = void (*)( + const Tensor& /*eigenvalues*/, + const Tensor& /*eigenvectors*/, + const Tensor& /*infos*/, + bool /*upper*/, + bool /*compute_eigenvectors*/); +DECLARE_DISPATCH(linalg_eigh_fn, linalg_eigh_stub) + +using lstsq_fn = void (*)( + const Tensor& /*a*/, + Tensor& /*b*/, + Tensor& /*rank*/, + Tensor& /*singular_values*/, + Tensor& /*infos*/, + double /*rcond*/, + std::string /*driver_name*/); +DECLARE_DISPATCH(lstsq_fn, lstsq_stub) + +using triangular_solve_fn = void (*)( + const Tensor& /*A*/, + const Tensor& /*B*/, + bool /*left*/, + bool /*upper*/, + TransposeType /*transpose*/, + bool /*unitriangular*/); +DECLARE_DISPATCH(triangular_solve_fn, triangular_solve_stub) + +using lu_factor_fn = void (*)( + const Tensor& /*input*/, + const Tensor& /*pivots*/, + const Tensor& /*infos*/, + bool /*compute_pivots*/); +DECLARE_DISPATCH(lu_factor_fn, lu_factor_stub) + +using unpack_pivots_fn = void(*)( + TensorIterator& iter, + const int64_t dim_size, + const int64_t max_pivot); +DECLARE_DISPATCH(unpack_pivots_fn, unpack_pivots_stub) + +using lu_solve_fn = void (*)( + const Tensor& /*LU*/, + const Tensor& /*pivots*/, + const Tensor& /*B*/, + TransposeType /*trans*/); +DECLARE_DISPATCH(lu_solve_fn, lu_solve_stub) + +using ldl_factor_fn = void (*)( + const Tensor& /*LD*/, + const Tensor& /*pivots*/, + const Tensor& /*info*/, + bool /*upper*/, + bool /*hermitian*/); +DECLARE_DISPATCH(ldl_factor_fn, ldl_factor_stub) + +using svd_fn = void (*)( + const Tensor& /*A*/, + const bool /*full_matrices*/, + const bool /*compute_uv*/, + const std::optional& /*driver*/, + const Tensor& /*U*/, + const Tensor& /*S*/, + const Tensor& /*Vh*/, + const Tensor& /*info*/); +DECLARE_DISPATCH(svd_fn, svd_stub) + +using ldl_solve_fn = void (*)( + const Tensor& /*LD*/, + const Tensor& /*pivots*/, + const Tensor& /*result*/, + bool /*upper*/, + bool /*hermitian*/); +DECLARE_DISPATCH(ldl_solve_fn, ldl_solve_stub) +} // namespace at::native diff --git a/lib/python3.10/site-packages/torch/include/ATen/native/CanUse32BitIndexMath.h b/lib/python3.10/site-packages/torch/include/ATen/native/CanUse32BitIndexMath.h new file mode 100644 index 0000000000000000000000000000000000000000..db9742e04021e6fa6942c540c28f4ca6ff90d5df --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/native/CanUse32BitIndexMath.h @@ -0,0 +1,13 @@ +#pragma once +#include +#include + +namespace at { +class TensorBase; +} + +namespace at::native { + +TORCH_API bool canUse32BitIndexMath(const at::TensorBase &t, int64_t max_elem=std::numeric_limits::max()); + +} diff --git 
a/lib/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h b/lib/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h new file mode 100644 index 0000000000000000000000000000000000000000..fc8a5f1962d861c8e3725c5a1a4b3e8cd0d6532d --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h @@ -0,0 +1,479 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include + +// Implements instruction set specific function dispatch. +// +// Kernels that may make use of specialized instruction sets (e.g. AVX2) are +// compiled multiple times with different compiler flags (e.g. -mavx2). A +// DispatchStub contains a table of function pointers for a kernel. At runtime, +// the fastest available kernel is chosen based on the features reported by +// cpuinfo. +// +// Example: +// +// In native/MyKernel.h: +// using fn_type = void(*)(const Tensor& x); +// DECLARE_DISPATCH(fn_type, stub) +// +// In native/MyKernel.cpp +// DEFINE_DISPATCH(stub); +// +// In native/cpu/MyKernel.cpp: +// namespace { +// // use anonymous namespace so that different cpu versions won't conflict +// void kernel(const Tensor& x) { ... } +// } +// REGISTER_DISPATCH(stub, &kernel); +// +// To call: +// stub(kCPU, tensor); +// +// TODO: CPU instruction set selection should be folded into whatever +// the main dispatch mechanism is. +// +// Supported device types for registration: +// - CPU: Central Processing Unit +// - CUDA: NVIDIA GPUs +// - HIP: AMD GPUs +// - MPS: Apple Silicon GPUs (Metal Performance Shaders) +// - MTIA: Meta Training and Inference Devices +// - XPU: Intel GPUs +// - PrivateUse1: Reserved for private/custom device types +// +// If you want to update the list of supported devices, add a new dispatch_ptr +// member in DispatchStubImpl.h and update the get_call_ptr switch. +// As well you will need to update the inlined list in 'is_device_supported` +// +// +// ignore warnings about DispatchStub::DEFAULT, AVX, AVX2 defined elsewhere +C10_CLANG_DIAGNOSTIC_PUSH() +C10_CLANG_DIAGNOSTIC_IGNORE("-Wundefined-var-template") + +namespace at::native { + +enum class CPUCapability { + DEFAULT = 0, +#if defined(HAVE_VSX_CPU_DEFINITION) + VSX = 1, +#elif defined(HAVE_ZVECTOR_CPU_DEFINITION) + ZVECTOR = 1, +#elif defined(HAVE_SVE_CPU_DEFINITION) + SVE256 = 1, +#else + AVX2 = 1, + AVX512 = 2, +#endif + NUM_OPTIONS +}; + +// Enum for error types +enum class ErrorType { + MissingDeviceKernel, + DeviceNotSupported +}; + +// Alias for the return type using std::variant +using DispatchResult = std::variant; + +CPUCapability get_cpu_capability(); + +template +struct DispatchStub; + +/** + * The sole purpose of this class is to outline methods that don't need to be + * specialized or otherwise inlined and duplicated (by the compiler due to + * template expansion), since it causes size bloat if there are a significant + * number of specialization of the DispatchStub<> class. + */ +struct TORCH_API DispatchStubImpl { + + // The DispatchStubImpl::try_get_call_ptr() method is used to get the call + // pointer for a given device type. If the call pointer is not found, + // DispatchStubImpl::try_get_call_ptr() returns an ErrorType. + // The main difference between try_get_call_ptr() and get_call_ptr() is that + // try_get_call_ptr() will return the ErrorType and not raise an exception. 
+ DispatchResult try_get_call_ptr( + c10::DeviceType device_type + , void *DEFAULT +#ifdef HAVE_AVX512_CPU_DEFINITION + , void *AVX512 +#endif +#ifdef HAVE_AVX2_CPU_DEFINITION + , void *AVX2 +#endif +#ifdef HAVE_VSX_CPU_DEFINITION + , void *VSX +#endif +#ifdef HAVE_ZVECTOR_CPU_DEFINITION + , void *ZVECTOR +#endif +#ifdef HAVE_SVE256_CPU_DEFINITION + , void *SVE256 +#endif + ); + + // Analogous to try_get_call_ptr(), but it will return the ErrorType and not + // raise an exception. + DispatchResult try_choose_cpu_impl( + void *DEFAULT +#ifdef HAVE_AVX512_CPU_DEFINITION + , void *AVX512 +#endif +#ifdef HAVE_AVX2_CPU_DEFINITION + , void *AVX2 +#endif +#ifdef HAVE_VSX_CPU_DEFINITION + , void *VSX +#endif +#ifdef HAVE_ZVECTOR_CPU_DEFINITION + , void *ZVECTOR +#endif +#ifdef HAVE_SVE256_CPU_DEFINITION + , void *SVE256 +#endif + ); + + + void* get_call_ptr( + c10::DeviceType device_type + , void *DEFAULT +#ifdef HAVE_AVX512_CPU_DEFINITION + , void *AVX512 +#endif +#ifdef HAVE_AVX2_CPU_DEFINITION + , void *AVX2 +#endif +#ifdef HAVE_VSX_CPU_DEFINITION + , void *VSX +#endif +#ifdef HAVE_ZVECTOR_CPU_DEFINITION + , void *ZVECTOR +#endif +#ifdef HAVE_SVE256_CPU_DEFINITION + , void *SVE256 +#endif + ); + + /** + * The CPU Dispatch actual method is chosen in decreasing order of preference by + * DispatchStubImpl::choose_cpu_impl() in case none is found by + * DispatchStubImpl::get_call_ptr() in cpu_dispatch_ptr. + */ + void* choose_cpu_impl( + void *DEFAULT +#ifdef HAVE_AVX512_CPU_DEFINITION + , void *AVX512 +#endif +#ifdef HAVE_AVX2_CPU_DEFINITION + , void *AVX2 +#endif +#ifdef HAVE_VSX_CPU_DEFINITION + , void *VSX +#endif +#ifdef HAVE_ZVECTOR_CPU_DEFINITION + , void *ZVECTOR +#endif +#ifdef HAVE_SVE256_CPU_DEFINITION + , void *SVE256 +#endif + ); + + // Fixing dispatch error in Windows debug builds. + // See https://github.com/pytorch/pytorch/issues/22681 for more details. + #if defined(_MSC_VER) && defined(_DEBUG) + std::atomic cpu_dispatch_ptr; + void* cuda_dispatch_ptr; + void* hip_dispatch_ptr; + void* mps_dispatch_ptr; + void* mtia_dispatch_ptr; + #if defined(USE_XPU) + void* xpu_dispatch_ptr; + #endif + void* privateuse1_dispatch_ptr; + #else + std::atomic cpu_dispatch_ptr{nullptr}; + void* cuda_dispatch_ptr = nullptr; + void* hip_dispatch_ptr = nullptr; + void* mps_dispatch_ptr = nullptr; + void* mtia_dispatch_ptr = nullptr; + #if defined(USE_XPU) + void* xpu_dispatch_ptr = nullptr; + #endif + void* privateuse1_dispatch_ptr = nullptr; + #endif +}; + +template +struct DispatchStub { + using FnPtr = rT (*) (Args...); + + DispatchStub() = default; + DispatchStub(const DispatchStub&) = delete; + DispatchStub& operator=(const DispatchStub&) = delete; + +private: + FnPtr get_call_ptr(const c10::DeviceType device_type) { + return reinterpret_cast( + impl.get_call_ptr(device_type + , reinterpret_cast(DEFAULT) +#ifdef HAVE_AVX512_CPU_DEFINITION + , reinterpret_cast(AVX512) +#endif +#ifdef HAVE_AVX2_CPU_DEFINITION + , reinterpret_cast(AVX2) +#endif +#ifdef HAVE_VSX_CPU_DEFINITION + , reinterpret_cast(VSX) +#endif +#ifdef HAVE_ZVECTOR_CPU_DEFINITION + , reinterpret_cast(ZVECTOR) +#endif +#ifdef HAVE_SVE256_CPU_DEFINITION + , reinterpret_cast(SVE256) +#endif + ) + ); + } + +public: + template + rT operator()(c10::DeviceType device_type, ArgTypes&&... 
args) { + FnPtr call_ptr = get_call_ptr(device_type); + return (*call_ptr)(std::forward(args)...); + } + + void set_cuda_dispatch_ptr(FnPtr fn_ptr) { + impl.cuda_dispatch_ptr = reinterpret_cast(fn_ptr); + } + + #if defined(USE_XPU) + void set_xpu_dispatch_ptr(FnPtr fn_ptr){ + impl.xpu_dispatch_ptr = reinterpret_cast(fn_ptr); + } + #endif + + void set_hip_dispatch_ptr(FnPtr fn_ptr) { + impl.hip_dispatch_ptr = reinterpret_cast(fn_ptr); + } + + void set_mps_dispatch_ptr(FnPtr fn_ptr) { + impl.mps_dispatch_ptr = reinterpret_cast(fn_ptr); + } + + void set_mtia_dispatch_ptr(FnPtr fn_ptr) { + impl.mtia_dispatch_ptr = reinterpret_cast(fn_ptr); + } + + void set_privateuse1_dispatch_ptr(FnPtr fn_ptr) { + impl.privateuse1_dispatch_ptr = reinterpret_cast(fn_ptr); + } + + // Returns true if the dispatcher has a kernel registered for this device + // type. + bool is_device_supported(const c10::DeviceType device_type) { + auto result = impl.try_get_call_ptr(device_type + , reinterpret_cast(DEFAULT) +#ifdef HAVE_AVX512_CPU_DEFINITION + , reinterpret_cast(AVX512) +#endif +#ifdef HAVE_AVX2_CPU_DEFINITION + , reinterpret_cast(AVX2) +#endif +#ifdef HAVE_VSX_CPU_DEFINITION + , reinterpret_cast(VSX) +#endif +#ifdef HAVE_ZVECTOR_CPU_DEFINITION + , reinterpret_cast(ZVECTOR) +#endif +#ifdef HAVE_SVE256_CPU_DEFINITION + , reinterpret_cast(SVE256) +#endif + ); + if (std::holds_alternative(result)){ + return false; + } + return true; + } + + static TORCH_API FnPtr DEFAULT; +#ifdef HAVE_AVX512_CPU_DEFINITION + static TORCH_API FnPtr AVX512; +#endif +#ifdef HAVE_AVX2_CPU_DEFINITION + static TORCH_API FnPtr AVX2; +#endif +#ifdef HAVE_VSX_CPU_DEFINITION + static TORCH_API FnPtr VSX; +#endif +#ifdef HAVE_ZVECTOR_CPU_DEFINITION + static TORCH_API FnPtr ZVECTOR; +#endif +#ifdef HAVE_SVE256_CPU_DEFINITION + static TORCH_API FnPtr SVE256; +#endif +private: + DispatchStubImpl impl; +}; + +namespace { +template +struct RegisterCUDADispatch { + RegisterCUDADispatch(DispatchStub &stub, typename DispatchStub::FnPtr value) { + stub.set_cuda_dispatch_ptr(value); + } +}; + +template +struct RegisterXPUDispatch { + RegisterXPUDispatch(DispatchStub &stub, typename DispatchStub::FnPtr value){ + stub.set_xpu_dispatch_ptr(value); + } +}; + +template +struct RegisterMPSDispatch { + RegisterMPSDispatch(DispatchStub &stub, typename DispatchStub::FnPtr value) { + stub.set_mps_dispatch_ptr(value); + } +}; + +template +struct RegisterHIPDispatch { + RegisterHIPDispatch(DispatchStub &stub, typename DispatchStub::FnPtr value) { + // TODO: make this point at hip_dispatch_ptr + stub.set_cuda_dispatch_ptr(value); + } +}; + +template +struct RegisterMTIADispatch { + RegisterMTIADispatch(DispatchStub &stub, typename DispatchStub::FnPtr value) { + stub.set_mtia_dispatch_ptr(value); + } +}; + +template +struct RegisterPRIVATEUSE1Dispatch { + RegisterPRIVATEUSE1Dispatch(DispatchStub &stub, typename DispatchStub::FnPtr value) { + stub.set_privateuse1_dispatch_ptr(value); + } +}; + +} // anonymous namespace +// Compiler will complain if you put things like std::tuple in +// the `fn` argument of DECLARE_DISPATCH. Some possible workarounds, e.g., +// adding parentheses and using helper struct to get rid of the parentheses, do +// not work with MSVC. So do a `using`-declaration if you need to pass in such +// `fn`, e.g., grid_sampler_2d_backward_cpu_kernel in GridSampleKernel.h. 
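// Illustrative sketch of the DECLARE/DEFINE/REGISTER pattern documented at the
// top of this header, not part of the diff; the names my_kernel_fn,
// my_kernel_stub and my_kernel_impl are hypothetical and do not exist in ATen.
//
//   // native/MyKernel.h -- declare the stub once, next to the operator.
//   using my_kernel_fn = void (*)(const at::Tensor& x);
//   DECLARE_DISPATCH(my_kernel_fn, my_kernel_stub)
//
//   // native/MyKernel.cpp -- define the stub in exactly one translation unit.
//   DEFINE_DISPATCH(my_kernel_stub);
//
//   // native/cpu/MyKernel.cpp -- compiled once per CPU_CAPABILITY (e.g. with
//   // -mavx2); the anonymous namespace keeps the per-arch symbols separate.
//   namespace {
//   void my_kernel_impl(const at::Tensor& x) { /* vectorized body */ }
//   }
//   REGISTER_DISPATCH(my_kernel_stub, &my_kernel_impl);
//
//   // Call site -- picks the fastest kernel registered for the device:
//   my_kernel_stub(kCPU, tensor);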
+#define DECLARE_DISPATCH(fn, name) \ + struct name##_DECLARE_DISPATCH_type : DispatchStub { \ + name##_DECLARE_DISPATCH_type() = default; \ + name##_DECLARE_DISPATCH_type(const name##_DECLARE_DISPATCH_type&) = delete; \ + name##_DECLARE_DISPATCH_type& operator=(const name##_DECLARE_DISPATCH_type&) = delete; \ + name##_DECLARE_DISPATCH_type(name##_DECLARE_DISPATCH_type&&) = delete; \ + name##_DECLARE_DISPATCH_type& operator=(name##_DECLARE_DISPATCH_type&&) = delete; \ + ~name##_DECLARE_DISPATCH_type() = default; \ + }; \ + extern TORCH_API struct name##_DECLARE_DISPATCH_type name; + +#define DEFINE_DISPATCH(name) struct name##_DECLARE_DISPATCH_type name + +#define REGISTER_ARCH_DISPATCH(name, arch, fn) \ + template <> name##_DECLARE_DISPATCH_type::FnPtr TORCH_API DispatchStub::arch = fn; + +#ifdef HAVE_AVX512_CPU_DEFINITION +#define REGISTER_AVX512_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, AVX512, fn) +#else +#define REGISTER_AVX512_DISPATCH(name, fn) +#endif + +#ifdef HAVE_AVX2_CPU_DEFINITION +#define REGISTER_AVX2_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, AVX2, fn) +#else +#define REGISTER_AVX2_DISPATCH(name, fn) +#endif + +#ifdef HAVE_VSX_CPU_DEFINITION +#define REGISTER_VSX_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, VSX, fn) +#else +#define REGISTER_VSX_DISPATCH(name, fn) +#endif + +#ifdef HAVE_ZVECTOR_CPU_DEFINITION +#define REGISTER_ZVECTOR_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, ZVECTOR, fn) +#else +#define REGISTER_ZVECTOR_DISPATCH(name, fn) +#endif + +#ifdef HAVE_SVE256_CPU_DEFINITION +#define REGISTER_SVE256_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, SVE256, fn) +#else +#define REGISTER_SVE256_DISPATCH(name, fn) +#endif + +// Macro to register the same kernel for all CPU arch types. This is useful +// if a kernel does not benefit from being recompiled across different arch types. +#define REGISTER_ALL_CPU_DISPATCH(name, fn) \ + REGISTER_ARCH_DISPATCH(name, DEFAULT, fn) \ + REGISTER_AVX512_DISPATCH(name, fn) \ + REGISTER_AVX2_DISPATCH(name, fn) \ + REGISTER_VSX_DISPATCH(name, fn) \ + REGISTER_ZVECTOR_DISPATCH(name, fn) \ + REGISTER_SVE256_DISPATCH(name, fn) + +#define REGISTER_NO_CPU_DISPATCH(name) \ + REGISTER_ALL_CPU_DISPATCH(name, nullptr) + +#define REGISTER_CUDA_DISPATCH(name, fn) \ + static RegisterCUDADispatch name ## __register(name, fn); + +#define REGISTER_XPU_DISPATCH(name, fn) \ + static RegisterXPUDispatch name ## __register(name, fn); + +#define REGISTER_HIP_DISPATCH(name, fn) \ + static RegisterHIPDispatch name ## __register(name, fn); + +#define REGISTER_MPS_DISPATCH(name, fn) \ + static RegisterMPSDispatch name ## __register(name, fn); + +#define REGISTER_MTIA_DISPATCH(name, fn) \ + static RegisterMTIADispatch name ## __register(name, fn); + +#define REGISTER_PRIVATEUSE1_DISPATCH(name, fn) \ + static RegisterPRIVATEUSE1Dispatch name ## __register(name, fn); + +// NB: This macro must be used in an actual 'cu' file; if you try using +// it from a 'cpp' file it will not work! +#if defined(__CUDACC__) +#define REGISTER_DISPATCH(name, fn) REGISTER_CUDA_DISPATCH(name, fn) +#elif defined(__HIPCC__) +// TODO: cut this over to HIP dispatch once we stop pretending that CUDA +// is HIP in the PyTorch HIPify build. 
+#define REGISTER_DISPATCH(name, fn) REGISTER_CUDA_DISPATCH(name, fn) +// #define REGISTER_DISPATCH(name, fn) REGISTER_HIP_DISPATCH(name, fn) +#elif defined(__OBJC__) && defined(USE_MPS) +// NB: this macro must be used from a 'mm' file in order to dispatch a MPS kernel +#define REGISTER_DISPATCH(name, fn) REGISTER_MPS_DISPATCH(name, fn) +#elif defined(CPU_CAPABILITY) +// REGISTER_DISPATCH now dispatches an AVX512 kernel to nullptr but registers other dispatches. +// ALSO_REGISTER_AVX512_DISPATCH should be used for ensuring AVX512 dispatch, among others. +// ALSO_REGISTER_SVE256_DISPATCH should be used for ensuring SVE256 dispatch, among others. +#ifdef CPU_CAPABILITY_AVX512 +#define REGISTER_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, CPU_CAPABILITY, ((void*)(fn) ? nullptr : nullptr)) +#else +#define REGISTER_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, CPU_CAPABILITY, fn) +#endif +#define ALSO_REGISTER_AVX512_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, CPU_CAPABILITY, fn) +#define ALSO_REGISTER_SVE256_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, CPU_CAPABILITY, fn) +#endif +} // namespace at::native + +C10_CLANG_DIAGNOSTIC_POP() diff --git a/lib/python3.10/site-packages/torch/include/ATen/native/EmbeddingBag.h b/lib/python3.10/site-packages/torch/include/ATen/native/EmbeddingBag.h new file mode 100644 index 0000000000000000000000000000000000000000..eb29e1171dcd60a5d9fbc5cb35b488603c1b0886 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/native/EmbeddingBag.h @@ -0,0 +1,153 @@ +#include +#include +#include + +#ifdef USE_FBGEMM +#include +#endif + +namespace at::native { + +enum class EmbeddingBagMode { + SUM = 0, + MEAN = 1, + MAX = 2, +}; + +[[maybe_unused]] static bool operator==(int64_t op1, EmbeddingBagMode op2) { + return op1 == static_cast(op2); +} + +[[maybe_unused]] static bool operator!=(int64_t op1, EmbeddingBagMode op2) { + return !(op1 == op2); +} + +void check_arguments( + const Tensor& weight, + const Tensor& indices, + const Tensor& offsets, + const int64_t mode, + const std::optional& per_sample_weights, + bool include_last_offset); + +void make_bag_size_out( + Tensor& bag_size_out, + const Tensor& offsets, + const Tensor& indices, + const int64_t mode, + const bool include_last_offset, + const bool requires_grad); + +void make_max_indices_out( + Tensor& max_indices_out, + const Tensor& weight, + const Tensor& indices, + const Tensor& offsets, + const Tensor& bag_size, + const int64_t mode, + bool include_last_offset); + +void make_offset2bag_out( + Tensor& offset2bag, + Tensor& output, + const Tensor& weight, + const Tensor& indices, + const Tensor& offsets, + const int64_t mode, + const std::optional& per_sample_weights, + const int64_t padding_idx = -1); + +#ifdef USE_FBGEMM + +template +struct _CallbackAndBlockSize { + using TCallback = typename fbgemm::EmbeddingSpMDMKernelSignature::Type; + + int64_t blockSize = -1; + TCallback callback = nullptr; + + static TCallback generateCallback(int64_t block_size) { + return fbgemm::GenerateEmbeddingSpMDM( + block_size, + has_weight, + /* normalize_by_lengths */false, + /* prefetch */16, + /* is_weight_positional */false, + /* use_offsets */true); + } + + _CallbackAndBlockSize() = default; + + explicit _CallbackAndBlockSize(std::optional maybe_block_size) + : blockSize(maybe_block_size.value_or(-1)) + , callback(maybe_block_size.has_value() ? generateCallback(maybe_block_size.value()) : nullptr) + {} +}; + +template +struct _EmbeddingBagKernelCacheImpl : private StorageMixins... 
{ + + _EmbeddingBagKernelCacheImpl() = default; + // use each of the mixins to store corresponding kernel and block size + explicit _EmbeddingBagKernelCacheImpl(std::optional maybe_block_size) + : StorageMixins(maybe_block_size)... + {} + + // this method is thread safe (call sites may call from different threads) + template + typename _CallbackAndBlockSize::TCallback + getCallback(int64_t block_size) const { + // if the cache doesn't store the kernel for the incoming block size + // (so it is different from the one stored in corresponding mixin) + // regenerate the kernel (not writing it into the cache so we avoid locks) + if (block_size != _CallbackAndBlockSize::blockSize) { + return _CallbackAndBlockSize::generateCallback(block_size); + } + // else retrieve the cached kernel from the corresponding mixin + return _CallbackAndBlockSize::callback; + } +}; + +// instantiate the cache with the list of storage mixins +// for each of the 8 _EmbeddingBagKernelCache* usages in the EmbeddingBag.cpp impl file +using _EmbeddingBagKernelCache = _EmbeddingBagKernelCacheImpl< + _CallbackAndBlockSize, + _CallbackAndBlockSize, + _CallbackAndBlockSize, + _CallbackAndBlockSize, + _CallbackAndBlockSize, + _CallbackAndBlockSize, + _CallbackAndBlockSize, + _CallbackAndBlockSize>; +#else +struct _EmbeddingBagKernelCache { + explicit _EmbeddingBagKernelCache(std::optional /* maybe_block_size */) {} +}; +#endif + +void _embedding_bag_cpu_impl_out(Tensor& output, Tensor& offset2bag, + Tensor& bag_size, Tensor* max_indices, + const Tensor &weight, const Tensor &indices, + const Tensor &offsets, const int64_t mode = 0, + const std::optional& per_sample_weights = std::nullopt, + bool include_last_offset = false, + int64_t padding_idx = -1, + _EmbeddingBagKernelCache* fbgemm_kernel_cache = nullptr); + +void _embedding_bag_cpu_out( + at::Tensor& output, + at::Tensor& offset2bag, + at::Tensor& bag_size, + at::Tensor* p_max_indices, + const at::Tensor& weight, + const at::Tensor& indices, + const at::Tensor& offsets, + const bool scale_grad_by_freq, + const int64_t mode, + const bool sparse, + const std::optional& per_sample_weights, + const bool include_last_offset, + const std::optional& padding_idx, + _EmbeddingBagKernelCache* fbgemm_kernel_cache = nullptr); + +} // namespace at::native diff --git a/lib/python3.10/site-packages/torch/include/ATen/native/FusedAdam.h b/lib/python3.10/site-packages/torch/include/ATen/native/FusedAdam.h new file mode 100644 index 0000000000000000000000000000000000000000..db93f10bb95f168da44b24cb506ff5c04e3da06e --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/native/FusedAdam.h @@ -0,0 +1,27 @@ +#include +#include + +namespace at::native { + +enum class ADAM_MODE : uint8_t { ORIGINAL = 0, ADAMW = 1 }; + +using fused_adam_fn = void (*)( + const at::Tensor& param, + const at::Tensor& grad, + const at::Tensor& exp_avg, + const at::Tensor& exp_avg_sq, + const at::Tensor& max_exp_avg_sq, + const at::Tensor& state_step, + const double lr, + const double beta1, + const double beta2, + const double weight_decay, + const double eps, + const bool amsgrad, + const bool maximize, + const float* grad_scale_ptr, + const ADAM_MODE); + +DECLARE_DISPATCH(fused_adam_fn, fused_adam_stub) + +} // namespace at::native diff --git a/lib/python3.10/site-packages/torch/include/ATen/native/GridSampler.h b/lib/python3.10/site-packages/torch/include/ATen/native/GridSampler.h new file mode 100644 index 0000000000000000000000000000000000000000..509a305fe4b5ed33c128b06fec8473816eeca46a --- 
/dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/native/GridSampler.h @@ -0,0 +1,298 @@ +#pragma once + +#include +#include +#include +#include + +#include + +namespace at::native { + +using detail::GridSamplerInterpolation; +using detail::GridSamplerPadding; + +// Unnormalizes a coordinate from the -1 to +1 scale to its pixel index value, +// where we view each pixel as an area between (idx - 0.5) and (idx + 0.5). +// if align_corners: -1 and +1 get sent to the centers of the corner pixels +// -1 --> 0 +// +1 --> (size - 1) +// scale_factor = (size - 1) / 2 +// if not align_corners: -1 and +1 get sent to the image edges +// -1 --> -0.5 +// +1 --> (size - 1) + 0.5 == size - 0.5 +// scale_factor = size / 2 +template +static inline scalar_t grid_sampler_unnormalize(scalar_t coord, int64_t size, + bool align_corners) { + if (align_corners) { + // unnormalize coord from [-1, 1] to [0, size - 1] + return ((coord + 1) / 2) * (size - 1); + } else { + // unnormalize coord from [-1, 1] to [-0.5, size - 0.5] + return ((coord + 1) * size - 1) / 2; + } +} + +// grid_sampler_unnormalize_set_grad works the same as grid_sampler_unnormalize +// except that it also returns the `d output / d input` via pointer argument +// `grad_in`. +// This is useful in the backward pass of grid_sampler. +template +static inline scalar_t grid_sampler_unnormalize_set_grad(scalar_t coord, int64_t size, + bool align_corners, scalar_t *grad_in) { + if (align_corners) { + // unnormalize coord from [-1, 1] to [0, size - 1] + *grad_in = static_cast(size - 1) / 2; + return ((coord + 1) / 2) * (size - 1); + } else { + // unnormalize coord from [-1, 1] to [-0.5, size - 0.5] + *grad_in = static_cast(size) / 2; + return ((coord + 1) * size - 1) / 2; + } +} + +// Clips coordinates to between 0 and clip_limit - 1 +template +static inline scalar_t clip_coordinates(scalar_t in, int64_t clip_limit) { + return std::min(static_cast(clip_limit - 1), std::max(in, static_cast(0))); +} + +// clip_coordinates_set_grad works similarly to clip_coordinates except that +// it also returns the `d output / d input` via pointer argument `grad_in`. +// This is useful in the backward pass of grid_sampler. +template +static inline scalar_t clip_coordinates_set_grad(scalar_t in, int64_t clip_limit, + scalar_t *grad_in) { + // Note that it is important for the gradient calculation that borders + // are considered out of bounds. + if (in <= static_cast(0)) { + *grad_in = static_cast(0); + return static_cast(0); + } else { + scalar_t max = static_cast(clip_limit - 1); + if (in >= max) { + *grad_in = static_cast(0); + return max; + } else { + *grad_in = static_cast(1); + return in; + } + } +} + +// Reflects coordinates until they fall between low and high (inclusive). +// The bounds are passed as twice their value so that half-integer values +// can be represented as ints. +template +static inline scalar_t reflect_coordinates(scalar_t in, int64_t twice_low, + int64_t twice_high) { + if (twice_low == twice_high) { + return static_cast(0); + } + scalar_t min = static_cast(twice_low) / 2; + scalar_t span = static_cast(twice_high - twice_low) / 2; + in = std::fabs(in - min); + // `fmod` returns same sign as `in`, which is positive after the `fabs` above. 
+ scalar_t extra = std::fmod(in, span); + int flips = static_cast(std::floor(in / span)); + if (flips % 2 == 0) { + return extra + min; + } else { + return span - extra + min; + } +} + +// reflect_coordinates_set_grad works similarly to reflect_coordinates except +// that it also returns the `d output / d input` via pointer argument +// `grad_in`. +// This is useful in the backward pass of grid_sampler. +template +static inline scalar_t reflect_coordinates_set_grad(scalar_t in, int64_t twice_low, + int64_t twice_high, scalar_t *grad_in) { + if (twice_low == twice_high) { + *grad_in = static_cast(0); + return static_cast(0); + } + int grad_in_mult_; + scalar_t min = static_cast(twice_low) / 2; + scalar_t span = static_cast(twice_high - twice_low) / 2; + in = in - min; + if (in < static_cast(0)) { + grad_in_mult_ = -1; + in = -in; + } else { + grad_in_mult_ = 1; + } + // `fmod` returns same sign as `in`, which is positive after the `if` above. + scalar_t extra = std::fmod(in, span); + int flips = static_cast(std::floor(in / span)); + if (flips % 2 == 0) { + *grad_in = static_cast(grad_in_mult_); + return extra + min; + } else { + *grad_in = static_cast(-grad_in_mult_); + return span - extra + min; + } +} + +// Mapping the out-of-boundary points back into boundary +// This would only affect padding_mode=border or reflection +template +static inline scalar_t compute_coordinates(scalar_t coord, int64_t size, + GridSamplerPadding padding_mode, + bool align_corners) { + if (padding_mode == GridSamplerPadding::Border) { + // clip coordinates to image borders + coord = clip_coordinates(coord, size); + } else if (padding_mode == GridSamplerPadding::Reflection) { + // reflect coordinates by image borders + if (align_corners) { + coord = reflect_coordinates(coord, 0, 2*(size - 1)); + } else { + coord = reflect_coordinates(coord, -1, 2*size - 1); + } + // clip coordinates to image borders + coord = clip_coordinates(coord, size); + } + return coord; +} + +// Computes the pixel source index value for a grid coordinate +template +static inline scalar_t grid_sampler_compute_source_index( + scalar_t coord, + int64_t size, + GridSamplerPadding padding_mode, + bool align_corners) { + coord = grid_sampler_unnormalize(coord, size, align_corners); + coord = compute_coordinates(coord, size, padding_mode, align_corners); + return coord; +} + +// grid_sampler_compute_source_index_set_grad works similarly to +// grid_sampler_compute_source_index except that it also returns the +// `d output / d input` via pointer argument `grad_in`. +// This is useful in the backward pass of grid_sampler. 
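// Standalone worked example (illustrative only, not part of the header above):
// a reference reimplementation of the two grid_sampler_unnormalize branches,
// checked against a width-4 image. All values are exact in double, so the
// asserts compare exactly.
#include <cassert>
#include <cstdint>

template <typename scalar_t>
static scalar_t unnormalize_ref(scalar_t coord, int64_t size, bool align_corners) {
  // align_corners: [-1, 1] -> [0, size - 1]; otherwise: [-1, 1] -> [-0.5, size - 0.5]
  return align_corners ? ((coord + 1) / 2) * (size - 1)
                       : ((coord + 1) * size - 1) / 2;
}

int main() {
  assert(unnormalize_ref<double>(-1.0, 4, /*align_corners=*/true)  ==  0.0);  // center of pixel 0
  assert(unnormalize_ref<double>(+1.0, 4, /*align_corners=*/true)  ==  3.0);  // center of pixel 3
  assert(unnormalize_ref<double>(-1.0, 4, /*align_corners=*/false) == -0.5);  // left edge of pixel 0
  assert(unnormalize_ref<double>(+1.0, 4, /*align_corners=*/false) ==  3.5);  // right edge of pixel 3
  return 0;
}
// Out-of-range results are then mapped back inside the image by
// compute_coordinates() according to padding_mode (Border clips, Reflection
// reflects and then clips), as grid_sampler_compute_source_index() shows above.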
+template +static inline scalar_t grid_sampler_compute_source_index_set_grad( + scalar_t coord, + int64_t size, + GridSamplerPadding padding_mode, + bool align_corners, + scalar_t *grad_in) { + scalar_t grad_clip, grad_refl; + coord = grid_sampler_unnormalize_set_grad(coord, size, align_corners, grad_in); + if (padding_mode == GridSamplerPadding::Border) { + // clip coordinates to image borders + coord = clip_coordinates_set_grad(coord, size, &grad_clip); + *grad_in = (*grad_in) * grad_clip; + } else if (padding_mode == GridSamplerPadding::Reflection) { + // reflect coordinates by image borders + if (align_corners) { + coord = reflect_coordinates_set_grad(coord, 0, 2*(size - 1), &grad_refl); + } else { + coord = reflect_coordinates_set_grad(coord, -1, 2*size - 1, &grad_refl); + } + // clip coordinates to image borders + coord = clip_coordinates_set_grad(coord, size, &grad_clip); + *grad_in = (*grad_in) * grad_refl * grad_clip; + } + return coord; +} + +static inline bool within_bounds_2d(int64_t h, int64_t w, int64_t H, int64_t W) { + return h >= 0 && h < H && w >= 0 && w < W; +} + +static inline bool within_bounds_3d(int64_t d, int64_t h, int64_t w, int64_t D, int64_t H, int64_t W) { + return d >= 0 && d < D && h >= 0 && h < H && w >= 0 && w < W; +} + +template +static inline scalar_t get_value_bounded( + const scalar_t* data, + scalar_t x, + scalar_t y, + int64_t W, + int64_t H, + int64_t sW, + int64_t sH, + GridSamplerPadding padding_mode, + bool align_corners) { + + x = compute_coordinates(x, W, padding_mode, align_corners); + y = compute_coordinates(y, H, padding_mode, align_corners); + + int64_t ix = static_cast(x); + int64_t iy = static_cast(y); + + if (within_bounds_2d(iy, ix, H, W)) { + return data[iy * sH + ix * sW]; + } + return static_cast(0); +} + +template +static inline void safe_add_2d(scalar_t *data, int64_t h, int64_t w, + int64_t sH, int64_t sW, int64_t H, int64_t W, + scalar_t delta) { + if (within_bounds_2d(h, w, H, W)) { + data[h * sH + w * sW] += delta; + } +} + +template +static inline void safe_add_3d(scalar_t *data, int64_t d, int64_t h, int64_t w, + int64_t sD, int64_t sH, int64_t sW, + int64_t D, int64_t H, int64_t W, + scalar_t delta) { + if (within_bounds_3d(d, h, w, D, H, W)) { + data[d * sD + h * sH + w * sW] += delta; + } +} + +template +static inline void add_value_bounded( + scalar_t* data, + scalar_t x, + scalar_t y, + int64_t W, + int64_t H, + int64_t sW, + int64_t sH, + scalar_t delta, + GridSamplerPadding padding_mode, + bool align_corners) { + + x = compute_coordinates(x, W, padding_mode, align_corners); + y = compute_coordinates(y, H, padding_mode, align_corners); + + int64_t ix = static_cast(x); + int64_t iy = static_cast(y); + + safe_add_2d(data, iy, ix, sH, sW, H, W, delta); +} + +// Calculate the differential of the cubic convolution, i.e. 
`d coeff / d x` +template +static inline void get_cubic_coefficients_grad( + scalar_t coeffs[4], + scalar_t t) { + + // Must be the same as forward calculation in + // aten/src/ATen/native/UpSample.h:get_cubic_upsample_coefficients + scalar_t A = -0.75; + + scalar_t x; + x = -1 - t; // 1 < x = |-1 - tx| < 2 + coeffs[0] = (-3 * A * x - 10 * A ) * x - 8 * A; + x = -t; // x = |0 - tx| <= 1 + coeffs[1] = (-3 * (A + 2) * x - 2 * (A + 3)) * x; + x = 1 - t; // x = |1 - tx| <= 1 + coeffs[2] = (3 * (A + 2) * x - 2 * (A + 3)) * x; + x = 2 - t; // 1 < x = |2 - tx| < 2 + coeffs[3] = (3 * A * x - 10 * A) * x + 8 * A; +} + +} // namespace at::native diff --git a/lib/python3.10/site-packages/torch/include/ATen/native/MathBitFallThroughLists.h b/lib/python3.10/site-packages/torch/include/ATen/native/MathBitFallThroughLists.h new file mode 100644 index 0000000000000000000000000000000000000000..97b0854d82d0a2fec6bb708db767d81273ec7bcc --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/native/MathBitFallThroughLists.h @@ -0,0 +1,71 @@ +#pragma once + +namespace at { +// views and their in-place version ops +#define TORCH_VIEW_FNS(m) \ + m.impl("as_strided_", torch::CppFunction::makeFallthrough()); \ + m.impl("detach", torch::CppFunction::makeFallthrough()); \ + m.impl("detach_", torch::CppFunction::makeFallthrough()); \ + m.impl("diagonal", torch::CppFunction::makeFallthrough()); \ + m.impl("expand", torch::CppFunction::makeFallthrough()); \ + m.impl("expand_as", torch::CppFunction::makeFallthrough()); \ + m.impl("movedim.int", torch::CppFunction::makeFallthrough()); \ + m.impl("movedim.intlist", torch::CppFunction::makeFallthrough()); \ + m.impl("narrow", torch::CppFunction::makeFallthrough()); \ + m.impl("permute", torch::CppFunction::makeFallthrough()); \ + m.impl("select.Dimname", torch::CppFunction::makeFallthrough()); \ + m.impl("select.int", torch::CppFunction::makeFallthrough()); \ + m.impl("squeeze", torch::CppFunction::makeFallthrough()); \ + m.impl("squeeze_", torch::CppFunction::makeFallthrough()); \ + m.impl("transpose.int", torch::CppFunction::makeFallthrough()); \ + m.impl("transpose.Dimname", torch::CppFunction::makeFallthrough()); \ + m.impl("transpose_", torch::CppFunction::makeFallthrough()); \ + m.impl("t", torch::CppFunction::makeFallthrough()); \ + m.impl("t_", torch::CppFunction::makeFallthrough()); \ + m.impl("real", torch::CppFunction::makeFallthrough()); \ + m.impl("imag", torch::CppFunction::makeFallthrough()); \ + m.impl("view_as_real", torch::CppFunction::makeFallthrough()); \ + m.impl("unflatten.int", torch::CppFunction::makeFallthrough()); \ + m.impl("unflatten.Dimname", torch::CppFunction::makeFallthrough()); \ + m.impl("unfold", torch::CppFunction::makeFallthrough()); \ + m.impl("unsqueeze", torch::CppFunction::makeFallthrough()); \ + m.impl("unsqueeze_", torch::CppFunction::makeFallthrough()); \ + m.impl("view_as", torch::CppFunction::makeFallthrough()); \ + m.impl("unbind.int", torch::CppFunction::makeFallthrough()); \ + m.impl("unbind.Dimname", torch::CppFunction::makeFallthrough()); \ + m.impl("split.Tensor", torch::CppFunction::makeFallthrough()); \ + m.impl("split_with_sizes", torch::CppFunction::makeFallthrough()); \ + m.impl("swapaxes", torch::CppFunction::makeFallthrough()); \ + m.impl("swapdims", torch::CppFunction::makeFallthrough()); \ + m.impl("chunk", torch::CppFunction::makeFallthrough()); \ + m.impl("reshape", torch::CppFunction::makeFallthrough()); \ + m.impl("alias", torch::CppFunction::makeFallthrough()); \ + m.impl("hsplit.int", 
torch::CppFunction::makeFallthrough()); \ + m.impl("hsplit.array", torch::CppFunction::makeFallthrough()); \ + m.impl("dsplit.int", torch::CppFunction::makeFallthrough()); \ + m.impl("dsplit.array", torch::CppFunction::makeFallthrough()); \ + m.impl("vsplit.int", torch::CppFunction::makeFallthrough()); \ + m.impl("vsplit.array", torch::CppFunction::makeFallthrough()); \ + m.impl("conj", torch::CppFunction::makeFallthrough()); \ + m.impl("_conj", torch::CppFunction::makeFallthrough()); \ + m.impl("_unsafe_view", torch::CppFunction::makeFallthrough()); \ + m.impl("resize_", torch::CppFunction::makeFallthrough()); + +#define TENSOR_UTILITIES_AND_CONSTRUCTORS(m) \ + m.impl("empty_like", torch::CppFunction::makeFallthrough()); \ + m.impl("empty.memory_format", torch::CppFunction::makeFallthrough()); \ + m.impl("empty.out", torch::CppFunction::makeFallthrough()); \ + m.impl("empty_strided", torch::CppFunction::makeFallthrough()); \ + m.impl("full_like", torch::CppFunction::makeFallthrough()); \ + m.impl("stride.int", torch::CppFunction::makeFallthrough()); \ + m.impl("stride.Dimname", torch::CppFunction::makeFallthrough()); \ + m.impl("size.int", torch::CppFunction::makeFallthrough()); \ + m.impl("size.Dimname", torch::CppFunction::makeFallthrough()); \ + m.impl("is_complex", torch::CppFunction::makeFallthrough()); \ + m.impl("is_floating_point", torch::CppFunction::makeFallthrough()); \ + m.impl("requires_grad_", torch::CppFunction::makeFallthrough()); +} + +#define TORCH_VIEW_FNS_NATIVE_FN_REGISTRATION(m) \ + m.impl("as_strided", torch::CppFunction::makeFallthrough()); \ + m.impl("view", torch::CppFunction::makeFallthrough()); diff --git a/lib/python3.10/site-packages/torch/include/ATen/native/ReduceAllOps.h b/lib/python3.10/site-packages/torch/include/ATen/native/ReduceAllOps.h new file mode 100644 index 0000000000000000000000000000000000000000..a57d138e15511407ca7fa7d117400bf8436fe133 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/native/ReduceAllOps.h @@ -0,0 +1,16 @@ +#pragma once + +#include + +namespace at { +class Tensor; +} + +namespace at::native { + +using reduce_all_fn = void (*)(Tensor & result, const Tensor & self); +using reduce_min_max_fn = void (*)(Tensor & max_result, Tensor & min_result, const Tensor & self); +DECLARE_DISPATCH(reduce_all_fn, min_all_stub) +DECLARE_DISPATCH(reduce_all_fn, max_all_stub) + +} // namespace at::native diff --git a/lib/python3.10/site-packages/torch/include/ATen/native/ReductionType.h b/lib/python3.10/site-packages/torch/include/ATen/native/ReductionType.h new file mode 100644 index 0000000000000000000000000000000000000000..48ecf5e83d4ead792de475964e306627149826b5 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/native/ReductionType.h @@ -0,0 +1,40 @@ +#pragma once + +#include + +namespace at::native { + +enum class ReductionType {MAX, MEAN, MIN, SUM, PROD}; + +inline ReductionType get_reduction_enum(const std::string_view& reduce) { + if (reduce == "max" || reduce == "amax") { + return ReductionType::MAX; + } else if (reduce == "mean") { + return ReductionType::MEAN; + } else if (reduce == "min" || reduce == "amin") { + return ReductionType::MIN; + } else if (reduce == "sum") { + return ReductionType::SUM; + } else if (reduce == "prod") { + return ReductionType::PROD; + } else { + TORCH_CHECK(false, "reduce argument must be either sum, prod, mean, amax or amin, got ", reduce); + } +} + +// used for `scatter_reduce`, old options for BC. 
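// Standalone usage sketch of the string -> ReductionType mapping above (not
// part of the header; assumes the ATen headers are on the include path).
#include <ATen/native/ReductionType.h>
#include <cassert>

int main() {
  using namespace at::native;
  // New-style spellings accepted by get_reduction_enum():
  assert(get_reduction_enum("amax") == ReductionType::MAX);
  assert(get_reduction_enum("mean") == ReductionType::MEAN);
  assert(get_reduction_enum("prod") == ReductionType::PROD);
  // get_operator_enum() (defined next) additionally maps the legacy
  // scatter_reduce spellings "add" and "multiply" when use_new_options is false.
  return 0;
}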
+inline ReductionType get_operator_enum(const std::string_view reduce, bool use_new_options) {
+  if (use_new_options) {
+    return get_reduction_enum(reduce);
+  } else {
+    if (reduce == "add") {
+      return ReductionType::SUM;
+    } else if (reduce == "multiply") {
+      return ReductionType::PROD;
+    } else {
+      TORCH_CHECK(false, "reduce argument must be either add or multiply.")
+    }
+  }
+}
+
+} // at::native
diff --git a/lib/python3.10/site-packages/torch/include/ATen/native/Sorting.h b/lib/python3.10/site-packages/torch/include/ATen/native/Sorting.h
new file mode 100644
index 0000000000000000000000000000000000000000..9dd28c39a141263e6bb6d92184a4b17576476096
--- /dev/null
+++ b/lib/python3.10/site-packages/torch/include/ATen/native/Sorting.h
@@ -0,0 +1,28 @@
+#pragma once
+
+#include <ATen/native/DispatchStub.h>
+#include <cstdint>
+
+namespace at {
+class TensorBase;
+}
+
+namespace at::native {
+
+enum class QUANTILE_INTERPOLATION_MODE : uint8_t {
+  LINEAR,
+  LOWER,
+  HIGHER,
+  MIDPOINT,
+  NEAREST
+};
+
+using sort_fn = void(*)(const TensorBase&, const TensorBase&, const TensorBase&, int64_t, bool, bool);
+using topk_fn = void(*)(const TensorBase&, const TensorBase&, const TensorBase&, int64_t, int64_t, bool, bool);
+
+DECLARE_DISPATCH(sort_fn, sort_stub)
+DECLARE_DISPATCH(topk_fn, topk_stub)
+
+void _fill_indices(const TensorBase &indices, int64_t dim);
+
+} // namespace at::native
diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/_addmm_activation_cpu_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/_addmm_activation_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..de606b9fbf58001178e402e172dc46edb2436b47
--- /dev/null
+++ b/lib/python3.10/site-packages/torch/include/ATen/ops/_addmm_activation_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor _addmm_activation(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1, bool use_gelu=false); +TORCH_API at::Tensor & _addmm_activation_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1, bool use_gelu=false); +TORCH_API at::Tensor & _addmm_activation_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_affine_quantized_ops.h b/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_affine_quantized_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..a3ba55acc7140fbb9e608b2509c3953bbc6d19c7 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_affine_quantized_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _empty_affine_quantized { + using schema = at::Tensor (c10::SymIntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, double, int64_t, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::_empty_affine_quantized"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "_empty_affine_quantized(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor"; + static at::Tensor call(c10::SymIntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory, double scale, int64_t zero_point, ::std::optional memory_format); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory, double scale, int64_t zero_point, ::std::optional memory_format); +}; + +struct TORCH_API _empty_affine_quantized_out { + using schema = at::Tensor & (c10::SymIntArrayRef, double, int64_t, ::std::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::_empty_affine_quantized"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "_empty_affine_quantized.out(SymInt[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(c10::SymIntArrayRef size, double scale, int64_t zero_point, ::std::optional memory_format, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, double scale, int64_t zero_point, ::std::optional memory_format, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_sdp_choice_cpu_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_sdp_choice_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d9d920b1104f28f71cf640b5aa4ee64ee84b5e6c --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_sdp_choice_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API int64_t _fused_sdp_choice(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional & attn_mask={}, double dropout_p=0.0, bool is_causal=false, ::std::optional scale=::std::nullopt, bool enable_gqa=false); + +} // namespace cpu +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/_index_put_impl_cpu_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/_index_put_impl_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b0cea2784d68620290551c825d38200b964d7ed6 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/_index_put_impl_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & _index_put_impl_(at::Tensor & self, const c10::List<::std::optional> & indices, const at::Tensor & values, bool accumulate=false, bool unsafe=false); + +} // namespace cpu +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data_cpu_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a445539e587c8fdb618afa159222015bb50eb9f1 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor _log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype); +TORCH_API at::Tensor & _log_softmax_backward_data_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype); +TORCH_API at::Tensor & _log_softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_values_ops.h b/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_values_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..149f406fbbb93a5c754370371c56b51f0fd221ab --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_values_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _nested_get_values { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::_nested_get_values"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "_nested_get_values(Tensor(a) self) -> Tensor(a)"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer_cpu_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..36ca51f9fb8160e696c9bf5ffc33b875c4e600a7 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor _nested_view_from_buffer(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets); + +} // namespace cpu +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/_nnpack_spatial_convolution.h b/lib/python3.10/site-packages/torch/include/ATen/ops/_nnpack_spatial_convolution.h new file mode 100644 index 0000000000000000000000000000000000000000..a4ce1d94b630bce3071d939a6295730d995a9e9f --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/_nnpack_spatial_convolution.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1) -> Tensor +inline at::Tensor _nnpack_spatial_convolution(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride=1) { + return at::_ops::_nnpack_spatial_convolution::call(input, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride)); +} +namespace symint { + template >> + at::Tensor _nnpack_spatial_convolution(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride=1) { + return at::_ops::_nnpack_spatial_convolution::call(input, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride)); + } +} + +// aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? 
bias, SymInt[2] padding, SymInt[2] stride=1) -> Tensor +inline at::Tensor _nnpack_spatial_convolution_symint(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride=c10::SymInt(1)) { + return at::_ops::_nnpack_spatial_convolution::call(input, weight, bias, padding, stride); +} +namespace symint { + template >> + at::Tensor _nnpack_spatial_convolution(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride=c10::SymInt(1)) { + return at::_ops::_nnpack_spatial_convolution::call(input, weight, bias, padding, stride); + } +} + +// aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _nnpack_spatial_convolution_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride=1) { + return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), out); +} +namespace symint { + template >> + at::Tensor & _nnpack_spatial_convolution_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride=1) { + return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), out); + } +} + +// aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _nnpack_spatial_convolution_outf(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) { + return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), out); +} +namespace symint { + template >> + at::Tensor & _nnpack_spatial_convolution_outf(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) { + return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), out); + } +} + +// aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _nnpack_spatial_convolution_symint_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride=c10::SymInt(1)) { + return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, padding, stride, out); +} +namespace symint { + template >> + at::Tensor & _nnpack_spatial_convolution_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride=c10::SymInt(1)) { + return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, padding, stride, out); + } +} + +// aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & _nnpack_spatial_convolution_symint_outf(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, at::Tensor & out) { + return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, padding, stride, out); +} +namespace symint { + template >> + at::Tensor & _nnpack_spatial_convolution_outf(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, at::Tensor & out) { + return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, padding, stride, out); + } +} + +} diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/_pad_circular_native.h b/lib/python3.10/site-packages/torch/include/ATen/ops/_pad_circular_native.h new file mode 100644 index 0000000000000000000000000000000000000000..f8506ea85cc3b45b7b18dc0daa821ed72f746fa6 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/_pad_circular_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor _pad_circular_symint(const at::Tensor & self, c10::SymIntArrayRef pad); +} // namespace native +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_cuda_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d5e60b7ffb3ce4d6995b796085283fb862a4e780 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor _prelu_kernel(const at::Tensor & self, const at::Tensor & weight); + +} // namespace cuda +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/_print_ops.h b/lib/python3.10/site-packages/torch/include/ATen/ops/_print_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d7091acab41803c06cc607a8e9f5fd86edca4451 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/_print_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
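`_nnpack_spatial_convolution.h` above is a Function.h-style header: it declares the public inline wrappers (plain, `_symint`, `_out`, `_outf`) that all forward to `at::_ops::_nnpack_spatial_convolution::call`. A minimal sketch, assuming libtorch was built with NNPACK support (the call throws otherwise); shapes are illustrative:

#include <ATen/ATen.h>

int main() {
  at::Tensor input  = at::randn({1, 3, 8, 8});   // NCHW
  at::Tensor weight = at::randn({4, 3, 3, 3});   // (out_ch, in_ch, kH, kW)
  // bias is optional; {} passes "no bias". padding is a 2-element list and
  // stride keeps its schema default of 1.
  at::Tensor out = at::_nnpack_spatial_convolution(input, weight, /*bias=*/{}, /*padding=*/{1, 1});
  // With padding 1 and a 3x3 kernel, out has shape {1, 4, 8, 8}.
  return 0;
}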
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _print { + using schema = void (c10::string_view); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::_print"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "_print(str s) -> ()"; + static void call(c10::string_view s); + static void redispatch(c10::DispatchKeySet dispatchKeySet, c10::string_view s); +}; + +}} // namespace at::_ops diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_attention_math_ops.h b/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_attention_math_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..10c899858b407c7e62a280b8d43fbe6631e26dcc --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_attention_math_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _scaled_dot_product_attention_math { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, double, bool, const ::std::optional &, ::std::optional, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::_scaled_dot_product_attention_math"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "_scaled_dot_product_attention_math(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor? dropout_mask=None, *, float? scale=None, bool enable_gqa=False) -> (Tensor, Tensor)"; + static ::std::tuple call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional & attn_mask, double dropout_p, bool is_causal, const ::std::optional & dropout_mask, ::std::optional scale, bool enable_gqa); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional & attn_mask, double dropout_p, bool is_causal, const ::std::optional & dropout_mask, ::std::optional scale, bool enable_gqa); +}; + +}} // namespace at::_ops diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_flash_attention_ops.h b/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_flash_attention_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..cf0ca0a94635de54a68397cb5eb99baf9cd6cac5 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_flash_attention_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
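Each `*_ops.h` header above defines one struct per overload carrying the schema string plus static `call`/`redispatch` entry points; the public `at::` functions are thin wrappers over `call`, as the inline bodies in `_nnpack_spatial_convolution.h` earlier show. A minimal sketch using the `_print` struct declared just above (it goes through the dispatcher, so it needs a linked libtorch at runtime):

#include <ATen/ATen.h>
#include <ATen/ops/_print_ops.h>

int main() {
  // Invokes aten::_print through its generated Operator.h entry point.
  at::_ops::_print::call("hello from aten::_print");
  return 0;
}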
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _scaled_dot_product_flash_attention { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, double, bool, bool, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::_scaled_dot_product_flash_attention"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "_scaled_dot_product_flash_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask)"; + static ::std::tuple call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p, bool is_causal, bool return_debug_mask, ::std::optional scale); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p, bool is_causal, bool return_debug_mask, ::std::optional scale); +}; + +}} // namespace at::_ops diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_ops.h b/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..350f1233b144b476a3b9db5fb01cf1ab5a1d7326 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_ops.h @@ -0,0 +1,61 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _sparse_log_softmax_int { + using schema = at::Tensor (const at::Tensor &, int64_t, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::_sparse_log_softmax"; + static constexpr const char* overload_name = "int"; + static constexpr const char* schema_str = "_sparse_log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor"; + static at::Tensor call(const at::Tensor & self, int64_t dim, ::std::optional dtype); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional dtype); +}; + +struct TORCH_API _sparse_log_softmax_Dimname { + using schema = at::Tensor (const at::Tensor &, at::Dimname, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::_sparse_log_softmax"; + static constexpr const char* overload_name = "Dimname"; + static constexpr const char* schema_str = "_sparse_log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? 
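The `_scaled_dot_product_attention_math` schema above takes query/key/value with an optional mask, dropout and scale, and returns a (output, attention weights) pair. A minimal sketch with the documented defaults, assuming a CPU libtorch build; shapes follow the usual (batch, heads, seq, head_dim) layout and are illustrative:

#include <ATen/ATen.h>
#include <tuple>

int main() {
  at::Tensor q = at::randn({1, 2, 8, 16});
  at::Tensor k = at::randn({1, 2, 8, 16});
  at::Tensor v = at::randn({1, 2, 8, 16});
  // attn_mask, dropout_p, is_causal, dropout_mask, scale and enable_gqa
  // all keep the schema defaults here.
  auto [out, attn] = at::_scaled_dot_product_attention_math(q, k, v);
  // out: {1, 2, 8, 16}; attn: {1, 2, 8, 8}
  return 0;
}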
dtype=None) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::Dimname dim, ::std::optional dtype); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, ::std::optional dtype); +}; + +struct TORCH_API _sparse_log_softmax { + using schema = at::Tensor (const at::Tensor &, int64_t, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::_sparse_log_softmax"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "_sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor"; + static at::Tensor call(const at::Tensor & self, int64_t dim, bool half_to_float); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float); +}; + +struct TORCH_API _sparse_log_softmax_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::_sparse_log_softmax"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "_sparse_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/_test_ambiguous_defaults_ops.h b/lib/python3.10/site-packages/torch/include/ATen/ops/_test_ambiguous_defaults_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..fa8f86487430ac6d26d8e29a4a551dc09a3016f2 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/_test_ambiguous_defaults_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _test_ambiguous_defaults_a { + using schema = at::Tensor (const at::Tensor &, int64_t, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::_test_ambiguous_defaults"; + static constexpr const char* overload_name = "a"; + static constexpr const char* schema_str = "_test_ambiguous_defaults.a(Tensor dummy, int a=1, int b=1) -> Tensor"; + static at::Tensor call(const at::Tensor & dummy, int64_t a, int64_t b); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dummy, int64_t a, int64_t b); +}; + +struct TORCH_API _test_ambiguous_defaults_b { + using schema = at::Tensor (const at::Tensor &, int64_t, c10::string_view); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::_test_ambiguous_defaults"; + static constexpr const char* overload_name = "b"; + static constexpr const char* schema_str = "_test_ambiguous_defaults.b(Tensor dummy, int a=2, str b=\"2\") -> Tensor"; + static at::Tensor call(const at::Tensor & dummy, int64_t a, c10::string_view b); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dummy, int64_t a, c10::string_view b); +}; + +}} // namespace at::_ops diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward_native.h b/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a2525692d7a49f7d877fbefc81d74cda23951323 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured__upsample_nearest_exact1d_backward_out_cpu : public at::meta::structured__upsample_nearest_exact1d_backward { +void impl(const at::Tensor & grad_output, at::ArrayRef output_size, at::ArrayRef input_size, ::std::optional scales, const at::Tensor & grad_input); +}; +struct TORCH_API structured__upsample_nearest_exact1d_backward_out_cuda : public at::meta::structured__upsample_nearest_exact1d_backward { +void impl(const at::Tensor & grad_output, at::ArrayRef output_size, at::ArrayRef input_size, ::std::optional scales, const at::Tensor & grad_input); +}; +} // namespace native +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm_meta_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4491fdd247be48c7d52b96254591a22014c558da --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm_meta_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor & addbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1); + +} // namespace meta +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/addcmul_meta_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/addcmul_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d1b3c8e5b72945c9ed2e4f4fd10155f81dec7e82 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/addcmul_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor addcmul(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1); +TORCH_API at::Tensor & addcmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1); +TORCH_API at::Tensor & addcmul_outf(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out); +TORCH_API at::Tensor & addcmul_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1); + +} // namespace meta +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/all_native.h b/lib/python3.10/site-packages/torch/include/ATen/ops/all_native.h new file mode 100644 index 0000000000000000000000000000000000000000..272b358c4860353ca736499bccbac12f00881470 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/all_native.h @@ -0,0 +1,34 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_all_out : public at::meta::structured_all_dim { +void impl(const at::Tensor & self, int64_t dim, bool keepdim, const at::Tensor & out); +}; +TORCH_API at::Tensor NestedTensor_all(const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API at::Tensor all_dims_default(const at::Tensor & self, at::OptionalIntArrayRef dim=::std::nullopt, bool keepdim=false); +TORCH_API at::Tensor & all_dims_out_default(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, at::Tensor & out); +struct TORCH_API structured_all_dims_out : public at::meta::structured_all_dims { +void impl(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, const at::Tensor & out); +}; +TORCH_API at::Tensor all(const at::Tensor & self, at::Dimname dim, bool keepdim=false); +TORCH_API at::Tensor & all_out(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out); +struct TORCH_API structured_all_all_out : public at::meta::structured_all { +void impl(const at::Tensor & self, const at::Tensor & out); +}; 
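`addcmul_meta_dispatch.h` above declares the meta-backend overloads of `addcmul`, which computes `self + value * tensor1 * tensor2`. A minimal sketch of the public functional, out and in-place variants, assuming a CPU libtorch build:

#include <ATen/ATen.h>

int main() {
  at::Tensor self = at::zeros({3});
  at::Tensor t1 = at::full({3}, 2.0);
  at::Tensor t2 = at::full({3}, 3.0);

  at::Tensor r = at::addcmul(self, t1, t2, /*value=*/0.5);  // 0 + 0.5 * 2 * 3 = 3
  at::Tensor out = at::empty({3});
  at::addcmul_out(out, self, t1, t2, 0.5);                  // same result, preallocated
  self.addcmul_(t1, t2, 0.5);                               // in-place on self
  return 0;
}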
+} // namespace native +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward.h b/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..582bae27aebf50a594113224764e9eeffe22b299 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::batch_norm_backward(Tensor grad_out, Tensor input, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, bool update, float eps, bool[3] output_mask, Tensor reserve) -> (Tensor, Tensor, Tensor) +inline ::std::tuple batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional & running_mean, const ::std::optional & running_var, const ::std::optional & save_mean, const ::std::optional & save_var, bool update, double eps, ::std::array output_mask, const at::Tensor & reserve) { + return at::_ops::batch_norm_backward::call(grad_out, input, weight, running_mean, running_var, save_mean, save_var, update, eps, output_mask, reserve); +} + +} diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_with_logits_ops.h b/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_with_logits_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..bce6b86ce11e02596d401bd1542a325d6a831a7f --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_with_logits_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API binary_cross_entropy_with_logits { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::binary_cross_entropy_with_logits"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? 
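`all_native.h` above declares the structured kernels behind `all`: per-dim, multi-dim and all-elements overloads. A minimal sketch of the public reduction; the inputs are illustrative:

#include <ATen/ATen.h>

int main() {
  at::Tensor m = at::randn({2, 3}) > 0;               // boolean matrix
  at::Tensor per_row = at::all(m, /*dim=*/1);         // shape {2}
  at::Tensor keep = at::all(m, 1, /*keepdim=*/true);  // shape {2, 1}
  at::Tensor everything = at::all(m);                 // 0-dim bool scalar
  return 0;
}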
pos_weight=None, int reduction=Mean) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, const ::std::optional & pos_weight, int64_t reduction); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, const ::std::optional & pos_weight, int64_t reduction); +}; + +struct TORCH_API binary_cross_entropy_with_logits_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::binary_cross_entropy_with_logits"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, const ::std::optional & pos_weight, int64_t reduction, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, const ::std::optional & pos_weight, int64_t reduction, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/conv_tbc_backward.h b/lib/python3.10/site-packages/torch/include/ATen/ops/conv_tbc_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..3ac8937e21e0943e7ca35eff349fd053c14a72bf --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/conv_tbc_backward.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor) +inline ::std::tuple conv_tbc_backward(const at::Tensor & self, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) { + return at::_ops::conv_tbc_backward::call(self, input, weight, bias, pad); +} + +} diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/detach_copy_compositeexplicitautograd_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/detach_copy_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..84324e29f311dcd5e218eb3d41d7536d644377dd --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/detach_copy_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
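The schema above shows `binary_cross_entropy_with_logits` taking raw logits plus optional `weight`/`pos_weight` tensors and a reduction mode that defaults to `Mean`. A minimal sketch; `at::Reduction::Mean` is the enum value behind the `int reduction` argument:

#include <ATen/ATen.h>
#include <ATen/core/Reduction.h>

int main() {
  at::Tensor logits = at::randn({4});
  at::Tensor target = (at::randn({4}) > 0).to(at::kFloat);  // 0/1 labels
  // weight and pos_weight are optional; {} means "not provided".
  at::Tensor loss = at::binary_cross_entropy_with_logits(
      logits, target, /*weight=*/{}, /*pos_weight=*/{}, at::Reduction::Mean);
  return 0;
}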
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & detach_copy_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & detach_copy_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_compositeexplicitautograd_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1273034a7b6d247ad06e47304e6ccfcdf55e4327 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor diagonal(const at::Tensor & self, int64_t offset=0, int64_t dim1=0, int64_t dim2=1); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/digamma_native.h b/lib/python3.10/site-packages/torch/include/ATen/ops/digamma_native.h new file mode 100644 index 0000000000000000000000000000000000000000..7b157ce70b60ebafbc78c23b8a39582c996d54be --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/digamma_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_digamma_out : public at::meta::structured_digamma { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/div_ops.h b/lib/python3.10/site-packages/torch/include/ATen/ops/div_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e4e34a5221d2ce9fd6450273996fbf396f853d78 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/div_ops.h @@ -0,0 +1,149 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
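`diagonal_compositeexplicitautograd_dispatch.h` above declares the default `diagonal(self, offset, dim1, dim2)` overload, which returns a view of the selected diagonal. A minimal sketch:

#include <ATen/ATen.h>

int main() {
  at::Tensor m = at::arange(9).reshape({3, 3});
  at::Tensor d  = at::diagonal(m);                // [0, 4, 8]
  at::Tensor d1 = at::diagonal(m, /*offset=*/1);  // [1, 5]
  d.fill_(-1);  // a view: the writes show up in m
  return 0;
}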
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API div_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::div"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "div.Tensor(Tensor self, Tensor other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API div__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::div_"; + static constexpr const char* overload_name = "Tensor"; + static constexpr const char* schema_str = "div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API div_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::div"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API div_Tensor_mode { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::div"; + static constexpr const char* overload_name = "Tensor_mode"; + static constexpr const char* schema_str = "div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Tensor & other, ::std::optional rounding_mode); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, ::std::optional rounding_mode); +}; + +struct TORCH_API div__Tensor_mode { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::div_"; + static constexpr const char* overload_name = "Tensor_mode"; + static constexpr const char* schema_str = "div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? 
rounding_mode) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Tensor & other, ::std::optional rounding_mode); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, ::std::optional rounding_mode); +}; + +struct TORCH_API div_out_mode { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::div"; + static constexpr const char* overload_name = "out_mode"; + static constexpr const char* schema_str = "div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, ::std::optional rounding_mode, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, ::std::optional rounding_mode, at::Tensor & out); +}; + +struct TORCH_API div_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::div"; + static constexpr const char* overload_name = "Scalar"; + static constexpr const char* schema_str = "div.Scalar(Tensor self, Scalar other) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Scalar & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API div__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::div_"; + static constexpr const char* overload_name = "Scalar"; + static constexpr const char* schema_str = "div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Scalar & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API div_Scalar_mode { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::div"; + static constexpr const char* overload_name = "Scalar_mode"; + static constexpr const char* schema_str = "div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor"; + static at::Tensor call(const at::Tensor & self, const at::Scalar & other, ::std::optional rounding_mode); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, ::std::optional rounding_mode); +}; + +struct TORCH_API div__Scalar_mode { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::div_"; + static constexpr const char* overload_name = "Scalar_mode"; + static constexpr const char* schema_str = "div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? 
rounding_mode) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, const at::Scalar & other, ::std::optional rounding_mode); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, ::std::optional rounding_mode); +}; + +struct TORCH_API div_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::div"; + static constexpr const char* overload_name = "Scalar_out"; + static constexpr const char* schema_str = "div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +}; + +struct TORCH_API div_Scalar_mode_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, ::std::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::div"; + static constexpr const char* overload_name = "Scalar_mode_out"; + static constexpr const char* schema_str = "div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, ::std::optional rounding_mode, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, ::std::optional rounding_mode, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_sparse_backward_ops.h b/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_sparse_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..8e2ad1c9d375706520a2cc9c98efd2d6c809e8b9 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_sparse_backward_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
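`div_ops.h` above enumerates every `div`/`div_` overload, including the `Tensor_mode`/`Scalar_mode` variants whose `str? rounding_mode` selects true, truncated or floor division. A minimal sketch of the public wrappers:

#include <ATen/ATen.h>

int main() {
  at::Tensor a = at::full({2}, -7.0);
  at::Tensor b = at::full({2},  2.0);

  at::Tensor true_div  = at::div(a, b);           // -3.5
  at::Tensor trunc_div = at::div(a, b, "trunc");  // -3 (rounds toward zero)
  at::Tensor floor_div = at::div(a, b, "floor");  // -4 (rounds toward -inf)
  a.div_(b, "floor");                             // in-place Tensor_mode variant
  return 0;
}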
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API embedding_sparse_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::embedding_sparse_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor"; + static at::Tensor call(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq); +}; + +}} // namespace at::_ops diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/exp2_cpu_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/exp2_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8a8599208b31217cae97fa3f801065dd67c0b7a9 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/exp2_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor exp2(const at::Tensor & self); +TORCH_API at::Tensor & exp2_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & exp2_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & exp2_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfft_native.h b/lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfft_native.h new file mode 100644 index 0000000000000000000000000000000000000000..0f6d2152aeda38e6ebccbadabd8920eeeab90a61 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfft_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor fft_hfft_symint(const at::Tensor & self, ::std::optional n=::std::nullopt, int64_t dim=-1, ::std::optional norm=::std::nullopt); +TORCH_API at::Tensor & fft_hfft_symint_out(const at::Tensor & self, ::std::optional n, int64_t dim, ::std::optional norm, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/fft_irfft2.h b/lib/python3.10/site-packages/torch/include/ATen/ops/fft_irfft2.h new file mode 100644 index 0000000000000000000000000000000000000000..500cf47c8b4352172cc251e7e147d9da5c2482c0 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/fft_irfft2.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::fft_irfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor +inline at::Tensor fft_irfft2(const at::Tensor & self, at::OptionalIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_irfft2::call(self, s.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm); +} +namespace symint { + template >> + at::Tensor fft_irfft2(const at::Tensor & self, at::OptionalIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_irfft2::call(self, s.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm); + } +} + +// aten::fft_irfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor +inline at::Tensor fft_irfft2_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_irfft2::call(self, s, dim, norm); +} +namespace symint { + template >> + at::Tensor fft_irfft2(const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_irfft2::call(self, s, dim, norm); + } +} + +// aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
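`exp2_cpu_dispatch.h` above declares the CPU entry points for `exp2`, including the out and in-place forms. A minimal sketch of the public API, assuming a CPU libtorch build:

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::arange(4).to(at::kFloat);  // [0, 1, 2, 3]
  at::Tensor y = at::exp2(x);                   // [1, 2, 4, 8]

  at::Tensor out = at::empty_like(x);
  at::exp2_out(out, x);                         // writes into out
  x.exp2_();                                    // in-place
  return 0;
}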
+inline at::Tensor & fft_irfft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_irfft2_out::call(self, s.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm, out); +} +namespace symint { + template >> + at::Tensor & fft_irfft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_irfft2_out::call(self, s.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm, out); + } +} + +// aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_irfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, ::std::optional norm, at::Tensor & out) { + return at::_ops::fft_irfft2_out::call(self, s.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm, out); +} +namespace symint { + template >> + at::Tensor & fft_irfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, ::std::optional norm, at::Tensor & out) { + return at::_ops::fft_irfft2_out::call(self, s.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm, out); + } +} + +// aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_irfft2_symint_out(at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_irfft2_out::call(self, s, dim, norm, out); +} +namespace symint { + template >> + at::Tensor & fft_irfft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_irfft2_out::call(self, s, dim, norm, out); + } +} + +// aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_irfft2_symint_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional norm, at::Tensor & out) { + return at::_ops::fft_irfft2_out::call(self, s, dim, norm, out); +} +namespace symint { + template >> + at::Tensor & fft_irfft2_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional norm, at::Tensor & out) { + return at::_ops::fft_irfft2_out::call(self, s, dim, norm, out); + } +} + +} diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_ops.h b/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..73fa8def35efec67b010677a9e2ad9429932ff79 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
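`fft_irfft2.h` above is the Function.h header for the inverse 2-D real FFT; `s` is an optional output-size hint and `dim` defaults to the last two dimensions. A minimal roundtrip sketch, assuming libtorch was built with an FFT backend:

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::randn({4, 6});
  at::Tensor freq = at::fft_rfft2(x);  // complex, shape {4, 4}
  // With an even last dimension the default inverse recovers the input size;
  // s and norm keep their schema defaults (None) here.
  at::Tensor back = at::fft_irfft2(freq);
  // back is real with shape {4, 6} and matches x up to floating-point error.
  return 0;
}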
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API gelu_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, c10::string_view, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gelu_backward"; + static constexpr const char* overload_name = "grad_input"; + static constexpr const char* schema_str = "gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input); +}; + +struct TORCH_API gelu_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::string_view); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::gelu_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor"; + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate); +}; + +}} // namespace at::_ops diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_ops.h b/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..9753bd7884ef0dc507be78a630f6971f562bd54d --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hardshrink_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardshrink_backward"; + static constexpr const char* overload_name = "grad_input"; + static constexpr const char* schema_str = "hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) 
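The `gelu_backward` schemas above take the upstream gradient, the forward input and an `approximate` string ('none' or 'tanh'). A minimal sketch of the functional and grad_input-out forms; the values are illustrative:

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::randn({5});
  at::Tensor grad_out = at::ones({5});

  at::Tensor gi_exact = at::gelu_backward(grad_out, x);        // approximate = "none"
  at::Tensor gi_tanh  = at::gelu_backward(grad_out, x, "tanh");

  at::Tensor grad_input = at::empty_like(x);
  at::gelu_backward_out(grad_input, grad_out, x, "tanh");      // writes into grad_input
  return 0;
}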
grad_input) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input); +}; + +struct TORCH_API hardshrink_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::hardshrink_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor"; + static at::Tensor call(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); +}; + +}} // namespace at::_ops diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/is_coalesced.h b/lib/python3.10/site-packages/torch/include/ATen/ops/is_coalesced.h new file mode 100644 index 0000000000000000000000000000000000000000..ff51cf9b88fc0688209ff85aa147276bac8d95ff --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/is_coalesced.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + + +} diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/is_same_size_compositeexplicitautograd_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/is_same_size_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1c7b8206515af1375770ce99df6ac17821cc3dd7 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/is_same_size_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API bool is_same_size(const at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/isfinite_compositeimplicitautograd_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/isfinite_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..def73445aa606f253e255ab9ef8d1208835d20af --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/isfinite_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor isfinite(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_cuda_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9b007238b4049fd710b0fb0037fa6f17954227c8 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor isneginf(const at::Tensor & self); +TORCH_API at::Tensor & isneginf_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & isneginf_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_meta_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b40d121d4e8baff929717269d4d145fab1f748cf --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
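// Illustrative sketch (not part of the generated headers): the per-backend
// namespaces declared by these *_dispatch.h files (at::cpu, at::cuda,
// at::meta, at::compositeexplicitautograd, ...) mirror the public at::
// signatures but call that backend's registered kernel directly, bypassing
// the dispatcher. Ordinary code keeps using the dispatched entry point:
#include <ATen/ATen.h>

at::Tensor isneginf_example(const at::Tensor& x) {
  // Dispatches on x's device/dtype; for a CUDA tensor this lands in the
  // kernel that at::cuda::isneginf exposes directly.
  return at::isneginf(x);
}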
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor isposinf(const at::Tensor & self); +TORCH_API at::Tensor & isposinf_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & isposinf_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/layer_norm_native.h b/lib/python3.10/site-packages/torch/include/ATen/ops/layer_norm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..0d8c18df53f28a9522d64149809751240744ef4e --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/layer_norm_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor layer_norm_symint(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional & weight={}, const ::std::optional & bias={}, double eps=1e-05, bool cudnn_enable=true); +} // namespace native +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward.h b/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..5e37440c0bfe1a4071c5086c0bad3293b2052a77 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & leaky_relu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) { + return at::_ops::leaky_relu_backward_grad_input::call(grad_output, self, negative_slope, self_is_result, grad_input); +} +// aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!) 
+inline at::Tensor & leaky_relu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input) { + return at::_ops::leaky_relu_backward_grad_input::call(grad_output, self, negative_slope, self_is_result, grad_input); +} + +// aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor +inline at::Tensor leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) { + return at::_ops::leaky_relu_backward::call(grad_output, self, negative_slope, self_is_result); +} + +} diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_cpu_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1ff72fcfcbd7399a369e7d286071fb0c4ae7fa89 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result); +TORCH_API at::Tensor & leaky_relu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result); +TORCH_API at::Tensor & leaky_relu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_cuda_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3adc307190db636f1019c168d0c65b244780feca --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
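// Illustrative usage sketch (not part of the generated headers): the
// Function.h wrappers above expose leaky_relu_backward in a functional form
// and an out= form; the *_outf variant only reorders arguments so the output
// tensor comes last. Tensor values below are made up for the example.
#include <ATen/ATen.h>

void leaky_relu_backward_example() {
  at::Tensor self = at::randn({4, 4});
  at::Tensor grad_output = at::ones_like(self);
  // Functional form: allocates and returns grad_input.
  at::Tensor grad_input = at::leaky_relu_backward(
      grad_output, self, /*negative_slope=*/0.01, /*self_is_result=*/false);
  // out= form: writes into a preallocated tensor instead.
  at::Tensor out = at::empty_like(self);
  at::leaky_relu_backward_out(out, grad_output, self, 0.01, false);
}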
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result); +TORCH_API at::Tensor & leaky_relu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result); +TORCH_API at::Tensor & leaky_relu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/log_cuda_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/log_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..bb0330b29f83c29c60e350fcbcbc14550df96c0f --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/log_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor log(const at::Tensor & self); +TORCH_API at::Tensor & log_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & log_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & log_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_forward_native.h b/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_forward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..36fe5692331a07dbb52c03b07fb4a1b1e94c017b --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_forward_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple log_sigmoid_forward_cpu(const at::Tensor & self); +TORCH_API ::std::tuple log_sigmoid_forward_out_cpu(const at::Tensor & self, at::Tensor & output, at::Tensor & buffer); +TORCH_API ::std::tuple log_sigmoid_forward_cuda(const at::Tensor & self); +TORCH_API ::std::tuple log_sigmoid_forward_out_cuda(const at::Tensor & self, at::Tensor & output, at::Tensor & buffer); +} // namespace native +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_compositeexplicitautograd_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2e018b18ad2ab1d2aa9bd13148584a7304142df0 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_compositeexplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that 
have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & __lshift___out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & __lshift___outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & __lshift___out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & __lshift___outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward_ops.h b/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..2e47cec5fd64e761e890163d95c9ba3dd467319d --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API max_pool3d_with_indices_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::max_pool3d_with_indices_backward"; + static constexpr const char* overload_name = "grad_input"; + static constexpr const char* schema_str = "max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) 
grad_input) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input); +}; + +struct TORCH_API max_pool3d_with_indices_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::max_pool3d_with_indices_backward"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor"; + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices); +}; + +}} // namespace at::_ops diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/min_meta_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/min_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b1b65a44d257a0d6e80cdde77d67d5e122683d3e --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/min_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API ::std::tuple min(const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple min_out(at::Tensor & min, at::Tensor & min_indices, const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple min_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices); + +} // namespace meta +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/orgqr_compositeimplicitautograd_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/orgqr_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ce06d57632bcd494ad3d410680059e6379e7f7c8 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/orgqr_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor orgqr(const at::Tensor & self, const at::Tensor & input2); +TORCH_API at::Tensor & orgqr_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & input2); +TORCH_API at::Tensor & orgqr_outf(const at::Tensor & self, const at::Tensor & input2, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/outer_compositeimplicitautograd_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/outer_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..003f5504199d476d879abb58e55317a9c07493d8 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/outer_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
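// Illustrative sketch (not part of the generated headers): the at::meta::
// declarations above are the Meta-backend entry points, which only compute
// output sizes and dtypes. The same behaviour is reachable through the normal
// API by passing tensors on the Meta device:
#include <ATen/ATen.h>

void min_shape_inference_example() {
  at::Tensor t = at::empty({8, 16},
                           at::TensorOptions().device(c10::DeviceType::Meta));
  auto [values, indices] = at::min(t, /*dim=*/1, /*keepdim=*/false);
  // values and indices are Meta tensors of shape {8}; no real data was touched.
}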
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor outer(const at::Tensor & self, const at::Tensor & vec2); +TORCH_API at::Tensor & outer_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & vec2); +TORCH_API at::Tensor & outer_outf(const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_max_pool1d_ops.h b/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_max_pool1d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..68defe435ec9a634d44b1082b3ccc624cb47e90a --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_max_pool1d_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API quantized_max_pool1d { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::quantized_max_pool1d"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "quantized_max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor"; + static at::Tensor call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode); +}; + +struct TORCH_API quantized_max_pool1d_out { + using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::quantized_max_pool1d"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "quantized_max_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False, *, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_backward_native.h b/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ea6238cc626c84eb8569729a2334547aa0643260 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_backward_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_reflection_pad1d_backward_out_cpu : public at::meta::structured_reflection_pad1d_backward { +void impl(const at::Tensor & grad_output, const at::Tensor & self, at::ArrayRef padding, const at::Tensor & grad_input); +}; +struct TORCH_API structured_reflection_pad1d_backward_out_cuda : public at::meta::structured_reflection_pad1d_backward { +void impl(const at::Tensor & grad_output, const at::Tensor & self, at::ArrayRef padding, const at::Tensor & grad_input); +}; +} // namespace native +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_relu_cell_compositeimplicitautograd_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_relu_cell_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0b4f2fdaabcac50c87b320aa114722ec3ded3a10 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_relu_cell_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor rnn_relu_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional & b_ih={}, const ::std::optional & b_hh={}); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/segment_reduce_ops.h b/lib/python3.10/site-packages/torch/include/ATen/ops/segment_reduce_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..5a4fd8fa7698b9aa227bd0b7dea741a7d740daa1 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/segment_reduce_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API segment_reduce { + using schema = at::Tensor (const at::Tensor &, c10::string_view, const ::std::optional &, const ::std::optional &, const ::std::optional &, int64_t, bool, const ::std::optional &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::segment_reduce"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> Tensor"; + static at::Tensor call(const at::Tensor & data, c10::string_view reduce, const ::std::optional & lengths, const ::std::optional & indices, const ::std::optional & offsets, int64_t axis, bool unsafe, const ::std::optional & initial); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, c10::string_view reduce, const ::std::optional & lengths, const ::std::optional & indices, const ::std::optional & offsets, int64_t axis, bool unsafe, const ::std::optional & initial); +}; + +struct TORCH_API segment_reduce_out { + using schema = at::Tensor & (const at::Tensor &, c10::string_view, const ::std::optional &, const ::std::optional &, const ::std::optional &, int64_t, bool, const ::std::optional &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::segment_reduce"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "segment_reduce.out(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & data, c10::string_view reduce, const ::std::optional & lengths, const ::std::optional & indices, const ::std::optional & offsets, int64_t axis, bool unsafe, const ::std::optional & initial, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, c10::string_view reduce, const ::std::optional & lengths, const ::std::optional & indices, const ::std::optional & offsets, int64_t axis, bool unsafe, const ::std::optional & initial, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/sinc_ops.h b/lib/python3.10/site-packages/torch/include/ATen/ops/sinc_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..dcd9762ae3581007abff1b51b0bf632e0e23d2a1 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/sinc_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
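// Illustrative usage sketch (not part of the generated headers):
// segment_reduce reduces `data` within consecutive segments described by
// `lengths` (or `offsets`). The values below are made up for the example.
#include <ATen/ATen.h>

void segment_reduce_example() {
  at::Tensor data = at::arange(6, at::kFloat);        // [0, 1, 2, 3, 4, 5]
  at::Tensor lengths = at::full({2}, 3, at::kLong);   // two segments of length 3
  at::Tensor out = at::segment_reduce(data, "sum", lengths);
  // out == [3., 12.]
}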
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API sinc { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::sinc"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "sinc(Tensor self) -> Tensor"; + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API sinc_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::sinc_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "sinc_(Tensor(a!) self) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API sinc_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::sinc"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_compressed_tensor_compositeexplicitautograd_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_compressed_tensor_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..048b22bcf2209b78e492713a62f4c23cb77fe92b --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_compressed_tensor_compositeexplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
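// Illustrative sketch (not part of the generated headers): sinc follows the
// usual functional / in-place / out= triple declared above.
#include <ATen/ATen.h>

void sinc_variants_example() {
  at::Tensor x = at::linspace(-2.0, 2.0, 9);
  at::Tensor y = at::sinc(x);          // functional: returns a new tensor
  at::Tensor out = at::empty_like(x);
  at::sinc_out(out, x);                // out=: writes into `out`
  x.sinc_();                           // in-place: overwrites x
  (void)y;                             // silence unused-variable warnings
}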
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options); +TORCH_API at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor sparse_compressed_tensor_symint(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options); +TORCH_API at::Tensor sparse_compressed_tensor_symint(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::TensorOptions options); +TORCH_API at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_csr_tensor_native.h b/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_csr_tensor_native.h new file mode 100644 index 0000000000000000000000000000000000000000..e623bb1bd1883bbb3a47ff1a30983dc0690d7ae4 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_csr_tensor_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +TORCH_API at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +} // namespace native +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_compositeexplicitautograd_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..446a4de530b1c65fabcc5f03c88dcbbbfbcfb387 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_compositeexplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor special_chebyshev_polynomial_t(const at::Scalar & x, const at::Tensor & n); +TORCH_API at::Tensor & special_chebyshev_polynomial_t_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n); +TORCH_API at::Tensor & special_chebyshev_polynomial_t_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out); +TORCH_API at::Tensor special_chebyshev_polynomial_t(const at::Tensor & x, const at::Scalar & n); +TORCH_API at::Tensor & special_chebyshev_polynomial_t_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n); +TORCH_API at::Tensor & special_chebyshev_polynomial_t_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_h_compositeexplicitautogradnonfunctional_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_h_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7c3a44d48df6fac3b67de2171910ced647f1a05c --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_h_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor special_hermite_polynomial_h(const at::Tensor & x, const at::Tensor & n); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_u_meta_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_u_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d184797c64a87c74da40e1c90050ef5bfb1bc816 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_u_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
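// Illustrative sketch (not part of the generated headers): the Chebyshev
// polynomial ops above come in Tensor/Tensor, Scalar/Tensor and Tensor/Scalar
// flavours; the Scalar side is broadcast against the Tensor argument.
#include <ATen/ATen.h>

void chebyshev_example() {
  at::Tensor x = at::linspace(-1.0, 1.0, 5);
  // T_3 evaluated at each element of x (Tensor x, Scalar n overload).
  at::Tensor y = at::special_chebyshev_polynomial_t(x, 3);
  (void)y;
}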
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor special_shifted_chebyshev_polynomial_u(const at::Tensor & x, const at::Tensor & n); +TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_u_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n); +TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_u_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/split_with_sizes_copy.h b/lib/python3.10/site-packages/torch/include/ATen/ops/split_with_sizes_copy.h new file mode 100644 index 0000000000000000000000000000000000000000..1d5c50a78125b15b3388490d0fd4e5c546928f12 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/split_with_sizes_copy.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[] +inline ::std::vector split_with_sizes_copy(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) { + return at::_ops::split_with_sizes_copy::call(self, c10::fromIntArrayRefSlow(split_sizes), dim); +} +namespace symint { + template >> + ::std::vector split_with_sizes_copy(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) { + return at::_ops::split_with_sizes_copy::call(self, c10::fromIntArrayRefSlow(split_sizes), dim); + } +} + +// aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[] +inline ::std::vector split_with_sizes_copy_symint(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0) { + return at::_ops::split_with_sizes_copy::call(self, split_sizes, dim); +} +namespace symint { + template >> + ::std::vector split_with_sizes_copy(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0) { + return at::_ops::split_with_sizes_copy::call(self, split_sizes, dim); + } +} + +// aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () +inline void split_with_sizes_copy_out(at::TensorList out, const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) { + return at::_ops::split_with_sizes_copy_out::call(self, c10::fromIntArrayRefSlow(split_sizes), dim, out); +} +namespace symint { + template >> + void split_with_sizes_copy_out(at::TensorList out, const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) { + return at::_ops::split_with_sizes_copy_out::call(self, c10::fromIntArrayRefSlow(split_sizes), dim, out); + } +} + +// aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () +inline void split_with_sizes_copy_outf(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim, at::TensorList out) { + return at::_ops::split_with_sizes_copy_out::call(self, c10::fromIntArrayRefSlow(split_sizes), dim, out); +} +namespace symint { + template >> + void split_with_sizes_copy_outf(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim, at::TensorList out) { + return at::_ops::split_with_sizes_copy_out::call(self, c10::fromIntArrayRefSlow(split_sizes), dim, out); + } +} + +// aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () +inline void 
split_with_sizes_copy_symint_out(at::TensorList out, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0) { + return at::_ops::split_with_sizes_copy_out::call(self, split_sizes, dim, out); +} +namespace symint { + template >> + void split_with_sizes_copy_out(at::TensorList out, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0) { + return at::_ops::split_with_sizes_copy_out::call(self, split_sizes, dim, out); + } +} + +// aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () +inline void split_with_sizes_copy_symint_outf(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) { + return at::_ops::split_with_sizes_copy_out::call(self, split_sizes, dim, out); +} +namespace symint { + template >> + void split_with_sizes_copy_outf(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) { + return at::_ops::split_with_sizes_copy_out::call(self, split_sizes, dim, out); + } +} + +} diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/std_cpu_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/std_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ead92c4e782fa43da8357e777b7fea6dda547a24 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/std_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
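// Illustrative sketch (not part of the generated headers): the at::symint::
// wrappers above pick between the IntArrayRef and SymIntArrayRef entry points
// via an explicit template argument, while the plain at:: overloads choose
// based on the argument type:
#include <ATen/ATen.h>
#include <vector>

void split_with_sizes_copy_example() {
  at::Tensor t = at::randn({6, 3});
  // Concrete integer sizes go through the IntArrayRef path.
  std::vector<at::Tensor> parts = at::split_with_sizes_copy(t, {2, 4}, /*dim=*/0);
  // Generic code that may see symbolic shapes can request the SymInt path.
  std::vector<c10::SymInt> sizes = {c10::SymInt(2), c10::SymInt(4)};
  std::vector<at::Tensor> sym_parts =
      at::symint::split_with_sizes_copy<c10::SymInt>(t, sizes, /*dim=*/0);
}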
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor std(const at::Tensor & self, at::OptionalIntArrayRef dim=::std::nullopt, const ::std::optional & correction=::std::nullopt, bool keepdim=false); +TORCH_API at::Tensor & std_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim=::std::nullopt, const ::std::optional & correction=::std::nullopt, bool keepdim=false); +TORCH_API at::Tensor & std_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional & correction, bool keepdim, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/swapaxes.h b/lib/python3.10/site-packages/torch/include/ATen/ops/swapaxes.h new file mode 100644 index 0000000000000000000000000000000000000000..7927ef54846bd06550082f1e557550bc41b37148 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/swapaxes.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a) +inline at::Tensor swapaxes(const at::Tensor & self, int64_t axis0, int64_t axis1) { + return at::_ops::swapaxes::call(self, axis0, axis1); +} + +} diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/uniform_ops.h b/lib/python3.10/site-packages/torch/include/ATen/ops/uniform_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..25f7887730c7fabd74cef55878412f188e841595 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/uniform_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API uniform_ { + using schema = at::Tensor & (at::Tensor &, double, double, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::uniform_"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!)"; + static at::Tensor & call(at::Tensor & self, double from, double to, ::std::optional generator); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double from, double to, ::std::optional generator); +}; + +struct TORCH_API uniform_out { + using schema = at::Tensor & (const at::Tensor &, double, double, ::std::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::uniform"; + static constexpr const char* overload_name = "out"; + static constexpr const char* schema_str = "uniform.out(Tensor self, float from=0, float to=1, *, Generator? generator=None, Tensor(a!) 
out) -> Tensor(a!)"; + static at::Tensor & call(const at::Tensor & self, double from, double to, ::std::optional generator, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double from, double to, ::std::optional generator, at::Tensor & out); +}; + +struct TORCH_API uniform { + using schema = at::Tensor (const at::Tensor &, double, double, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + static constexpr const char* name = "aten::uniform"; + static constexpr const char* overload_name = ""; + static constexpr const char* schema_str = "uniform(Tensor self, float from=0, float to=1, *, Generator? generator=None) -> Tensor"; + static at::Tensor call(const at::Tensor & self, double from, double to, ::std::optional generator); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double from, double to, ::std::optional generator); +}; + +}} // namespace at::_ops diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_backward_native.h b/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..464203bad925381ca42b4925be2d2acd88d7ea0f --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_backward_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_upsample_bilinear2d_backward_out_cpu : public at::meta::structured_upsample_bilinear2d_backward { +void impl(const at::Tensor & grad_output, at::ArrayRef output_size, at::ArrayRef input_size, bool align_corners, ::std::optional scales_h, ::std::optional scales_w, const at::Tensor & grad_input); +}; +struct TORCH_API structured_upsample_bilinear2d_backward_out_cuda : public at::meta::structured_upsample_bilinear2d_backward { +void impl(const at::Tensor & grad_output, at::ArrayRef output_size, at::ArrayRef input_size, bool align_corners, ::std::optional scales_h, ::std::optional scales_w, const at::Tensor & grad_input); +}; +} // namespace native +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest1d_backward_compositeexplicitautogradnonfunctional_dispatch.h b/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest1d_backward_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4ea664f3c1403ff9bc1422ed865f97165ae4a461 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest1d_backward_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
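// Illustrative sketch (not part of the generated headers): uniform_ is the
// in-place random fill; the functional `uniform` and `uniform.out` schemas
// above exist mainly so the functionalization pass has non-mutating forms.
#include <ATen/ATen.h>

void uniform_example() {
  at::Tensor t = at::empty({3, 3});
  t.uniform_(/*from=*/0.0, /*to=*/1.0);  // fill t in place with U(0, 1) samples
}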
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor upsample_nearest1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional scales=::std::nullopt); +TORCH_API at::Tensor upsample_nearest1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional scales=::std::nullopt); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_backward_native.h b/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..30f6bc43a3b2ee2ec4f10a4f102cd8c981d508f8 --- /dev/null +++ b/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_backward_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_upsample_trilinear3d_backward_out_cpu : public at::meta::structured_upsample_trilinear3d_backward { +void impl(const at::Tensor & grad_output, at::ArrayRef output_size, at::ArrayRef input_size, bool align_corners, ::std::optional scales_d, ::std::optional scales_h, ::std::optional scales_w, const at::Tensor & grad_input); +}; +struct TORCH_API structured_upsample_trilinear3d_backward_out_cuda : public at::meta::structured_upsample_trilinear3d_backward { +void impl(const at::Tensor & grad_output, at::ArrayRef output_size, at::ArrayRef input_size, bool align_corners, ::std::optional scales_d, ::std::optional scales_h, ::std::optional scales_w, const at::Tensor & grad_input); +}; +} // namespace native +} // namespace at
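// Illustrative sketch (not part of the generated headers): each structured
// *_out_cpu / *_out_cuda struct above inherits the shared meta function
// (argument checking and output allocation) and declares only impl(), which
// the backend kernel file defines, conventionally via the TORCH_IMPL_FUNC
// macro (it expands to `void structured_<name>::impl`). Roughly:
#include <ATen/TensorMeta.h>
#include <ATen/ops/upsample_trilinear3d_backward_native.h>

TORCH_IMPL_FUNC(upsample_trilinear3d_backward_out_cpu) (
    const at::Tensor& grad_output,
    at::IntArrayRef output_size,
    at::IntArrayRef input_size,
    bool align_corners,
    std::optional<double> scales_d,
    std::optional<double> scales_h,
    std::optional<double> scales_w,
    const at::Tensor& grad_input) {
  // grad_input has already been sized by the meta function; the CPU-specific
  // computation fills it here (the real kernel forwards to a DispatchStub).
}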