Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +3 -0
- lib/python3.10/site-packages/babel/locale-data/af.dat +3 -0
- lib/python3.10/site-packages/babel/locale-data/blo.dat +3 -0
- lib/python3.10/site-packages/babel/locale-data/pt.dat +3 -0
- lib/python3.10/site-packages/torch/include/ATen/core/dispatch/ObservedOperators.h +17 -0
- lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorEntry.h +313 -0
- lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorOptions.h +30 -0
- lib/python3.10/site-packages/torch/include/ATen/core/op_registration/adaption.h +81 -0
- lib/python3.10/site-packages/torch/include/ATen/core/op_registration/infer_schema.h +157 -0
- lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_allowlist.h +196 -0
- lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_registration.h +596 -0
- lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec128/vec128.h +14 -0
- lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec128/vec128_bfloat16_neon.h +556 -0
- lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec128/vec128_convert.h +64 -0
- lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec128/vec128_float_neon.h +580 -0
- lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec128/vec128_half_neon.h +603 -0
- lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec128/vec128_reduced_precision_common_neon.h +263 -0
- lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_bfloat16.h +1670 -0
- lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_float.h +711 -0
- lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_mask.h +295 -0
- lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_n.h +404 -0
- lib/python3.10/site-packages/torch/include/ATen/cudnn/Descriptors.h +409 -0
- lib/python3.10/site-packages/torch/include/ATen/cudnn/Handle.h +9 -0
- lib/python3.10/site-packages/torch/include/ATen/cudnn/Handles.h +2 -0
- lib/python3.10/site-packages/torch/include/ATen/cudnn/Types.h +14 -0
- lib/python3.10/site-packages/torch/include/ATen/cudnn/Utils.h +22 -0
- lib/python3.10/site-packages/torch/include/ATen/cudnn/cudnn-wrapper.h +16 -0
- lib/python3.10/site-packages/torch/include/ATen/functorch/ADInterpreters.h +38 -0
- lib/python3.10/site-packages/torch/include/ATen/functorch/BatchRulesHelper.h +480 -0
- lib/python3.10/site-packages/torch/include/ATen/functorch/BatchedFallback.h +81 -0
- lib/python3.10/site-packages/torch/include/ATen/functorch/BatchedTensorImpl.h +169 -0
- lib/python3.10/site-packages/torch/include/ATen/functorch/BatchingMetaprogramming.h +126 -0
- lib/python3.10/site-packages/torch/include/ATen/functorch/DynamicLayer.h +124 -0
- lib/python3.10/site-packages/torch/include/ATen/functorch/FunctionalizeInterpreter.h +22 -0
- lib/python3.10/site-packages/torch/include/ATen/functorch/Interpreter.h +209 -0
- lib/python3.10/site-packages/torch/include/ATen/functorch/LegacyVmapTransforms.h +187 -0
- lib/python3.10/site-packages/torch/include/ATen/functorch/Macros.h +3 -0
- lib/python3.10/site-packages/torch/include/ATen/functorch/PlumbingHelper.h +63 -0
- lib/python3.10/site-packages/torch/include/ATen/functorch/TensorWrapper.h +103 -0
- lib/python3.10/site-packages/torch/include/ATen/functorch/VmapInterpreter.h +25 -0
- lib/python3.10/site-packages/torch/include/ATen/ops/_addmm_activation.h +39 -0
- lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Float_ops.h +28 -0
- lib/python3.10/site-packages/torch/include/ATen/ops/_convert_indices_from_csr_to_coo_meta.h +27 -0
- lib/python3.10/site-packages/torch/include/ATen/ops/_cslt_sparse_mm.h +30 -0
- lib/python3.10/site-packages/torch/include/ATen/ops/_efficient_attention_forward_native.h +21 -0
- lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_expm1_compositeexplicitautograd_dispatch.h +26 -0
- lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_eigh.h +39 -0
- lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit.h +58 -0
- lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_copy_native.h +21 -0
- lib/python3.10/site-packages/torch/include/ATen/ops/_test_ambiguous_defaults_compositeimplicitautograd_dispatch.h +24 -0
.gitattributes
CHANGED
|
@@ -171,3 +171,6 @@ lib/python3.10/site-packages/babel/locale-data/ar.dat filter=lfs diff=lfs merge=
|
|
| 171 |
lib/python3.10/site-packages/babel/locale-data/az.dat filter=lfs diff=lfs merge=lfs -text
|
| 172 |
lib/python3.10/site-packages/babel/locale-data/kk.dat filter=lfs diff=lfs merge=lfs -text
|
| 173 |
lib/python3.10/site-packages/babel/locale-data/syr.dat filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
| 171 |
lib/python3.10/site-packages/babel/locale-data/az.dat filter=lfs diff=lfs merge=lfs -text
|
| 172 |
lib/python3.10/site-packages/babel/locale-data/kk.dat filter=lfs diff=lfs merge=lfs -text
|
| 173 |
lib/python3.10/site-packages/babel/locale-data/syr.dat filter=lfs diff=lfs merge=lfs -text
|
| 174 |
+
lib/python3.10/site-packages/babel/locale-data/pt.dat filter=lfs diff=lfs merge=lfs -text
|
| 175 |
+
lib/python3.10/site-packages/babel/locale-data/af.dat filter=lfs diff=lfs merge=lfs -text
|
| 176 |
+
lib/python3.10/site-packages/babel/locale-data/blo.dat filter=lfs diff=lfs merge=lfs -text
|
lib/python3.10/site-packages/babel/locale-data/af.dat
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a4ef7fdc1a49490009de067f8fa65d5291aadf3a84bfa088659f82a494d7c2be
|
| 3 |
+
size 144732
|
lib/python3.10/site-packages/babel/locale-data/blo.dat
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:23481fb06bfcd317fb768fa8c738e4876a2bf77e18c5880332433d4477fb5179
|
| 3 |
+
size 168499
|
lib/python3.10/site-packages/babel/locale-data/pt.dat
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e836ff7bfb5018b268a7cd36a4c8fc69e34672e2b16bd88a4c265804d33872bb
|
| 3 |
+
size 183448
|
lib/python3.10/site-packages/torch/include/ATen/core/dispatch/ObservedOperators.h
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/operator_name.h>
|
| 4 |
+
#include <string>
|
| 5 |
+
#include <unordered_set>
|
| 6 |
+
|
| 7 |
+
namespace c10 {
|
| 8 |
+
|
| 9 |
+
struct TORCH_API ObservedOperators {
|
| 10 |
+
ObservedOperators() = delete;
|
| 11 |
+
|
| 12 |
+
static bool isObserved(const OperatorName& name);
|
| 13 |
+
|
| 14 |
+
static std::unordered_set<std::string>& getUnobservedOperatorList();
|
| 15 |
+
};
|
| 16 |
+
|
| 17 |
+
} // namespace c10
|
lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorEntry.h
ADDED
|
@@ -0,0 +1,313 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/function_schema.h>
|
| 4 |
+
#include <c10/util/Metaprogramming.h>
|
| 5 |
+
#include <c10/util/flat_hash_map.h>
|
| 6 |
+
#include <c10/core/DispatchKey.h>
|
| 7 |
+
#include <c10/core/PyHandleCache.h>
|
| 8 |
+
#include <c10/core/SafePyObject.h>
|
| 9 |
+
#include <ATen/core/ivalue.h>
|
| 10 |
+
#include <ATen/core/boxing/KernelFunction.h>
|
| 11 |
+
#include <ATen/core/dispatch/DispatchKeyExtractor.h>
|
| 12 |
+
|
| 13 |
+
#include <ATen/core/dispatch/OperatorOptions.h>
|
| 14 |
+
#include <ATen/core/dispatch/CppSignature.h>
|
| 15 |
+
#include <ATen/core/dispatch/RegistrationHandleRAII.h>
|
| 16 |
+
#include <ATen/core/enum_tag.h>
|
| 17 |
+
|
| 18 |
+
#include <optional>
|
| 19 |
+
#include <array>
|
| 20 |
+
#include <list>
|
| 21 |
+
|
| 22 |
+
#ifdef C10_MOBILE
|
| 23 |
+
#define C10_DISPATCHER_ONE_KERNEL_PER_DISPATCH_KEY
|
| 24 |
+
#endif
|
| 25 |
+
|
| 26 |
+
namespace c10 {
|
| 27 |
+
|
| 28 |
+
class Dispatcher;
|
| 29 |
+
|
| 30 |
+
namespace impl {
|
| 31 |
+
|
| 32 |
+
// This data structure represents a kernel that was registered to us from a
|
| 33 |
+
// user. Unlike KernelFunction, AnnotatedKernel contains some extra metadata
|
| 34 |
+
// about the kernel that isn't necessary for actual dispatching (this is why
|
| 35 |
+
// we don't put AnnotatedKernel in the actual DispatchTable), but is useful for
|
| 36 |
+
// giving good error messages.
|
| 37 |
+
struct AnnotatedKernel final {
|
| 38 |
+
AnnotatedKernel(KernelFunction k, std::unique_ptr<FunctionSchema> s, std::string d)
|
| 39 |
+
: kernel(std::move(k))
|
| 40 |
+
, inferred_function_schema(std::move(s))
|
| 41 |
+
, debug(std::move(d))
|
| 42 |
+
{}
|
| 43 |
+
AnnotatedKernel() = default;
|
| 44 |
+
KernelFunction kernel;
|
| 45 |
+
std::unique_ptr<FunctionSchema> inferred_function_schema;
|
| 46 |
+
// A little debug string to help us identify the kernel in question.
|
| 47 |
+
// Most importantly it records the TORCH_LIBRARY block that did the
|
| 48 |
+
// registration.
|
| 49 |
+
std::string debug;
|
| 50 |
+
};
|
| 51 |
+
|
| 52 |
+
// This data structure represents operator schema, with metadata specifying
|
| 53 |
+
// where the registration of this schema occurred
|
| 54 |
+
struct AnnotatedSchema final {
|
| 55 |
+
AnnotatedSchema(FunctionSchema s, std::string d)
|
| 56 |
+
: schema(std::move(s))
|
| 57 |
+
, debug(std::move(d))
|
| 58 |
+
{}
|
| 59 |
+
FunctionSchema schema;
|
| 60 |
+
std::string debug;
|
| 61 |
+
};
|
| 62 |
+
|
| 63 |
+
// Internal data structure that records information about a specific operator.
|
| 64 |
+
// It's not part of the public API; typically, users will interact with
|
| 65 |
+
// OperatorHandle instead.
|
| 66 |
+
//
|
| 67 |
+
// Concurrent writes to OperatorEntry are protected by the GLOBAL Dispatcher
|
| 68 |
+
// lock (this is important because some methods in OperatorEntry access
|
| 69 |
+
// dispatcher state)
|
| 70 |
+
class TORCH_API OperatorEntry final {
|
| 71 |
+
public:
|
| 72 |
+
explicit OperatorEntry(OperatorName&& operator_name);
|
| 73 |
+
|
| 74 |
+
OperatorEntry(const OperatorEntry&) = delete;
|
| 75 |
+
OperatorEntry(OperatorEntry&&) noexcept = delete;
|
| 76 |
+
OperatorEntry& operator=(const OperatorEntry&) = delete;
|
| 77 |
+
OperatorEntry& operator=(OperatorEntry&&) noexcept = delete;
|
| 78 |
+
|
| 79 |
+
const FunctionSchema& schema() const {
|
| 80 |
+
TORCH_INTERNAL_ASSERT(schema_.has_value(), "Tried to access the schema for ", name_, " which doesn't have a schema registered yet");
|
| 81 |
+
return schema_->schema;
|
| 82 |
+
}
|
| 83 |
+
const std::string& debug() const {
|
| 84 |
+
TORCH_INTERNAL_ASSERT(schema_.has_value());
|
| 85 |
+
return schema_->debug;
|
| 86 |
+
}
|
| 87 |
+
bool hasSchema() const {
|
| 88 |
+
return schema_.has_value();
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
bool isObserved() const {
|
| 92 |
+
return is_observed_;
|
| 93 |
+
}
|
| 94 |
+
|
| 95 |
+
// We may allocate an OperatorEntry for an operator even when we don't
|
| 96 |
+
// have a schema. When we receive the schema registration, we post
|
| 97 |
+
// facto register a schema.
|
| 98 |
+
//
|
| 99 |
+
// NB: registerSchema/deregisterSchema are not idempotent; if you
|
| 100 |
+
// attempt to register a schema when one is already present or vice
|
| 101 |
+
// versa that is an error. (Refcounting for the registrations is
|
| 102 |
+
// handled in the OperatorHandle in Dispatcher)
|
| 103 |
+
void registerSchema(FunctionSchema&&, std::string&& debug, std::vector<at::Tag> tags = {});
|
| 104 |
+
void deregisterSchema();
|
| 105 |
+
|
| 106 |
+
const OperatorName& operator_name() const {
|
| 107 |
+
return name_;
|
| 108 |
+
}
|
| 109 |
+
|
| 110 |
+
#ifdef C10_DISPATCHER_ONE_KERNEL_PER_DISPATCH_KEY
|
| 111 |
+
using AnnotatedKernelContainer = std::array<AnnotatedKernel, 1>;
|
| 112 |
+
#else
|
| 113 |
+
using AnnotatedKernelContainer = std::list<AnnotatedKernel>;
|
| 114 |
+
#endif
|
| 115 |
+
using AnnotatedKernelContainerIterator = AnnotatedKernelContainer::iterator;
|
| 116 |
+
|
| 117 |
+
// Why are kernels and fallback asymmetric? It has to do with ownership.
|
| 118 |
+
// Kernels and the computed dispatch tables for them are canonically
|
| 119 |
+
// owned by OperatorEntry, but backend fallbacks are specified once
|
| 120 |
+
// and apply for all operators, so they should be owned by Dispatcher.
|
| 121 |
+
// However, the registration of a backend fallback affects the
|
| 122 |
+
// state of the computed dispatch table, so when a backend fallback
|
| 123 |
+
// is updated, we need to update the operator tables too. Thus,
|
| 124 |
+
// registerKernel is the mechanism by which we give kernels to
|
| 125 |
+
// operator entry to own (and update dispatch table), but we only
|
| 126 |
+
// need a non-owning mechanism to update fallback.
|
| 127 |
+
|
| 128 |
+
// Precondition: Dispatcher::mutex_ is held
|
| 129 |
+
// Postcondition: caller is responsible for disposing of the kernel
|
| 130 |
+
AnnotatedKernelContainerIterator registerKernel(
|
| 131 |
+
const Dispatcher& dispatcher,
|
| 132 |
+
std::optional<DispatchKey> dispatch_key,
|
| 133 |
+
KernelFunction kernel,
|
| 134 |
+
std::optional<CppSignature> cpp_signature,
|
| 135 |
+
std::unique_ptr<FunctionSchema> inferred_function_schema,
|
| 136 |
+
std::string debug
|
| 137 |
+
);
|
| 138 |
+
|
| 139 |
+
// Precondition: Dispatcher::mutex_ is held
|
| 140 |
+
void deregisterKernel_(
|
| 141 |
+
const Dispatcher& dispatcher,
|
| 142 |
+
std::optional<DispatchKey> dispatch_key,
|
| 143 |
+
AnnotatedKernelContainerIterator kernel
|
| 144 |
+
);
|
| 145 |
+
|
| 146 |
+
// Precondition: Dispatcher::mutex_ is held
|
| 147 |
+
void updateFallback(
|
| 148 |
+
const Dispatcher& dispatcher,
|
| 149 |
+
DispatchKey dispatch_key
|
| 150 |
+
);
|
| 151 |
+
|
| 152 |
+
// Precondition: Dispatcher::mutex_ is held
|
| 153 |
+
void updateSchemaAliasAnalysis(AliasAnalysisKind a) {
|
| 154 |
+
TORCH_INTERNAL_ASSERT(schema_.has_value());
|
| 155 |
+
schema_->schema.setAliasAnalysis(a);
|
| 156 |
+
}
|
| 157 |
+
|
| 158 |
+
std::string dumpComputedTable() const;
|
| 159 |
+
std::string dumpState() const;
|
| 160 |
+
void checkInvariants() const;
|
| 161 |
+
|
| 162 |
+
const DispatchKeyExtractor& dispatchKeyExtractor() const { return dispatchKeyExtractor_; }
|
| 163 |
+
|
| 164 |
+
// Asserts that the given FuncType is correct for calling this operator in an unboxed way.
|
| 165 |
+
template<class FuncType>
|
| 166 |
+
inline void assertSignatureIsCorrect() {
|
| 167 |
+
assertSignatureIsCorrect(CppSignature::make<FuncType>(), fn_has_symint<FuncType>::value);
|
| 168 |
+
}
|
| 169 |
+
|
| 170 |
+
void assertSignatureIsCorrect(const CppSignature& call_signature, bool has_symint) const;
|
| 171 |
+
|
| 172 |
+
[[noreturn]] void reportError(DispatchKey dispatchKey) const;
|
| 173 |
+
|
| 174 |
+
const KernelFunction& lookup(DispatchKeySet ks) const {
|
| 175 |
+
const auto idx = ks.getDispatchTableIndexForDispatchKeySet();
|
| 176 |
+
if (C10_UNLIKELY(idx == -1)) {
|
| 177 |
+
reportError(ks.highestPriorityTypeId());
|
| 178 |
+
}
|
| 179 |
+
const auto& kernel = dispatchTable_[idx];
|
| 180 |
+
// A valid kernel *always* has a boxed kernel and *may* have an
|
| 181 |
+
// unboxed kernel. However, we typically do unboxed calls in at::
|
| 182 |
+
// APIs, where the kernel 1) will very likely be valid and 2)
|
| 183 |
+
// should have an unboxed kernel. Checking the unboxed kernel
|
| 184 |
+
// first will allow us to avoid touching the boxed kernel at all
|
| 185 |
+
// in the common case.
|
| 186 |
+
if (C10_UNLIKELY(!kernel.isValidUnboxed())) {
|
| 187 |
+
if (!kernel.isValid()) {
|
| 188 |
+
reportError(ks.highestPriorityTypeId());
|
| 189 |
+
}
|
| 190 |
+
}
|
| 191 |
+
return kernel;
|
| 192 |
+
}
|
| 193 |
+
|
| 194 |
+
std::string listAllDispatchKeys() const;
|
| 195 |
+
|
| 196 |
+
// Returns true if kernel_ has entry for any key in ks.
|
| 197 |
+
//
|
| 198 |
+
// Invariant: There are no alias keys in the passed-in dispatch key set.
|
| 199 |
+
// Note [No Alias Keys in DispatchKeySet]
|
| 200 |
+
// Alias keys should be checked using `hasKernelForDispatchKey`
|
| 201 |
+
// Alias keys shouldn't go inside of a DispatchKeySet, since they can technically
|
| 202 |
+
// have a value > 63 (causing overflow).
|
| 203 |
+
bool hasKernelForAnyDispatchKey(DispatchKeySet ks) const;
|
| 204 |
+
// Returns true if kernel_ has entry for a particular key.
|
| 205 |
+
bool hasKernelForDispatchKey(DispatchKey k) const;
|
| 206 |
+
// Retrieves the kernel entry at a particular key. Symmetric with
|
| 207 |
+
// hasKernelForDispatchKey. To get the AnnotatedKernel, see
|
| 208 |
+
// getKernelForDispatchKey (private)
|
| 209 |
+
const KernelFunction& kernelForDispatchKey(DispatchKey k) const;
|
| 210 |
+
// Returns true if the "computed table" has an entry for a particular key.
|
| 211 |
+
bool hasComputedKernelForDispatchKey(DispatchKey k) const;
|
| 212 |
+
// Returns all the operator tags added at the time of registration
|
| 213 |
+
const std::vector<at::Tag>& getTags() const;
|
| 214 |
+
void setReportErrorCallback_(std::unique_ptr<c10::SafePyObject> callback);
|
| 215 |
+
|
| 216 |
+
template <typename F>
|
| 217 |
+
PyObject* getPythonOp(PyInterpreter* self_interpreter, F slow_accessor) const {
|
| 218 |
+
return py_cache_.ptr_or(self_interpreter, slow_accessor);
|
| 219 |
+
}
|
| 220 |
+
|
| 221 |
+
private:
|
| 222 |
+
|
| 223 |
+
OperatorName name_;
|
| 224 |
+
std::optional<AnnotatedSchema> schema_;
|
| 225 |
+
#ifndef C10_MOBILE
|
| 226 |
+
std::vector<at::Tag> tags_;
|
| 227 |
+
#endif
|
| 228 |
+
std::array<KernelFunction, c10::num_runtime_entries> dispatchTable_;
|
| 229 |
+
DispatchKeyExtractor dispatchKeyExtractor_;
|
| 230 |
+
// Pointer to the torch.ops.ns.op.overload object for speed
|
| 231 |
+
c10::PyHandleCache py_cache_;
|
| 232 |
+
|
| 233 |
+
// kernels_ stores all registered kernels for the corresponding dispatch key
|
| 234 |
+
// and catchAllKernels_ stores the catch-all kernels.
|
| 235 |
+
// If an operator library gets loaded that overwrites an already existing kernel,
|
| 236 |
+
// both kernels will be in that list but only the newer one will be in
|
| 237 |
+
// dispatchTable. If any of the kernels go away (say the library gets
|
| 238 |
+
// unloaded), we remove the kernel from this list and update the
|
| 239 |
+
// dispatchTable if necessary.
|
| 240 |
+
// Kernels in the list are ordered by registration time descendingly,
|
| 241 |
+
// newer registrations are before older registrations.
|
| 242 |
+
// We do not combine dispatchTable and kernels into one hash map because
|
| 243 |
+
// kernels is a larger data structure and accessed quite infrequently
|
| 244 |
+
// while dispatchTable is accessed often and should be kept small to fit
|
| 245 |
+
// into CPU caches.
|
| 246 |
+
// Invariants:
|
| 247 |
+
// - dispatchTable[dispatch_key] == kernels_[dispatch_key].front()
|
| 248 |
+
// - dispatchTable[dispatch_key] does not exist if and only if
|
| 249 |
+
// kernels_[dispatch_key] does not exist
|
| 250 |
+
// - If kernels_[dispatch_key] exists, then it has elements.
|
| 251 |
+
// It is never an empty list.
|
| 252 |
+
//
|
| 253 |
+
// Why do we do that?
|
| 254 |
+
// -----
|
| 255 |
+
// We mostly do this to enable Jupyter notebooks where a cell registering
|
| 256 |
+
// a kernel could be executed multiple times and the later execution
|
| 257 |
+
// should overwrite the earlier one. Note that this still fails when the
|
| 258 |
+
// function schema changed between the executions, but it works as long
|
| 259 |
+
// as the function schema didn't change. A better solution would be to
|
| 260 |
+
// unload the old extension library from the Jupyter cell when the cell is
|
| 261 |
+
// re-executed and then only allow one kernel here, i.e. error if a kernel
|
| 262 |
+
// is already registered, but that's a lot of effort to implement and
|
| 263 |
+
// currently not high-pri.
|
| 264 |
+
ska::flat_hash_map<DispatchKey,
|
| 265 |
+
#ifdef C10_DISPATCHER_ONE_KERNEL_PER_DISPATCH_KEY
|
| 266 |
+
// On mobile, we needn't worry about Jupyter notebooks.
|
| 267 |
+
std::array<AnnotatedKernel, 1>
|
| 268 |
+
#else
|
| 269 |
+
std::list<AnnotatedKernel>
|
| 270 |
+
#endif
|
| 271 |
+
> kernels_;
|
| 272 |
+
|
| 273 |
+
const AnnotatedKernel& missingKernel() const;
|
| 274 |
+
const AnnotatedKernel& ambiguousAutogradOtherKernel() const;
|
| 275 |
+
|
| 276 |
+
// cpp_signature_ stores function signature if any of
|
| 277 |
+
// the kernels was created in a way that allowed us to know the function
|
| 278 |
+
// signature (i.e. by supplying an unboxed C++ kernel function).
|
| 279 |
+
// If this is set, it will be used to check that future kernel
|
| 280 |
+
// registrations match and it will be used in unboxed function calls
|
| 281 |
+
// to verify their arguments against the known function signature.
|
| 282 |
+
struct CppSignatureWithDebug {
|
| 283 |
+
CppSignature signature;
|
| 284 |
+
std::string debug;
|
| 285 |
+
std::optional<DispatchKey> dispatch_key;
|
| 286 |
+
};
|
| 287 |
+
std::optional<CppSignatureWithDebug> cpp_signature_;
|
| 288 |
+
std::optional<CppSignatureWithDebug> sym_cpp_signature_;
|
| 289 |
+
|
| 290 |
+
// A Python custom error handler for OperatorEntry::reportError
|
| 291 |
+
std::unique_ptr<c10::SafePyObject> report_error_callback_;
|
| 292 |
+
|
| 293 |
+
// Whether this operator needs to be observed with RecordFunction
|
| 294 |
+
const bool is_observed_;
|
| 295 |
+
|
| 296 |
+
[[noreturn]] void reportSignatureError(const CppSignature& call_signature, const CppSignatureWithDebug& saved_signature) const;
|
| 297 |
+
const KernelFunction& computeDispatchTableEntry(const c10::Dispatcher& dispatcher, DispatchKey dispatch_key) const;
|
| 298 |
+
std::pair<const AnnotatedKernel&, const char*> computeDispatchTableEntryWithDebug(
|
| 299 |
+
const c10::Dispatcher& dispatcher, DispatchKey dispatch_key
|
| 300 |
+
) const;
|
| 301 |
+
// This function re-establishes the invariant that dispatchTable
|
| 302 |
+
// contains the front element from the kernels list for a given runtime dispatch key.
|
| 303 |
+
void updateDispatchTableEntry_(const c10::Dispatcher& dispatcher, DispatchKey dispatch_key);
|
| 304 |
+
// Like above, but also handles alias dispatch keys.
|
| 305 |
+
void updateDispatchTable_(const c10::Dispatcher& dispatcher, DispatchKey dispatch_key);
|
| 306 |
+
// Like above, but for ALL entries in the dispatch table.
|
| 307 |
+
void updateDispatchTableFull_(const c10::Dispatcher& dispatcher);
|
| 308 |
+
// Retrieves a pointer to AnnotatedKernel at kernels_.at(dispatch_key).front().
|
| 309 |
+
const AnnotatedKernel* getKernelForDispatchKey(DispatchKey dispatch_key) const;
|
| 310 |
+
};
|
| 311 |
+
|
| 312 |
+
} // namespace impl
|
| 313 |
+
} // namespace c10
|
lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorOptions.h
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <cstdint>
|
| 4 |
+
|
| 5 |
+
namespace c10 {
|
| 6 |
+
|
| 7 |
+
enum class AliasAnalysisKind : uint8_t {
|
| 8 |
+
INTERNAL_SPECIAL_CASE,
|
| 9 |
+
CONSERVATIVE, // The most conservative alias analysis type, assumes
|
| 10 |
+
// side-effects. This is the default analysis.
|
| 11 |
+
FROM_SCHEMA,
|
| 12 |
+
PURE_FUNCTION
|
| 13 |
+
};
|
| 14 |
+
|
| 15 |
+
#if !defined(_MSC_VER)
|
| 16 |
+
constexpr // Our current MSVC version has a bug that doesn't allow this to be constexpr.
|
| 17 |
+
#endif
|
| 18 |
+
inline const char* toString(AliasAnalysisKind aliasAnalysisKind) {
|
| 19 |
+
return (aliasAnalysisKind == AliasAnalysisKind::CONSERVATIVE)
|
| 20 |
+
? "CONSERVATIVE"
|
| 21 |
+
: (aliasAnalysisKind == AliasAnalysisKind::FROM_SCHEMA)
|
| 22 |
+
? "FROM_SCHEMA"
|
| 23 |
+
: (aliasAnalysisKind == AliasAnalysisKind::PURE_FUNCTION)
|
| 24 |
+
? "PURE_FUNCTION"
|
| 25 |
+
: (aliasAnalysisKind == AliasAnalysisKind::INTERNAL_SPECIAL_CASE)
|
| 26 |
+
? "INTERNAL_SPECIAL_CASE"
|
| 27 |
+
: "UNKNOWN";
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
} // namespace c10
|
lib/python3.10/site-packages/torch/include/ATen/core/op_registration/adaption.h
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/Tensor.h>
|
| 4 |
+
#include <ATen/TensorUtils.h>
|
| 5 |
+
#include <ATen/core/List.h>
|
| 6 |
+
#include <c10/core/TensorOptions.h>
|
| 7 |
+
|
| 8 |
+
/*
|
| 9 |
+
* [Note: hacky wrapper removal for optional tensor]
|
| 10 |
+
*
|
| 11 |
+
* The kernel implementation takes an optional tensor marked in the schema as
|
| 12 |
+
* Tensor? but the C++ function takes Tensor instead of the std::optional<Tensor>
|
| 13 |
+
* expected by the dispatcher.
|
| 14 |
+
*
|
| 15 |
+
* To remove the hacky wrapper, the C++ function is changed to take
|
| 16 |
+
* std::optional<Tensor> and unwrap the Tensor value at the beginning of
|
| 17 |
+
* the function, e.g.:
|
| 18 |
+
* > c10::MaybeOwned<Tensor> weight_maybe_owned =
|
| 19 |
+
* > at::borrow_from_optional_tensor(weight_opt);
|
| 20 |
+
* > const Tensor& weight = *weight_maybe_owned;
|
| 21 |
+
*
|
| 22 |
+
* We may want to make the kernel handle optional directly without
|
| 23 |
+
* going through the creation of a default-constructed Tensor in
|
| 24 |
+
* at::borrow_from_optional_tensor.
|
| 25 |
+
*/
|
| 26 |
+
|
| 27 |
+
/*
|
| 28 |
+
* [Note: hacky wrapper removal for TensorOptions]
|
| 29 |
+
*
|
| 30 |
+
* The kernel implementation takes a TensorOptions argument but the dispatcher
|
| 31 |
+
* expects separate arguments for dtype, layout, device, pin_memory.
|
| 32 |
+
*
|
| 33 |
+
* To remove the hacky wrapper, the kernel implementation is changed to take
|
| 34 |
+
* the 4 arguments (dtype, layout, device, pin_memory), and assemble the
|
| 35 |
+
* TensorOptions value at the beginning of the function, e.g.:
|
| 36 |
+
* > TensorOptions options = TensorOptions().dtype(dtype).layout(layout)
|
| 37 |
+
* > .device(device).pinned_memory(pin_memory);
|
| 38 |
+
*
|
| 39 |
+
* We may want make the kernel handle these parameters directly without going
|
| 40 |
+
* through the creation of a TensorOptions value.
|
| 41 |
+
*/
|
| 42 |
+
|
| 43 |
+
namespace c10::impl {
|
| 44 |
+
|
| 45 |
+
TORCH_API void common_device_check_failure(Device common_device, const at::Tensor& tensor, at::CheckedFrom methodName, at::CheckedFrom argName);
|
| 46 |
+
|
| 47 |
+
inline void check_and_update_common_device(std::optional<Device>& common_device, const at::Tensor& tensor, at::CheckedFrom methodName, at::CheckedFrom argName) {
|
| 48 |
+
// TODO: Remove this once the following issue is addressed:
|
| 49 |
+
// https://github.com/pytorch/pytorch/issues/57380
|
| 50 |
+
if (!tensor.defined()) {
|
| 51 |
+
return;
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
if (!common_device.has_value()) {
|
| 55 |
+
common_device = tensor.device();
|
| 56 |
+
return;
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
if (C10_UNLIKELY(common_device != tensor.device())) {
|
| 60 |
+
common_device_check_failure(*common_device, tensor, methodName, argName);
|
| 61 |
+
}
|
| 62 |
+
}
|
| 63 |
+
|
| 64 |
+
inline void check_and_update_common_device(std::optional<Device>& common_device, const std::optional<at::Tensor>& tensor, at::CheckedFrom methodName, at::CheckedFrom argName) {
|
| 65 |
+
if (tensor.has_value()) {
|
| 66 |
+
check_and_update_common_device(common_device, tensor.value(), methodName, argName);
|
| 67 |
+
}
|
| 68 |
+
}
|
| 69 |
+
|
| 70 |
+
inline void check_and_update_common_device(std::optional<Device>& common_device, at::ITensorListRef tensors, at::CheckedFrom methodName, at::CheckedFrom argName) {
|
| 71 |
+
for (const auto& tensor : tensors) {
|
| 72 |
+
check_and_update_common_device(common_device, tensor, methodName, argName);
|
| 73 |
+
}
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
inline void check_and_update_common_device(std::optional<Device>& common_device, const List<std::optional<at::Tensor>>& tensors, at::CheckedFrom methodName, at::CheckedFrom argName) {
|
| 77 |
+
for (const auto& tensor : tensors) {
|
| 78 |
+
check_and_update_common_device(common_device, tensor, methodName, argName);
|
| 79 |
+
}
|
| 80 |
+
}
|
| 81 |
+
} // namespace c10::impl
|
lib/python3.10/site-packages/torch/include/ATen/core/op_registration/infer_schema.h
ADDED
|
@@ -0,0 +1,157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
/**
|
| 4 |
+
* This file contains functionality to take a C++ function and infer its
|
| 5 |
+
* c10::FunctionSchema.
|
| 6 |
+
*/
|
| 7 |
+
|
| 8 |
+
#include <ATen/core/function_schema.h>
|
| 9 |
+
#include <c10/util/Metaprogramming.h>
|
| 10 |
+
|
| 11 |
+
namespace c10 {
|
| 12 |
+
namespace detail::infer_schema {
|
| 13 |
+
|
| 14 |
+
/// The templated inference code creates `ArgumentDef` instead of `Argument`,
|
| 15 |
+
/// because that can be constructed at compile time and has a much smaller
|
| 16 |
+
/// binary size than having calls to `Argument` constructors in the template.
|
| 17 |
+
/// Creating `Argument` objects from `ArgumentDef` can then be done at
|
| 18 |
+
/// runtime in a non-templated way.
|
| 19 |
+
struct ArgumentDef final {
|
| 20 |
+
using GetTypeFn = TypePtr();
|
| 21 |
+
GetTypeFn* getTypeFn;
|
| 22 |
+
GetTypeFn* getFakeTypeFn;
|
| 23 |
+
constexpr ArgumentDef(): getTypeFn(nullptr), getFakeTypeFn(nullptr) {}
|
| 24 |
+
explicit constexpr ArgumentDef(GetTypeFn *getTypeFn, GetTypeFn *getFakeTypeFn): getTypeFn(getTypeFn), getFakeTypeFn(getFakeTypeFn) {}
|
| 25 |
+
};
|
| 26 |
+
|
| 27 |
+
template<bool V>
|
| 28 |
+
struct bool_t {};
|
| 29 |
+
template<> struct bool_t<true> : std::true_type {};
|
| 30 |
+
template<> struct bool_t<false> : std::false_type {};
|
| 31 |
+
|
| 32 |
+
/// Checks the static C++ types `Types` for correctness to catch common error cases.
|
| 33 |
+
template <class... Types>
|
| 34 |
+
constexpr int checkStaticTypes() {
|
| 35 |
+
// Give nice error messages for some of the common error cases.
|
| 36 |
+
// Use a LOUD ERROR MESSAGE SO USERS SEE THE STATIC_ASSERT
|
| 37 |
+
static_assert(std::conjunction<
|
| 38 |
+
bool_t<!std::is_integral_v<Types> || std::is_same_v<Types, int8_t> || std::is_same_v<Types, int64_t> || std::is_same_v<Types, bool>>...
|
| 39 |
+
>::value, "INVALID TYPE: Only int8_t, int64_t and bool are supported as an integral argument type");
|
| 40 |
+
static_assert(std::conjunction<
|
| 41 |
+
bool_t<!std::is_same_v<Types, float>>...
|
| 42 |
+
>::value, "INVALID TYPE: float is not supported as an argument type, use double instead");
|
| 43 |
+
return 0;
|
| 44 |
+
}
|
| 45 |
+
|
| 46 |
+
template <typename... Ts, size_t... Is>
|
| 47 |
+
constexpr std::array<ArgumentDef, sizeof...(Ts)> createArgumentVectorFromTypes(std::index_sequence<Is...>) {
|
| 48 |
+
return (
|
| 49 |
+
// Check types for common errors
|
| 50 |
+
checkStaticTypes<Ts...>(),
|
| 51 |
+
|
| 52 |
+
// Create the return value
|
| 53 |
+
std::array<ArgumentDef, sizeof...(Ts)>{
|
| 54 |
+
ArgumentDef(&getTypePtrCopy<std::decay_t<Ts>>, &getFakeTypePtrCopy<std::decay_t<Ts>>)...}
|
| 55 |
+
);
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
/// Creates a vector of `ArgumentDef` from a list of C++ types that are specified
|
| 59 |
+
/// as template arguments.
|
| 60 |
+
template<class ParameterTypes> struct createArguments final {};
|
| 61 |
+
template<class... ParameterTypes>
|
| 62 |
+
struct createArguments<guts::typelist::typelist<ParameterTypes...>> final {
|
| 63 |
+
static constexpr std::array<ArgumentDef, sizeof...(ParameterTypes)> call() {
|
| 64 |
+
return createArgumentVectorFromTypes<ParameterTypes...>(
|
| 65 |
+
std::make_index_sequence<sizeof...(ParameterTypes)>()
|
| 66 |
+
);
|
| 67 |
+
}
|
| 68 |
+
};
|
| 69 |
+
|
| 70 |
+
/// Creates a vector of `ArgumentDef` from a list of C++ types that are specified
|
| 71 |
+
/// as a tuple (i.e. in the way c10 kernels return values).
|
| 72 |
+
/// It can be a tuple<A, B, C> if there's three output arguments with types A, B, C.
|
| 73 |
+
/// It can be an empty tuple<>, or void for kernels that don't return anything.
|
| 74 |
+
/// It can be a single type A (i.e. no tuple) for the case where a kernel just
|
| 75 |
+
/// returns one value.
|
| 76 |
+
template<class ReturnTypeTuple, class Enable = void> struct createReturns final {};
|
| 77 |
+
|
| 78 |
+
template<class... ReturnTypes>
|
| 79 |
+
struct createReturns<std::tuple<ReturnTypes...>, void> final {
|
| 80 |
+
static constexpr std::array<ArgumentDef, sizeof...(ReturnTypes)> call() {
|
| 81 |
+
return createArgumentVectorFromTypes<ReturnTypes...>(
|
| 82 |
+
std::make_index_sequence<sizeof...(ReturnTypes)>()
|
| 83 |
+
);
|
| 84 |
+
}
|
| 85 |
+
};
|
| 86 |
+
|
| 87 |
+
template<class ReturnType>
|
| 88 |
+
struct createReturns<ReturnType, std::enable_if_t<!std::is_same_v<void, ReturnType> && !guts::is_instantiation_of<std::tuple, ReturnType>::value>> final {
|
| 89 |
+
static constexpr std::array<ArgumentDef, 1> call() {
|
| 90 |
+
return createReturns<std::tuple<ReturnType>>::call();
|
| 91 |
+
}
|
| 92 |
+
};
|
| 93 |
+
|
| 94 |
+
template<>
|
| 95 |
+
struct createReturns<void, void> final {
|
| 96 |
+
static constexpr std::array<ArgumentDef, 0> call() {
|
| 97 |
+
return createReturns<std::tuple<>>::call();
|
| 98 |
+
}
|
| 99 |
+
};
|
| 100 |
+
|
| 101 |
+
template <typename ReturnType>
|
| 102 |
+
struct createSingleReturn {
|
| 103 |
+
static constexpr std::array<ArgumentDef, 1> call() {
|
| 104 |
+
return createArgumentVectorFromTypes<ReturnType>(std::make_index_sequence<1>());
|
| 105 |
+
}
|
| 106 |
+
};
|
| 107 |
+
|
| 108 |
+
TORCH_API FunctionSchema make_function_schema(std::string&& name, std::string&& overload_name, c10::ArrayRef<ArgumentDef> arguments, c10::ArrayRef<ArgumentDef> returns);
|
| 109 |
+
TORCH_API FunctionSchema make_function_schema(c10::ArrayRef<ArgumentDef> arguments, c10::ArrayRef<ArgumentDef> returns);
|
| 110 |
+
|
| 111 |
+
/// Creates a `FunctionSchema` object from a `FunctionTraits` type for a
|
| 112 |
+
/// function. Flattens std::tuple returns into multiple return types
|
| 113 |
+
template <typename FunctionTraits>
|
| 114 |
+
FunctionSchema createFunctionSchemaFromTraitsFlattenedReturns() {
|
| 115 |
+
using ReturnType = typename FunctionTraits::return_type;
|
| 116 |
+
using ParameterTypes = typename FunctionTraits::parameter_types;
|
| 117 |
+
|
| 118 |
+
// arguments and returns are computed into a std::array at compile time and embedded into the binary.
|
| 119 |
+
// The only code executed at runtime here is the one that creates a std::vector
|
| 120 |
+
// of the arguments/returns from the std::array.
|
| 121 |
+
constexpr auto arguments = createArguments<ParameterTypes>::call();
|
| 122 |
+
constexpr auto returns = createReturns<ReturnType>::call();
|
| 123 |
+
|
| 124 |
+
return make_function_schema(arguments, returns);
|
| 125 |
+
}
|
| 126 |
+
|
| 127 |
+
/// Creates a `FunctionSchema` object from a `FunctionTraits` type for a
|
| 128 |
+
/// function. Preserves std::tuple returns as a Tuple return type
|
| 129 |
+
template <typename FunctionTraits>
|
| 130 |
+
FunctionSchema createFunctionSchemaFromTraitsSingleReturn(std::string&& name, std::string&& overload_name) {
|
| 131 |
+
using ReturnType = typename FunctionTraits::return_type;
|
| 132 |
+
using ParameterTypes = typename FunctionTraits::parameter_types;
|
| 133 |
+
|
| 134 |
+
// arguments and returns are computed into a std::array at compile time and embedded into the binary.
|
| 135 |
+
// The only code executed at runtime here is the one that creates a std::vector
|
| 136 |
+
// of the arguments/returns from the std::array.
|
| 137 |
+
constexpr auto arguments = createArguments<ParameterTypes>::call();
|
| 138 |
+
constexpr auto returns = createSingleReturn<ReturnType>::call();
|
| 139 |
+
|
| 140 |
+
return make_function_schema(std::move(name), std::move(overload_name), arguments, returns);
|
| 141 |
+
}
|
| 142 |
+
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
template<class FuncType>
|
| 146 |
+
FunctionSchema inferFunctionSchemaFlattenedReturns() {
|
| 147 |
+
return detail::infer_schema::createFunctionSchemaFromTraitsFlattenedReturns<guts::infer_function_traits_t<FuncType>>();
|
| 148 |
+
}
|
| 149 |
+
|
| 150 |
+
template<class FuncType>
|
| 151 |
+
FunctionSchema inferFunctionSchemaSingleReturn(std::string&& name, std::string&& overload_name) {
|
| 152 |
+
return detail::infer_schema::createFunctionSchemaFromTraitsSingleReturn<guts::infer_function_traits_t<FuncType>>(std::move(name), std::move(overload_name));
|
| 153 |
+
}
|
| 154 |
+
|
| 155 |
+
TORCH_API std::optional<std::string> findSchemaDifferences(const FunctionSchema& inferred, const FunctionSchema& specified);
|
| 156 |
+
|
| 157 |
+
}
|
lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_allowlist.h
ADDED
|
@@ -0,0 +1,196 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// TODO: unify to C10_MOBILE. In theory this header could be used in OSS.
|
| 4 |
+
#ifdef TEMPLATE_SELECTIVE_BUILD
|
| 5 |
+
#include <ATen/selected_mobile_ops.h>
|
| 6 |
+
#endif
|
| 7 |
+
|
| 8 |
+
/**
|
| 9 |
+
* This header implements functionality to build PyTorch with only a certain
|
| 10 |
+
* set of operators (+ dependencies) included.
|
| 11 |
+
*
|
| 12 |
+
* - Build with -DTORCH_OPERATOR_WHITELIST="aten::add;aten::sub" and only these
|
| 13 |
+
* two ops will be included in your build. The allowlist records operators
|
| 14 |
+
* only, no overloads; if you include aten::add, all overloads of aten::add
|
| 15 |
+
* will be included.
|
| 16 |
+
*
|
| 17 |
+
* Internally, this is done by removing the operator registration calls
|
| 18 |
+
* using compile time programming, and the linker will then prune all
|
| 19 |
+
* operator functions that weren't registered.
|
| 20 |
+
* See Note [Selective build] for more details
|
| 21 |
+
*
|
| 22 |
+
* WARNING: The allowlist mechanism doesn't work for all ways you could go about
|
| 23 |
+
* registering an operator. If the dispatch key / operator name is not
|
| 24 |
+
* sufficiently obvious at compile time, then the allowlisting mechanism
|
| 25 |
+
* will fail (and the operator will be included in the binary anyway).
|
| 26 |
+
*/
|
| 27 |
+
|
| 28 |
+
#include <c10/util/string_view.h>
|
| 29 |
+
#include <c10/core/DispatchKey.h>
|
| 30 |
+
#include <c10/macros/Macros.h>
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
#if defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE)
|
| 34 |
+
#include <ATen/record_function.h>
|
| 35 |
+
#endif
|
| 36 |
+
|
| 37 |
+
namespace c10::impl {
|
| 38 |
+
|
| 39 |
+
constexpr bool allowlist_contains(string_view allowlist, string_view item); // Forward Declare
|
| 40 |
+
|
| 41 |
+
/**
|
| 42 |
+
* In selective build mode returns true/false depending on whether a build
|
| 43 |
+
* feature is available or not.
|
| 44 |
+
*
|
| 45 |
+
* In instrumenting mode (tracing mode), always returns true, and doesn't
|
| 46 |
+
* trigger any side effects.
|
| 47 |
+
*/
|
| 48 |
+
constexpr bool is_build_feature_available(const char* name) {
|
| 49 |
+
#if !defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE)
|
| 50 |
+
// Selective Build mode.
|
| 51 |
+
#if !defined(TORCH_BUILD_FEATURE_ALLOWLIST)
|
| 52 |
+
(void)name;
|
| 53 |
+
return true;
|
| 54 |
+
#else
|
| 55 |
+
return allowlist_contains(
|
| 56 |
+
C10_STRINGIZE(TORCH_BUILD_FEATURE_ALLOWLIST),
|
| 57 |
+
name);
|
| 58 |
+
#endif
|
| 59 |
+
|
| 60 |
+
#else
|
| 61 |
+
// Instrumenting mode.
|
| 62 |
+
(void)name;
|
| 63 |
+
return true;
|
| 64 |
+
#endif
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
[[noreturn]] void build_feature_required_feature_not_available(const char* feature);
|
| 68 |
+
|
| 69 |
+
/**
|
| 70 |
+
* Use BUILD_FEATURE_REQUIRED macro in user-code.
|
| 71 |
+
*
|
| 72 |
+
* In selective build mode becomes a no-op if the build feature passed
|
| 73 |
+
* in is available. If not available, throws an exception (c10::Error).
|
| 74 |
+
* The compiler is able to perform dead code elimination for code
|
| 75 |
+
* following this method if the build feature is not available.
|
| 76 |
+
*
|
| 77 |
+
* In instrumenting mode (tracing mode), registers (as a side effect)
|
| 78 |
+
* the presence of this specific build feature being triggered.
|
| 79 |
+
*/
|
| 80 |
+
#if !defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE) // selective build mode
|
| 81 |
+
|
| 82 |
+
#if defined(TORCH_BUILD_FEATURE_ALLOWLIST)
|
| 83 |
+
#define BUILD_FEATURE_REQUIRED(NAME) \
|
| 84 |
+
if (!c10::impl::is_build_feature_available(NAME)) { \
|
| 85 |
+
::c10::impl::build_feature_required_feature_not_available(NAME); \
|
| 86 |
+
}
|
| 87 |
+
#else // Everything trivially selected
|
| 88 |
+
#define BUILD_FEATURE_REQUIRED(NAME)
|
| 89 |
+
|
| 90 |
+
#endif
|
| 91 |
+
|
| 92 |
+
#else // trace mode
|
| 93 |
+
#define BUILD_FEATURE_REQUIRED(NAME) \
|
| 94 |
+
RECORD_FUNCTION_WITH_SCOPE( \
|
| 95 |
+
at::RecordScope::BUILD_FEATURE, \
|
| 96 |
+
std::string(NAME), \
|
| 97 |
+
{});
|
| 98 |
+
#endif
|
| 99 |
+
|
| 100 |
+
// Use this macro, and not is_build_feature_available
|
| 101 |
+
#define BUILD_FEATURE_AVAILABLE(NAME) ::c10::impl::is_build_feature_available(NAME)
|
| 102 |
+
|
| 103 |
+
// returns true iff allowlist contains item
|
| 104 |
+
// allowlist_contains("a;bc;d", "bc") == true
|
| 105 |
+
constexpr bool allowlist_contains(string_view allowlist, string_view item) {
|
| 106 |
+
//Choose a really big value for next so that if something goes wrong
|
| 107 |
+
//this code will blow up in a hopefully detectable way.
|
| 108 |
+
size_t next = std::numeric_limits<size_t>::max();
|
| 109 |
+
for (size_t cur = 0; cur <= allowlist.size(); cur = next) {
|
| 110 |
+
next = allowlist.find(';', cur);
|
| 111 |
+
if (next != string_view::npos) {
|
| 112 |
+
if (allowlist.substr(cur, next - cur).compare(item) == 0) {
|
| 113 |
+
return true;
|
| 114 |
+
}
|
| 115 |
+
next++;
|
| 116 |
+
} else {
|
| 117 |
+
if (allowlist.substr(cur).compare(item) == 0) {
|
| 118 |
+
return true;
|
| 119 |
+
}
|
| 120 |
+
break;
|
| 121 |
+
}
|
| 122 |
+
}
|
| 123 |
+
return false;
|
| 124 |
+
}
|
| 125 |
+
|
| 126 |
+
// Returns true iff the given op name is on the allowlist
|
| 127 |
+
// and should be registered
|
| 128 |
+
constexpr bool op_allowlist_check(string_view op_name [[maybe_unused]]) {
|
| 129 |
+
assert(op_name.find("::") != string_view::npos);
|
| 130 |
+
// Use assert() instead of throw() due to a gcc bug. See:
|
| 131 |
+
// https://stackoverflow.com/questions/34280729/throw-in-constexpr-function
|
| 132 |
+
// https://github.com/fmtlib/fmt/issues/682
|
| 133 |
+
assert(op_name.find("(") == string_view::npos);
|
| 134 |
+
#if !defined(TORCH_OPERATOR_WHITELIST)
|
| 135 |
+
// If the TORCH_OPERATOR_WHITELIST parameter is not defined,
|
| 136 |
+
// all ops are to be registered
|
| 137 |
+
return true;
|
| 138 |
+
#else
|
| 139 |
+
return allowlist_contains(
|
| 140 |
+
C10_STRINGIZE(TORCH_OPERATOR_WHITELIST),
|
| 141 |
+
// This function is majorly used for mobile selective build with
|
| 142 |
+
// root operators, where the overload is included in the allowlist.
|
| 143 |
+
op_name);
|
| 144 |
+
// // Strip overload name (as allowlist doesn't contain overloads)
|
| 145 |
+
// // Another function based on this may be added when there's usage
|
| 146 |
+
// // on op names without overload.
|
| 147 |
+
// OperatorNameView::parse(op_name).name);
|
| 148 |
+
#endif
|
| 149 |
+
}
|
| 150 |
+
|
| 151 |
+
// Returns true iff the given schema string is on the allowlist
|
| 152 |
+
// and should be registered
|
| 153 |
+
constexpr bool schema_allowlist_check(string_view schema) {
|
| 154 |
+
#if defined(TORCH_FORCE_SCHEMA_REGISTRATION)
|
| 155 |
+
return true;
|
| 156 |
+
#else
|
| 157 |
+
return op_allowlist_check(schema.substr(0, schema.find("(")));
|
| 158 |
+
#endif
|
| 159 |
+
}
|
| 160 |
+
|
| 161 |
+
// Returns true iff the given custom class name is on the allowlist
|
| 162 |
+
// and should be registered
|
| 163 |
+
constexpr bool custom_class_allowlist_check(string_view custom_class_name) {
|
| 164 |
+
#if !defined(TORCH_CUSTOM_CLASS_ALLOWLIST)
|
| 165 |
+
// If the TORCH_CUSTOM_CLASS_ALLOWLIST parameter is not defined,
|
| 166 |
+
// all custom classes are to be registered
|
| 167 |
+
(void)custom_class_name;
|
| 168 |
+
return true;
|
| 169 |
+
#else
|
| 170 |
+
return allowlist_contains(
|
| 171 |
+
C10_STRINGIZE(TORCH_CUSTOM_CLASS_ALLOWLIST),
|
| 172 |
+
custom_class_name);
|
| 173 |
+
#endif
|
| 174 |
+
}
|
| 175 |
+
|
| 176 |
+
// schema_allowlist_check() implicitly depends on a macro, TORCH_OPERATOR_WHITELIST.
|
| 177 |
+
// Add this API to pass arbitrary allowlist.
|
| 178 |
+
constexpr bool op_allowlist_contains_name_in_schema(string_view allowlist, string_view schema) {
|
| 179 |
+
return allowlist_contains(allowlist, schema.substr(0, schema.find("(")));
|
| 180 |
+
}
|
| 181 |
+
|
| 182 |
+
// Returns true iff the given dispatch key is on the allowlist
|
| 183 |
+
// and should be registered. When we turn this on, the list of valid
|
| 184 |
+
// mobile dispatch keys is hard coded (but you need to make sure
|
| 185 |
+
// that you have the correct set of dispatch keys for this).
|
| 186 |
+
constexpr bool dispatch_key_allowlist_check(DispatchKey /*k*/) {
|
| 187 |
+
#ifdef C10_MOBILE
|
| 188 |
+
return true;
|
| 189 |
+
// Disabled for now: to be enabled later!
|
| 190 |
+
// return k == DispatchKey::CPU || k == DispatchKey::Vulkan || k == DispatchKey::QuantizedCPU || k == DispatchKey::BackendSelect || k == DispatchKey::CatchAll;
|
| 191 |
+
#else
|
| 192 |
+
return true;
|
| 193 |
+
#endif
|
| 194 |
+
}
|
| 195 |
+
|
| 196 |
+
} // namespace c10::impl
|
lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_registration.h
ADDED
|
@@ -0,0 +1,596 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
/**
|
| 4 |
+
* Include this file if you want to register operators. It includes all
|
| 5 |
+
* functionality needed to do so for you.
|
| 6 |
+
*/
|
| 7 |
+
|
| 8 |
+
#include <c10/core/DispatchKey.h>
|
| 9 |
+
#include <c10/core/DispatchKeySet.h>
|
| 10 |
+
#include <c10/core/CompileTimeFunctionPointer.h>
|
| 11 |
+
#include <ATen/core/boxing/KernelFunction.h>
|
| 12 |
+
#include <ATen/core/dispatch/CppSignature.h>
|
| 13 |
+
#include <ATen/core/dispatch/RegistrationHandleRAII.h>
|
| 14 |
+
#include <ATen/core/op_registration/infer_schema.h>
|
| 15 |
+
#if defined(EXPOSE_C2_OPS) || !defined(CAFFE2_IS_XPLAT_BUILD)
|
| 16 |
+
#include <torch/csrc/jit/frontend/function_schema_parser.h>
|
| 17 |
+
#endif
|
| 18 |
+
#include <ATen/core/ATenOpList.h>
|
| 19 |
+
|
| 20 |
+
namespace c10 {
|
| 21 |
+
|
| 22 |
+
namespace detail {
|
| 23 |
+
// The first argument of the schema might be of type DispatchKeySet, in which case we remove it.
|
| 24 |
+
// We do this because every argument in a function schema is expected to be convertable
|
| 25 |
+
// to an ivalue, but DispatchKeySet is not a type we want the jit to be aware of.
|
| 26 |
+
// See Note [Plumbing Keys Through The Dispatcher]
|
| 27 |
+
template<class KernelFunctor>
|
| 28 |
+
std::unique_ptr<FunctionSchema> inferFunctionSchemaFromFunctor() {
|
| 29 |
+
using func_type = typename c10::remove_DispatchKeySet_arg_from_func<KernelFunctor>::func_type;
|
| 30 |
+
return std::make_unique<FunctionSchema>(inferFunctionSchemaFlattenedReturns<func_type>());
|
| 31 |
+
}
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
/**
|
| 35 |
+
* An instance of this class handles the registration for one or more operators.
|
| 36 |
+
* Make sure you keep the RegisterOperators instance around since it will
|
| 37 |
+
* deregister the operator it's responsible for in its destructor.
|
| 38 |
+
*
|
| 39 |
+
* Example:
|
| 40 |
+
*
|
| 41 |
+
* > namespace {
|
| 42 |
+
* > class my_kernel_cpu final : public c10::OperatorKernel {
|
| 43 |
+
* > public:
|
| 44 |
+
* > Tensor operator()(Tensor a, Tensor b) {...}
|
| 45 |
+
* > };
|
| 46 |
+
* > }
|
| 47 |
+
* >
|
| 48 |
+
* > static auto registry = c10::RegisterOperators()
|
| 49 |
+
* > .op(c10::RegisterOperators::options()
|
| 50 |
+
* > .schema("my_op")
|
| 51 |
+
* > .kernel<my_kernel_cpu>(DispatchKey::CPU));
|
| 52 |
+
*/
|
| 53 |
+
class TORCH_API RegisterOperators final {
|
| 54 |
+
public:
|
| 55 |
+
RegisterOperators() = default;
|
| 56 |
+
~RegisterOperators() = default;
|
| 57 |
+
|
| 58 |
+
RegisterOperators(const RegisterOperators&) = delete;
|
| 59 |
+
RegisterOperators& operator=(const RegisterOperators&) = delete;
|
| 60 |
+
RegisterOperators(RegisterOperators&&) noexcept = default;
|
| 61 |
+
RegisterOperators& operator=(RegisterOperators&&) noexcept = default;
|
| 62 |
+
|
| 63 |
+
class TORCH_API Options final {
|
| 64 |
+
public:
|
| 65 |
+
Options(const Options&) = delete;
|
| 66 |
+
Options(Options&&) noexcept = delete;
|
| 67 |
+
Options& operator=(const Options&) = delete;
|
| 68 |
+
Options& operator=(Options&&) noexcept = delete;
|
| 69 |
+
|
| 70 |
+
// internal-only for registering stack based kernels
|
| 71 |
+
template<KernelFunction::BoxedKernelFunction* kernel_func>
|
| 72 |
+
Options&& kernel(DispatchKey dispatch_key) && {
|
| 73 |
+
return std::move(*this).kernel(dispatch_key, KernelFunction::makeFromBoxedFunction<kernel_func>(), std::nullopt, nullptr);
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
// internal-only for registering stack based catch-all kernels
|
| 77 |
+
template<KernelFunction::BoxedKernelFunction* kernel_func>
|
| 78 |
+
Options&& catchAllKernel() && {
|
| 79 |
+
return std::move(*this).kernel(std::nullopt, KernelFunction::makeFromBoxedFunction<kernel_func>(), std::nullopt, nullptr);
|
| 80 |
+
}
|
| 81 |
+
|
| 82 |
+
// internal only for registering caffe2 ops
|
| 83 |
+
Options&& schema(FunctionSchema&& schema) {
|
| 84 |
+
TORCH_CHECK(!schemaOrName_.has_value(), "You can only specify the schema once per operator registration.");
|
| 85 |
+
schemaOrName_ = FunctionSchema(std::move(schema));
|
| 86 |
+
return std::move(*this);
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
/**
|
| 90 |
+
* Use this to specify the schema for an operator. You can also specify
|
| 91 |
+
* the operator name only to have the function signature part of the
|
| 92 |
+
* schema be inferred from the kernel function.
|
| 93 |
+
*
|
| 94 |
+
* Example:
|
| 95 |
+
*
|
| 96 |
+
* > // Infer function signature from my_kernel_cpu
|
| 97 |
+
* > static auto registry = c10::RegisterOperators()
|
| 98 |
+
* > .op(c10::RegisterOperators::options()
|
| 99 |
+
* > .schema("my_op")
|
| 100 |
+
* > .kernel<my_kernel_cpu>(DispatchKey::CPU));
|
| 101 |
+
* >
|
| 102 |
+
* >
|
| 103 |
+
* > // Explicitly specify full schema
|
| 104 |
+
* > static auto registry = c10::RegisterOperators()
|
| 105 |
+
* > .op(c10::RegisterOperators::options()
|
| 106 |
+
* > .schema("my_op(Tensor a) -> Tensor")
|
| 107 |
+
* > .kernel<my_kernel_cpu>(DispatchKey::CPU));
|
| 108 |
+
*/
|
| 109 |
+
Options&& schema(const std::string& schemaOrName) {
|
| 110 |
+
TORCH_CHECK(!schemaOrName_.has_value(), "Tried to register operator ", schemaOrName," but specified schema multiple times. You can only specify the schema once per operator registration.");
|
| 111 |
+
|
| 112 |
+
#if !defined(EXPOSE_C2_OPS) && defined(CAFFE2_IS_XPLAT_BUILD)
|
| 113 |
+
throw std::logic_error("Tried to register operator " + schemaOrName + ". We don't support registering c10 ops on mobile yet because the function schema parser isn't present in the mobile build.");
|
| 114 |
+
#else
|
| 115 |
+
schemaOrName_ = torch::jit::parseSchemaOrName(schemaOrName);
|
| 116 |
+
#endif
|
| 117 |
+
|
| 118 |
+
return std::move(*this);
|
| 119 |
+
}
|
| 120 |
+
|
| 121 |
+
/**
|
| 122 |
+
* Use this to register an operator whose kernel is implemented as a functor.
|
| 123 |
+
* The kernel is only called for inputs matching the given dispatch key.
|
| 124 |
+
* You can register multiple kernels for different dispatch keys.
|
| 125 |
+
*
|
| 126 |
+
* Example:
|
| 127 |
+
*
|
| 128 |
+
* > namespace {
|
| 129 |
+
* > class my_kernel_cpu final : public c10::OperatorKernel {
|
| 130 |
+
* > public:
|
| 131 |
+
* > Tensor operator()(Tensor a, Tensor b) {...}
|
| 132 |
+
* > };
|
| 133 |
+
* > }
|
| 134 |
+
* >
|
| 135 |
+
* > static auto registry = c10::RegisterOperators()
|
| 136 |
+
* > .op(c10::RegisterOperators::options()
|
| 137 |
+
* > .schema("my_op")
|
| 138 |
+
* > .kernel<my_kernel_cpu>(DispatchKey::CPU));
|
| 139 |
+
*
|
| 140 |
+
* The functor constructor can take arguments to configure the kernel.
|
| 141 |
+
* The arguments are defined in the kernel registration.
|
| 142 |
+
* Example:
|
| 143 |
+
*
|
| 144 |
+
* > namespace {
|
| 145 |
+
* > class my_kernel_cpu final : public c10::OperatorKernel {
|
| 146 |
+
* > public:
|
| 147 |
+
* > explicit my_kernel_cpu(std::string some_configuration, int a, bool b)
|
| 148 |
+
* > : ... {...}
|
| 149 |
+
* >
|
| 150 |
+
* > Tensor operator()(Tensor a, Tensor b) {...}
|
| 151 |
+
* > };
|
| 152 |
+
* > }
|
| 153 |
+
* >
|
| 154 |
+
* > static auto registry = c10::RegisterOperators()
|
| 155 |
+
* > .op(c10::RegisterOperators::options()
|
| 156 |
+
* > .schema("my_op")
|
| 157 |
+
* > .kernel<my_kernel_cpu>(DispatchKey::CPU, "some_configuration", 3, true));
|
| 158 |
+
*/
|
| 159 |
+
template<class KernelFunctor, class... ConstructorParameters>
|
| 160 |
+
// enable_if: only enable it if KernelFunctor is actually a functor
|
| 161 |
+
std::enable_if_t<guts::is_functor<KernelFunctor>::value, Options&&> kernel(DispatchKey dispatch_key, ConstructorParameters&&... constructorParameters) && {
|
| 162 |
+
static_assert(std::is_base_of_v<OperatorKernel, KernelFunctor>, "Tried to register a kernel functor using the kernel<Functor>() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
|
| 163 |
+
static_assert(std::is_constructible_v<KernelFunctor, ConstructorParameters...>, "Wrong argument list for constructor of kernel functor. The arguments to kernel<Functor>(arguments...) must match one of the constructors of Functor.");
|
| 164 |
+
|
| 165 |
+
return std::move(*this).kernel(
|
| 166 |
+
dispatch_key,
|
| 167 |
+
KernelFunction::makeFromUnboxedFunctor<false, KernelFunctor>(std::make_unique<KernelFunctor>(std::forward<ConstructorParameters>(constructorParameters)...)),
|
| 168 |
+
impl::CppSignature::make<KernelFunctor>(),
|
| 169 |
+
detail::inferFunctionSchemaFromFunctor<KernelFunctor>()
|
| 170 |
+
);
|
| 171 |
+
}
|
| 172 |
+
|
| 173 |
+
/**
|
| 174 |
+
* Use this to register an operator whose kernel is implemented as a functor.
|
| 175 |
+
* The kernel is a catch-all kernel, meaning it's called independent from
|
| 176 |
+
* the input. Dispatch is disabled for this operator.
|
| 177 |
+
*
|
| 178 |
+
* Example:
|
| 179 |
+
*
|
| 180 |
+
* > namespace {
|
| 181 |
+
* > class my_kernel_cpu final : public c10::OperatorKernel {
|
| 182 |
+
* > public:
|
| 183 |
+
* > Tensor operator()(Tensor a, Tensor b) {...}
|
| 184 |
+
* > };
|
| 185 |
+
* > }
|
| 186 |
+
* >
|
| 187 |
+
* > static auto registry = c10::RegisterOperators()
|
| 188 |
+
* > .op(c10::RegisterOperators::options()
|
| 189 |
+
* > .schema("my_op")
|
| 190 |
+
* > .catchAllKernel<my_kernel_cpu>());
|
| 191 |
+
*
|
| 192 |
+
* The functor constructor can take arguments to configure the kernel.
|
| 193 |
+
* The arguments are defined in the kernel registration.
|
| 194 |
+
* Example:
|
| 195 |
+
*
|
| 196 |
+
* > namespace {
|
| 197 |
+
* > class my_kernel_cpu final : public c10::OperatorKernel {
|
| 198 |
+
* > public:
|
| 199 |
+
* > explicit my_kernel_cpu(std::string some_configuration, int a, bool b)
|
| 200 |
+
* > : ... {...}
|
| 201 |
+
* >
|
| 202 |
+
* > Tensor operator()(Tensor a, Tensor b) {...}
|
| 203 |
+
* > };
|
| 204 |
+
* > }
|
| 205 |
+
* >
|
| 206 |
+
* > static auto registry = c10::RegisterOperators()
|
| 207 |
+
* > .op(c10::RegisterOperators::options()
|
| 208 |
+
* > .schema("my_op")
|
| 209 |
+
* > .catchAllKernel<my_kernel_cpu>("some_configuration", 3, true));
|
| 210 |
+
*/
|
| 211 |
+
template<class KernelFunctor, class... ConstructorParameters>
|
| 212 |
+
// enable_if: only enable it if KernelFunctor is actually a functor
|
| 213 |
+
std::enable_if_t<guts::is_functor<KernelFunctor>::value, Options&&> catchAllKernel(ConstructorParameters&&... constructorParameters) && {
|
| 214 |
+
static_assert(std::is_base_of_v<OperatorKernel, KernelFunctor>, "Tried to register a kernel functor using the kernel<Functor>() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
|
| 215 |
+
static_assert(std::is_constructible_v<KernelFunctor, ConstructorParameters...>, "Wrong argument list for constructor of kernel functor. The arguments to kernel<Functor>(arguments...) must match one of the constructors of Functor.");
|
| 216 |
+
|
| 217 |
+
return std::move(*this).kernel(
|
| 218 |
+
std::nullopt,
|
| 219 |
+
KernelFunction::makeFromUnboxedFunctor<false, KernelFunctor>(std::make_unique<KernelFunctor>(std::forward<ConstructorParameters>(constructorParameters)...)),
|
| 220 |
+
impl::CppSignature::make<KernelFunctor>(),
|
| 221 |
+
detail::inferFunctionSchemaFromFunctor<KernelFunctor>()
|
| 222 |
+
);
|
| 223 |
+
}
|
| 224 |
+
|
| 225 |
+
/**
|
| 226 |
+
* Use this to register an operator whose kernel is implemented by a function.
|
| 227 |
+
* The kernel is only called for inputs matching the given dispatch key.
|
| 228 |
+
* You can register multiple kernels for different dispatch keys.
|
| 229 |
+
*
|
| 230 |
+
* Example:
|
| 231 |
+
*
|
| 232 |
+
* > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} }
|
| 233 |
+
* >
|
| 234 |
+
* > static auto registry = c10::RegisterOperators()
|
| 235 |
+
* > .op(c10::RegisterOperators::options()
|
| 236 |
+
* > .schema("my_op")
|
| 237 |
+
* > .kernel<decltype(my_kernel_cpu), &my_kernel_cpu>(DispatchKey::CPU));
|
| 238 |
+
*/
|
| 239 |
+
template<class FuncType, FuncType* kernel_func>
|
| 240 |
+
// enable_if: only enable it if FuncType is actually a function
|
| 241 |
+
std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> kernel(DispatchKey dispatch_key) && {
|
| 242 |
+
static_assert(!std::is_same_v<FuncType, KernelFunction::BoxedKernelFunction>, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
|
| 243 |
+
static_assert(kernel_func != nullptr, "Kernel function cannot be nullptr");
|
| 244 |
+
|
| 245 |
+
return std::move(*this).kernel(
|
| 246 |
+
dispatch_key,
|
| 247 |
+
KernelFunction::makeFromUnboxedFunction(TORCH_FN(kernel_func)),
|
| 248 |
+
impl::CppSignature::make<FuncType>(),
|
| 249 |
+
// TODO Do schema inference without relying on WrapFunctionIntoFunctor
|
| 250 |
+
detail::inferFunctionSchemaFromFunctor<typename impl::WrapFunctionIntoFunctor<CompileTimeFunctionPointer<FuncType, kernel_func>>::type>()
|
| 251 |
+
);
|
| 252 |
+
}
|
| 253 |
+
|
| 254 |
+
/**
|
| 255 |
+
* Use this to register an operator whose kernel is implemented by a function.
|
| 256 |
+
* The kernel is a catch-all kernel, meaning it's called independent from
|
| 257 |
+
* the input. Dispatch is disabled for this operator.
|
| 258 |
+
*
|
| 259 |
+
* Example:
|
| 260 |
+
*
|
| 261 |
+
* > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} }
|
| 262 |
+
* >
|
| 263 |
+
* > static auto registry = c10::RegisterOperators()
|
| 264 |
+
* > .op(c10::RegisterOperators::options()
|
| 265 |
+
* > .schema("my_op")
|
| 266 |
+
* > .catchAllKernel<decltype(my_kernel_cpu), &my_kernel_cpu>());
|
| 267 |
+
*/
|
| 268 |
+
template<class FuncType, FuncType* kernel_func>
|
| 269 |
+
// enable_if: only enable it if FuncType is actually a function
|
| 270 |
+
std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> catchAllKernel() && {
|
| 271 |
+
static_assert(!std::is_same_v<FuncType, KernelFunction::BoxedKernelFunction>, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
|
| 272 |
+
static_assert(kernel_func != nullptr, "Kernel function cannot be nullptr");
|
| 273 |
+
|
| 274 |
+
return std::move(*this).kernel(
|
| 275 |
+
std::nullopt,
|
| 276 |
+
KernelFunction::makeFromUnboxedFunction(TORCH_FN(kernel_func)),
|
| 277 |
+
impl::CppSignature::make<FuncType>(),
|
| 278 |
+
// TODO Do schema inference without relying on WrapFunctionIntoFunctor
|
| 279 |
+
detail::inferFunctionSchemaFromFunctor<typename impl::WrapFunctionIntoFunctor<CompileTimeFunctionPointer<FuncType, kernel_func>>::type>()
|
| 280 |
+
);
|
| 281 |
+
}
|
| 282 |
+
|
| 283 |
+
template<class FuncType>
|
| 284 |
+
// enable_if: only enable it if FuncType is actually a function
|
| 285 |
+
std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> kernel(DispatchKey dispatch_key, FuncType* kernel_func) && {
|
| 286 |
+
static_assert(!std::is_same_v<FuncType, KernelFunction::BoxedKernelFunction>, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
|
| 287 |
+
TORCH_INTERNAL_ASSERT(kernel_func != nullptr, "Kernel function cannot be nullptr");
|
| 288 |
+
|
| 289 |
+
return std::move(*this).kernel(
|
| 290 |
+
dispatch_key,
|
| 291 |
+
KernelFunction::makeFromUnboxedRuntimeFunction(kernel_func),
|
| 292 |
+
impl::CppSignature::make<FuncType>(),
|
| 293 |
+
// TODO Do schema inference without relying on WrapFunctionIntoFunctor
|
| 294 |
+
detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<FuncType>>>()
|
| 295 |
+
);
|
| 296 |
+
}
|
| 297 |
+
|
| 298 |
+
template<class FuncType>
|
| 299 |
+
// enable_if: only enable it if FuncType is actually a function
|
| 300 |
+
std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> catchAllKernel(FuncType* kernel_func) && {
|
| 301 |
+
static_assert(!std::is_same_v<FuncType, KernelFunction::BoxedKernelFunction>, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
|
| 302 |
+
TORCH_INTERNAL_ASSERT(kernel_func != nullptr, "Kernel function cannot be nullptr");
|
| 303 |
+
|
| 304 |
+
return std::move(*this).kernel(
|
| 305 |
+
std::nullopt,
|
| 306 |
+
KernelFunction::makeFromUnboxedRuntimeFunction(kernel_func),
|
| 307 |
+
impl::CppSignature::make<FuncType>(),
|
| 308 |
+
// TODO Do schema inference without relying on WrapFunctionIntoFunctor
|
| 309 |
+
detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<FuncType>>>()
|
| 310 |
+
);
|
| 311 |
+
}
|
| 312 |
+
|
| 313 |
+
/**
|
| 314 |
+
* Use this to register an operator whose kernel is implemented as a lambda.
|
| 315 |
+
* The kernel is only called for inputs matching the given dispatch key.
|
| 316 |
+
* You can register multiple kernels for different dispatch keys.
|
| 317 |
+
*
|
| 318 |
+
* The lambda must be stateless, i.e. not have a capture. If your kernel
|
| 319 |
+
* needs to store some configuration parameters, write the kernel as a
|
| 320 |
+
* functor instead.
|
| 321 |
+
*
|
| 322 |
+
* Example:
|
| 323 |
+
*
|
| 324 |
+
* > static auto registry = c10::RegisterOperators()
|
| 325 |
+
* > .op(c10::RegisterOperators::options()
|
| 326 |
+
* > .schema("my_op")
|
| 327 |
+
* > .kernel(DispatchKey::CPU, [] (Tensor a) -> Tensor {...}));
|
| 328 |
+
*/
|
| 329 |
+
template<class Lambda>
|
| 330 |
+
// enable_if: only enable it if Lambda is a functor (note: lambdas are functors)
|
| 331 |
+
std::enable_if_t<
|
| 332 |
+
guts::is_functor<std::decay_t<Lambda>>::value
|
| 333 |
+
&& !std::is_same<typename guts::infer_function_traits_t<std::decay_t<Lambda>>::func_type, KernelFunction::BoxedKernelFunction>::value,
|
| 334 |
+
Options&&> kernel(DispatchKey dispatch_key, Lambda&& functor) && {
|
| 335 |
+
static_assert(!std::is_base_of<OperatorKernel, std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel is only meant to be used with lambdas. Your kernel is a functor. Please use the kernel<Functor>() API instead.");
|
| 336 |
+
|
| 337 |
+
// We don't support stateful lambdas (i.e. lambdas with a capture), because their
|
| 338 |
+
// behavior would be nonobvious. A functor kernel with cache gets a new instance of
|
| 339 |
+
// its cache each time the kernel is looked up from the dispatch table.
|
| 340 |
+
// A lambda with a capture would be global and share its capture between all kernel lookups.
|
| 341 |
+
// So, instead of making users having to think about it (including the thread-safety
|
| 342 |
+
// issues this causes), let's just forbid stateful lambdas altogether.
|
| 343 |
+
static_assert(guts::is_stateless_lambda<std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel only works for stateless lambdas (i.e. lambdas without captures). If you need a cache, please use the functor based API kernel<Functor>() instead.");
|
| 344 |
+
|
| 345 |
+
return std::move(*this).kernel(
|
| 346 |
+
dispatch_key,
|
| 347 |
+
KernelFunction::makeFromUnboxedLambda(std::forward<Lambda>(functor)),
|
| 348 |
+
impl::CppSignature::make<Lambda>(),
|
| 349 |
+
// TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
|
| 350 |
+
detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>()
|
| 351 |
+
);
|
| 352 |
+
}
|
| 353 |
+
|
| 354 |
+
/**
|
| 355 |
+
* Use this to register an operator whose kernel is implemented as a lambda.
|
| 356 |
+
* The kernel is a catch-all kernel, meaning it's called independent from
|
| 357 |
+
* the input. Dispatch is disabled for this operator.
|
| 358 |
+
*
|
| 359 |
+
* The lambda must be stateless, i.e. not have a capture. If your kernel
|
| 360 |
+
* needs to store some configuration parameters, write the kernel as a
|
| 361 |
+
* functor instead.
|
| 362 |
+
*
|
| 363 |
+
* Example:
|
| 364 |
+
*
|
| 365 |
+
* > static auto registry = c10::RegisterOperators()
|
| 366 |
+
* > .op(c10::RegisterOperators::options()
|
| 367 |
+
* > .schema("my_op")
|
| 368 |
+
* > .catchAllKernel([] (Tensor a) -> Tensor {...}));
|
| 369 |
+
*/
|
| 370 |
+
template<class Lambda>
|
| 371 |
+
// enable_if: only enable it if Lambda is a functor (note: lambdas are functors)
|
| 372 |
+
std::enable_if_t<
|
| 373 |
+
guts::is_functor<std::decay_t<Lambda>>::value
|
| 374 |
+
&& !std::is_same<typename guts::infer_function_traits_t<std::decay_t<Lambda>>::func_type, KernelFunction::BoxedKernelFunction>::value,
|
| 375 |
+
Options&&> catchAllKernel(Lambda&& lambda) && {
|
| 376 |
+
static_assert(!std::is_base_of<OperatorKernel, std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel is only meant to be used with lambdas. Your kernel is a functor. Please use the kernel<Functor>() API instead.");
|
| 377 |
+
|
| 378 |
+
// We don't support stateful lambdas (i.e. lambdas with a capture), because their
|
| 379 |
+
// behavior would be nonobvious.
|
| 380 |
+
// A lambda with a capture would be global and share its capture between all kernel lookups.
|
| 381 |
+
// This would be a likely source for unexpected race conditions, so we forbid it.
|
| 382 |
+
// If a kernel really needs global state, they can just have regular global state
|
| 383 |
+
// in their .cpp file next to the kernel lambda.
|
| 384 |
+
static_assert(guts::is_stateless_lambda<std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel only works for stateless lambdas (i.e. lambdas without captures). If you need a cache, please use the functor based API kernel<Functor>() instead.");
|
| 385 |
+
|
| 386 |
+
return std::move(*this).kernel(
|
| 387 |
+
std::nullopt,
|
| 388 |
+
KernelFunction::makeFromUnboxedLambda(std::forward<Lambda>(lambda)),
|
| 389 |
+
impl::CppSignature::make<Lambda>(),
|
| 390 |
+
// TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
|
| 391 |
+
detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>()
|
| 392 |
+
);
|
| 393 |
+
}
|
| 394 |
+
|
| 395 |
+
Options&& aliasAnalysis(AliasAnalysisKind aliasAnalysisKind) && {
|
| 396 |
+
TORCH_CHECK(!aliasAnalysisKind_.has_value(), "You can only call aliasAnalysis() once per operator registration.");
|
| 397 |
+
aliasAnalysisKind_ = aliasAnalysisKind;
|
| 398 |
+
return std::move(*this);
|
| 399 |
+
}
|
| 400 |
+
|
| 401 |
+
private:
|
| 402 |
+
Options&& kernel(std::optional<DispatchKey> dispatch_key, KernelFunction&& func, std::optional<impl::CppSignature> cpp_signature, std::unique_ptr<FunctionSchema>&& inferred_function_schema) && {
|
| 403 |
+
KernelRegistrationConfig config;
|
| 404 |
+
config.dispatch_key = dispatch_key;
|
| 405 |
+
config.func = std::move(func);
|
| 406 |
+
config.cpp_signature = cpp_signature;
|
| 407 |
+
config.inferred_function_schema = std::move(inferred_function_schema);
|
| 408 |
+
kernels.push_back(std::move(config));
|
| 409 |
+
return std::move(*this);
|
| 410 |
+
}
|
| 411 |
+
|
| 412 |
+
Options()
|
| 413 |
+
: schemaOrName_(std::nullopt)
|
| 414 |
+
, kernels()
|
| 415 |
+
, aliasAnalysisKind_(std::nullopt)
|
| 416 |
+
{}
|
| 417 |
+
|
| 418 |
+
// KernelRegistrationConfig accumulates all information from the config
|
| 419 |
+
// parameters passed to a RegisterOperators::op() call into one object.
|
| 420 |
+
struct KernelRegistrationConfig final {
|
| 421 |
+
KernelRegistrationConfig()
|
| 422 |
+
: dispatch_key(std::nullopt)
|
| 423 |
+
, func()
|
| 424 |
+
, cpp_signature(std::nullopt)
|
| 425 |
+
, inferred_function_schema(nullptr)
|
| 426 |
+
{}
|
| 427 |
+
|
| 428 |
+
std::optional<DispatchKey> dispatch_key;
|
| 429 |
+
KernelFunction func;
|
| 430 |
+
std::optional<impl::CppSignature> cpp_signature;
|
| 431 |
+
std::unique_ptr<FunctionSchema> inferred_function_schema;
|
| 432 |
+
};
|
| 433 |
+
|
| 434 |
+
std::optional<std::variant<OperatorName, FunctionSchema>> schemaOrName_;
|
| 435 |
+
|
| 436 |
+
std::vector<KernelRegistrationConfig> kernels;
|
| 437 |
+
std::optional<AliasAnalysisKind> aliasAnalysisKind_;
|
| 438 |
+
friend class RegisterOperators;
|
| 439 |
+
friend class Library;
|
| 440 |
+
};
|
| 441 |
+
|
| 442 |
+
/**
|
| 443 |
+
* Call this to get an instance of registration options, which
|
| 444 |
+
* can be passed to a call to RegisterOperators::op() to specify
|
| 445 |
+
* these options for the operator registration.
|
| 446 |
+
* See class doc comment for examples.
|
| 447 |
+
*/
|
| 448 |
+
static Options options() {
|
| 449 |
+
return {};
|
| 450 |
+
}
|
| 451 |
+
|
| 452 |
+
/**
|
| 453 |
+
* Call this to register an operator. See class doc comment for examples.
|
| 454 |
+
*/
|
| 455 |
+
RegisterOperators&& op(Options&& options) && {
|
| 456 |
+
checkSchemaAndRegisterOp_(std::move(options));
|
| 457 |
+
return std::move(*this);
|
| 458 |
+
}
|
| 459 |
+
|
| 460 |
+
// Regular mutator version of the && version above
|
| 461 |
+
RegisterOperators& op(Options&& options) & {
|
| 462 |
+
checkSchemaAndRegisterOp_(std::move(options));
|
| 463 |
+
return *this;
|
| 464 |
+
}
|
| 465 |
+
|
| 466 |
+
/**
|
| 467 |
+
* This is a shorthand for RegisterOperators::op(Options) where you can
|
| 468 |
+
* specify the operator schema outside of the options parameter.
|
| 469 |
+
* See class doc comment for examples.
|
| 470 |
+
*/
|
| 471 |
+
RegisterOperators&& op(const std::string& schemaOrName, Options&& options = RegisterOperators::options()) && {
|
| 472 |
+
return std::move(*this).op(std::move(options).schema(schemaOrName));
|
| 473 |
+
}
|
| 474 |
+
|
| 475 |
+
// internal only for registering caffe2 ops
|
| 476 |
+
RegisterOperators&& op(FunctionSchema schema, Options&& options) && {
|
| 477 |
+
return std::move(*this).op(std::move(options).schema(std::move(schema)));
|
| 478 |
+
}
|
| 479 |
+
|
| 480 |
+
template<class FuncType>
|
| 481 |
+
explicit RegisterOperators(const std::string& schemaOrName, FuncType&& func, Options&& options = RegisterOperators::options())
|
| 482 |
+
: RegisterOperators() {
|
| 483 |
+
std::move(*this).op(schemaOrName, std::forward<FuncType>(func), std::move(options));
|
| 484 |
+
}
|
| 485 |
+
|
| 486 |
+
/**
|
| 487 |
+
* This API registers an operator based on a kernel function pointer.
|
| 488 |
+
*
|
| 489 |
+
* Given a kernel
|
| 490 |
+
*
|
| 491 |
+
* > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} }
|
| 492 |
+
*
|
| 493 |
+
* This API looks like:
|
| 494 |
+
*
|
| 495 |
+
* > static auto registry = c10::RegisterOperators()
|
| 496 |
+
* > .op("my_op", &my_kernel_cpu);
|
| 497 |
+
*
|
| 498 |
+
* If your kernel is small and the overhead of calling it matters,
|
| 499 |
+
* then this API might be the wrong choice since the following API
|
| 500 |
+
* has a slightly lower overhead for calling into the kernel:
|
| 501 |
+
*
|
| 502 |
+
* > static auto registry = c10::RegisterOperators()
|
| 503 |
+
* > .op("my_op", c10::RegisterOperators::options()
|
| 504 |
+
* > .kernel<decltype(my_kernel_cpu), &my_kernel_cpu>());
|
| 505 |
+
*
|
| 506 |
+
* Or, alternatively, write your kernel as a functor:
|
| 507 |
+
*
|
| 508 |
+
* > namespace {
|
| 509 |
+
* > class my_kernel_cpu final : public c10::OperatorKernel {
|
| 510 |
+
* > public:
|
| 511 |
+
* > Tensor operator()(Tensor a, Tensor b) {...}
|
| 512 |
+
* > };
|
| 513 |
+
* > }
|
| 514 |
+
* >
|
| 515 |
+
* > static auto registry = c10::RegisterOperators()
|
| 516 |
+
* > .op("my_op", c10::RegisterOperators::options()
|
| 517 |
+
* > .kernel<my_kernel_cpu>());
|
| 518 |
+
*/
|
| 519 |
+
template<class FuncType>
|
| 520 |
+
// enable_if: only enable it if FuncType is actually a function, but not a stack based BoxedKernelFunction.
|
| 521 |
+
std::enable_if_t<guts::is_function_type<FuncType>::value && !std::is_same_v<FuncType, KernelFunction::BoxedKernelFunction>, RegisterOperators&&>
|
| 522 |
+
op(const std::string& schemaOrName, FuncType* func, Options&& options = RegisterOperators::options()) && {
|
| 523 |
+
constexpr bool AllowLegacyTypes = true;
|
| 524 |
+
return std::move(*this).op(std::move(options).schema(schemaOrName).kernel(
|
| 525 |
+
std::nullopt,
|
| 526 |
+
KernelFunction::makeFromUnboxedRuntimeFunction<AllowLegacyTypes>(func),
|
| 527 |
+
impl::CppSignature::make<FuncType>(),
|
| 528 |
+
// TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
|
| 529 |
+
detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<FuncType>>>()
|
| 530 |
+
));
|
| 531 |
+
}
|
| 532 |
+
|
| 533 |
+
/**
|
| 534 |
+
* This API registers an operator based on a kernel lambda.
|
| 535 |
+
*
|
| 536 |
+
* This API looks like:
|
| 537 |
+
*
|
| 538 |
+
* > static auto registry = c10::RegisterOperators()
|
| 539 |
+
* > .op("my_op", [] (Tensor a, Tensor b) {...});
|
| 540 |
+
*
|
| 541 |
+
* This is equivalent to:
|
| 542 |
+
*
|
| 543 |
+
* > static auto registry = c10::RegisterOperators()
|
| 544 |
+
* > .op("my_op", c10::RegisterOperators::options()
|
| 545 |
+
* > .catchAllKernel([] (Tensor a, Tensor b) {...}));
|
| 546 |
+
*
|
| 547 |
+
*/
|
| 548 |
+
template<class Lambda>
|
| 549 |
+
// enable_if: only enable it if Lambda is actually a stateless lambda
|
| 550 |
+
std::enable_if_t<guts::is_functor<Lambda>::value && guts::is_stateless_lambda<std::decay_t<Lambda>>::value, RegisterOperators&&>
|
| 551 |
+
op(const std::string& schemaOrName, Lambda&& lambda, Options&& options = RegisterOperators::options()) && {
|
| 552 |
+
static_assert(!std::is_base_of_v<OperatorKernel, Lambda>, "c10::OperatorKernel is part of the new kernel registration API and shouldn't be used together with the deprecated registration API. Please use the new RegisterOperators::options().kernel() based API instead.");
|
| 553 |
+
|
| 554 |
+
constexpr bool AllowLegacyTypes = true;
|
| 555 |
+
return std::move(*this).op(std::move(options).schema(schemaOrName).kernel(
|
| 556 |
+
std::nullopt,
|
| 557 |
+
KernelFunction::makeFromUnboxedLambda<AllowLegacyTypes>(std::forward<Lambda>(lambda)),
|
| 558 |
+
impl::CppSignature::make<Lambda>(),
|
| 559 |
+
// TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
|
| 560 |
+
detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>()
|
| 561 |
+
));
|
| 562 |
+
}
|
| 563 |
+
|
| 564 |
+
template<class Lambda>
|
| 565 |
+
C10_DEPRECATED_MESSAGE("Registering operator kernels with stateful lambdas (i.e. lambdas with a capture) has non-obvious behavior. This is deprecated. Please use a lambda without a capture or a functor class instead.")
|
| 566 |
+
// enable_if: only enable it if Lambda is actually a functor but not a stateless lambda
|
| 567 |
+
std::enable_if_t<guts::is_functor<Lambda>::value && !guts::is_stateless_lambda<std::decay_t<Lambda>>::value, RegisterOperators&&>
|
| 568 |
+
op(const std::string& schemaOrName, Lambda&& lambda, Options&& options = RegisterOperators::options()) && {
|
| 569 |
+
static_assert(!std::is_base_of_v<OperatorKernel, Lambda>, "c10::OperatorKernel is part of the new kernel registration API and shouldn't be used together with the deprecated registration API. Please use the new RegisterOperators::options().kernel() based API instead.");
|
| 570 |
+
|
| 571 |
+
constexpr bool AllowLegacyTypes = true;
|
| 572 |
+
return std::move(*this).op(std::move(options).schema(schemaOrName).kernel(
|
| 573 |
+
std::nullopt,
|
| 574 |
+
KernelFunction::makeFromUnboxedLambda<AllowLegacyTypes>(std::forward<Lambda>(lambda)),
|
| 575 |
+
impl::CppSignature::make<Lambda>(),
|
| 576 |
+
// TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
|
| 577 |
+
detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>()
|
| 578 |
+
));
|
| 579 |
+
}
|
| 580 |
+
|
| 581 |
+
private:
|
| 582 |
+
void checkSchemaAndRegisterOp_(Options&& config);
|
| 583 |
+
|
| 584 |
+
static c10::FunctionSchema inferSchemaFromKernels_(const OperatorName& opNameStr, const Options& options);
|
| 585 |
+
void checkNoDuplicateKernels_(const Options& options);
|
| 586 |
+
void registerOp_(Options&& options);
|
| 587 |
+
|
| 588 |
+
std::vector<RegistrationHandleRAII> registrars_;
|
| 589 |
+
};
|
| 590 |
+
|
| 591 |
+
} // namespace c10
|
| 592 |
+
|
| 593 |
+
namespace torch {
|
| 594 |
+
// Old-style API
|
| 595 |
+
using RegisterOperators = c10::RegisterOperators;
|
| 596 |
+
}
|
lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec128/vec128.h
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// ARM NEON uses 128-bit vector registers.
|
| 3 |
+
|
| 4 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 5 |
+
|
| 6 |
+
#ifdef __aarch64__
|
| 7 |
+
#if !defined(CPU_CAPABILITY_SVE)
|
| 8 |
+
#include <ATen/cpu/vec/vec128/vec128_bfloat16_neon.h>
|
| 9 |
+
#include <ATen/cpu/vec/vec128/vec128_float_neon.h>
|
| 10 |
+
#include <ATen/cpu/vec/vec128/vec128_half_neon.h>
|
| 11 |
+
#endif
|
| 12 |
+
|
| 13 |
+
#include <ATen/cpu/vec/vec128/vec128_convert.h>
|
| 14 |
+
#endif
|
lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec128/vec128_bfloat16_neon.h
ADDED
|
@@ -0,0 +1,556 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
| 4 |
+
// See Note [Do not compile initializers with AVX]
|
| 5 |
+
#include <ATen/cpu/vec/vec128/vec128_float_neon.h>
|
| 6 |
+
#include <ATen/cpu/vec/vec128/vec128_reduced_precision_common_neon.h>
|
| 7 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 8 |
+
#include <c10/util/BFloat16.h>
|
| 9 |
+
#include <c10/util/bit_cast.h>
|
| 10 |
+
#include <c10/util/irange.h>
|
| 11 |
+
|
| 12 |
+
namespace at::vec {
|
| 13 |
+
// See Note [CPU_CAPABILITY namespace]
|
| 14 |
+
inline
|
| 15 |
+
namespace CPU_CAPABILITY {
|
| 16 |
+
|
| 17 |
+
// Following vec128_half_neon.h, we only support aarch64.
|
| 18 |
+
#if !defined(C10_MOBILE) && defined(__aarch64__)
|
| 19 |
+
#ifdef __BIG_ENDIAN__
|
| 20 |
+
#error "Big endian is not supported."
|
| 21 |
+
#endif
|
| 22 |
+
|
| 23 |
+
// Unlike the float16_t family of types, bfloat16_t is not available
|
| 24 |
+
// when we're not targeting bfloat16 hardware support on some
|
| 25 |
+
// platforms (but not Mac, so we have to be careful not to shadow the
|
| 26 |
+
// definitions in case they are actually there!). (See
|
| 27 |
+
// https://godbolt.org/z/orv6e94n4 ) So, we need to handle it as
|
| 28 |
+
// uint16_t in that case.
|
| 29 |
+
#define IMPLEMENT_AT_BF16_SHIM(vec_suffix) \
|
| 30 |
+
inline at_bfloat16x4_t at_vget_low_bf16( \
|
| 31 |
+
at_bfloat16x8_t a) { \
|
| 32 |
+
return vget_low_##vec_suffix(a); \
|
| 33 |
+
} \
|
| 34 |
+
\
|
| 35 |
+
inline at_bfloat16x4_t at_vget_high_bf16( \
|
| 36 |
+
at_bfloat16x8_t a) { \
|
| 37 |
+
return vget_high_##vec_suffix(a); \
|
| 38 |
+
} \
|
| 39 |
+
\
|
| 40 |
+
inline at_bfloat16x8_t at_vcombine_bf16( \
|
| 41 |
+
at_bfloat16x4_t low, \
|
| 42 |
+
at_bfloat16x4_t high) { \
|
| 43 |
+
return vcombine_##vec_suffix(low, high); \
|
| 44 |
+
} \
|
| 45 |
+
\
|
| 46 |
+
inline at_bfloat16x8_t at_vdupq_n_bf16( \
|
| 47 |
+
at_bfloat16_t value) { \
|
| 48 |
+
return vdupq_n_##vec_suffix(value); \
|
| 49 |
+
} \
|
| 50 |
+
\
|
| 51 |
+
inline at_bfloat16x8_t at_vld1q_bf16( \
|
| 52 |
+
const at_bfloat16_t* ptr) { \
|
| 53 |
+
return vld1q_##vec_suffix(ptr); \
|
| 54 |
+
} \
|
| 55 |
+
\
|
| 56 |
+
inline void at_vst1q_bf16( \
|
| 57 |
+
at_bfloat16_t* ptr, \
|
| 58 |
+
at_bfloat16x8_t value) { \
|
| 59 |
+
vst1q_##vec_suffix(ptr, value); \
|
| 60 |
+
} \
|
| 61 |
+
\
|
| 62 |
+
template <typename T> \
|
| 63 |
+
inline at_bfloat16x8_t at_vreinterpretq_bf16_u16(T val) { \
|
| 64 |
+
if constexpr (std::is_same_v<at_bfloat16x8_t, uint16x8_t>) { \
|
| 65 |
+
return val; \
|
| 66 |
+
} else { \
|
| 67 |
+
return vreinterpretq_bf16_u16(val); \
|
| 68 |
+
} \
|
| 69 |
+
} \
|
| 70 |
+
template <typename T> \
|
| 71 |
+
inline at_bfloat16x4_t at_vreinterpret_bf16_u16(T val) { \
|
| 72 |
+
if constexpr (std::is_same_v<at_bfloat16x4_t, uint16x4_t>) { \
|
| 73 |
+
return val; \
|
| 74 |
+
} else { \
|
| 75 |
+
return vreinterpret_bf16_u16(val); \
|
| 76 |
+
} \
|
| 77 |
+
} \
|
| 78 |
+
template <typename T> \
|
| 79 |
+
inline uint16x8_t at_vreinterpretq_u16_bf16(T val) { \
|
| 80 |
+
if constexpr (std::is_same_v<at_bfloat16x8_t, uint16x8_t>) { \
|
| 81 |
+
return val; \
|
| 82 |
+
} else { \
|
| 83 |
+
return vreinterpretq_u16_bf16(val); \
|
| 84 |
+
} \
|
| 85 |
+
} \
|
| 86 |
+
template <typename T> \
|
| 87 |
+
inline uint16x4_t at_vreinterpret_u16_bf16(T val) { \
|
| 88 |
+
if constexpr (std::is_same_v<at_bfloat16x4_t, uint16x4_t>) { \
|
| 89 |
+
return val; \
|
| 90 |
+
} else { \
|
| 91 |
+
return vreinterpret_u16_bf16(val); \
|
| 92 |
+
} \
|
| 93 |
+
}
|
| 94 |
+
|
| 95 |
+
#ifdef __ARM_FEATURE_BF16
|
| 96 |
+
using at_bfloat16x8_t = bfloat16x8_t;
|
| 97 |
+
using at_bfloat16x4_t = bfloat16x4_t;
|
| 98 |
+
using at_bfloat16_t = bfloat16_t;
|
| 99 |
+
IMPLEMENT_AT_BF16_SHIM(bf16)
|
| 100 |
+
#define at_vsetq_lane_bf16 vsetq_lane_bf16
|
| 101 |
+
#define at_vgetq_lane_bf16 vgetq_lane_bf16
|
| 102 |
+
#else
|
| 103 |
+
using at_bfloat16x8_t = uint16x8_t;
|
| 104 |
+
using at_bfloat16x4_t = uint16x4_t;
|
| 105 |
+
using at_bfloat16_t = uint16_t;
|
| 106 |
+
IMPLEMENT_AT_BF16_SHIM(u16)
|
| 107 |
+
#define at_vsetq_lane_bf16 vsetq_lane_u16
|
| 108 |
+
#define at_vgetq_lane_bf16 vgetq_lane_u16
|
| 109 |
+
#endif // __ARM_FEATURE_BF16
|
| 110 |
+
|
| 111 |
+
template <int index, bool mask_val>
|
| 112 |
+
struct BlendBFloat16Regs {
|
| 113 |
+
static at_bfloat16x8_t impl(
|
| 114 |
+
const at_bfloat16x8_t& a,
|
| 115 |
+
const at_bfloat16x8_t& b,
|
| 116 |
+
at_bfloat16x8_t& res);
|
| 117 |
+
};
|
| 118 |
+
|
| 119 |
+
template <int index>
|
| 120 |
+
struct BlendBFloat16Regs<index, true> {
|
| 121 |
+
static at_bfloat16x8_t impl(
|
| 122 |
+
const at_bfloat16x8_t& a,
|
| 123 |
+
const at_bfloat16x8_t& b,
|
| 124 |
+
at_bfloat16x8_t& res) {
|
| 125 |
+
return at_vsetq_lane_bf16(at_vgetq_lane_bf16(b, index), res, index);
|
| 126 |
+
}
|
| 127 |
+
};
|
| 128 |
+
|
| 129 |
+
template <int index>
|
| 130 |
+
struct BlendBFloat16Regs<index, false> {
|
| 131 |
+
static at_bfloat16x8_t impl(
|
| 132 |
+
const at_bfloat16x8_t& a,
|
| 133 |
+
const at_bfloat16x8_t& b,
|
| 134 |
+
at_bfloat16x8_t& res) {
|
| 135 |
+
return at_vsetq_lane_bf16(at_vgetq_lane_bf16(a, index), res, index);
|
| 136 |
+
}
|
| 137 |
+
};
|
| 138 |
+
|
| 139 |
+
template <>
|
| 140 |
+
class Vectorized<c10::BFloat16> : public Vectorized16<at_bfloat16x8_t, c10::BFloat16, BlendBFloat16Regs, Vectorized<c10::BFloat16>> {
|
| 141 |
+
using Base = Vectorized16<at_bfloat16x8_t, c10::BFloat16, BlendBFloat16Regs, Vectorized<c10::BFloat16>>;
|
| 142 |
+
friend Base;
|
| 143 |
+
friend std::tuple<Vectorized<float>, Vectorized<float>> convert_bfloat16_float(const Vectorized<c10::BFloat16>& a);
|
| 144 |
+
friend Vectorized<c10::BFloat16> convert_float_bfloat16(const Vectorized<float>& a, const Vectorized<float>& b);
|
| 145 |
+
private:
|
| 146 |
+
Vectorized<c10::BFloat16> map2(
|
| 147 |
+
const Vectorized<c10::BFloat16>& second,
|
| 148 |
+
c10::BFloat16 (*const f)(c10::BFloat16, c10::BFloat16)) const {
|
| 149 |
+
__at_align__ c10::BFloat16 tmp_first[size()];
|
| 150 |
+
__at_align__ c10::BFloat16 tmp_second[size()];
|
| 151 |
+
store(tmp_first); // store this to tmp_first
|
| 152 |
+
second.store(tmp_second);
|
| 153 |
+
for (const auto i : c10::irange(size())) {
|
| 154 |
+
tmp_first[i] = f(tmp_first[i], tmp_second[i]);
|
| 155 |
+
}
|
| 156 |
+
return loadu(tmp_first);
|
| 157 |
+
}
|
| 158 |
+
|
| 159 |
+
static float32x4_t convert_f32_bf16(at_bfloat16x4_t bf16) {
|
| 160 |
+
#ifdef __ARM_FEATURE_BF16
|
| 161 |
+
return vcvt_f32_bf16(bf16);
|
| 162 |
+
#else
|
| 163 |
+
int32x4_t shift = vdupq_n_s32(16);
|
| 164 |
+
return vreinterpretq_f32_u32(vshlq_u32(vmovl_u16(bf16), shift));
|
| 165 |
+
#endif // __ARM_FEATURE_BF16
|
| 166 |
+
}
|
| 167 |
+
|
| 168 |
+
static at_bfloat16x4_t convert_bf16_f32(const Vectorized<float>& f32) {
|
| 169 |
+
#ifdef __ARM_FEATURE_BF16
|
| 170 |
+
return vcvt_bf16_f32(f32);
|
| 171 |
+
#else
|
| 172 |
+
static_assert(std::is_same_v<uint16x4_t, at_bfloat16x4_t>);
|
| 173 |
+
uint32x4_t as_uint32 = vreinterpretq_u32_f32(f32);
|
| 174 |
+
uint32x4_t rounding_bias = vaddq_u32(vandq_u32(vshrq_n_u32(as_uint32, 16), vdupq_n_u32(1)), vdupq_n_u32(0x7FFF));
|
| 175 |
+
at_bfloat16x4_t rounded = vshrn_n_u32(vaddq_u32(as_uint32, rounding_bias), 16);
|
| 176 |
+
const auto bf16_nan = vdup_n_u16(0x7FC0);
|
| 177 |
+
return vbsl_u16(vmovn_u32(vreinterpretq_u32_f32(f32.isnan())), bf16_nan, rounded);
|
| 178 |
+
#endif // __ARM_FEATURE_BF16
|
| 179 |
+
}
|
| 180 |
+
|
| 181 |
+
Vectorized<c10::BFloat16> map_with_vec_float_method(
|
| 182 |
+
Vectorized<float> (Vectorized<float>::*m)() const) const {
|
| 183 |
+
float32x4_t v00 = convert_f32_bf16(at_vget_low_bf16(values));
|
| 184 |
+
float32x4_t v01 = convert_f32_bf16(at_vget_high_bf16(values));
|
| 185 |
+
Vectorized<float> mv0 = (Vectorized<float>(v00).*m)();
|
| 186 |
+
Vectorized<float> mv1 = (Vectorized<float>(v01).*m)();
|
| 187 |
+
at_bfloat16x4_t r00 = convert_bf16_f32(mv0);
|
| 188 |
+
at_bfloat16x4_t r01 = convert_bf16_f32(mv1);
|
| 189 |
+
return Vectorized<c10::BFloat16>(at_vcombine_bf16(r00, r01));
|
| 190 |
+
}
|
| 191 |
+
|
| 192 |
+
Vectorized<c10::BFloat16> map2_with_vec_float_method(
|
| 193 |
+
const Vectorized<c10::BFloat16>& second,
|
| 194 |
+
Vectorized<float> (Vectorized<float>::*m)(const Vectorized<float>&)
|
| 195 |
+
const) const {
|
| 196 |
+
float32x4_t v00 = convert_f32_bf16(at_vget_low_bf16(values));
|
| 197 |
+
float32x4_t v01 = convert_f32_bf16(at_vget_high_bf16(values));
|
| 198 |
+
float32x4_t second_v00 = convert_f32_bf16(at_vget_low_bf16(second.values));
|
| 199 |
+
float32x4_t second_v01 = convert_f32_bf16(at_vget_high_bf16(second.values));
|
| 200 |
+
Vectorized<float> mv0 = (Vectorized<float>(v00).*m)(second_v00);
|
| 201 |
+
Vectorized<float> mv1 = (Vectorized<float>(v01).*m)(second_v01);
|
| 202 |
+
at_bfloat16x4_t r00 = convert_bf16_f32(mv0);
|
| 203 |
+
at_bfloat16x4_t r01 = convert_bf16_f32(mv1);
|
| 204 |
+
return Vectorized<c10::BFloat16>(at_vcombine_bf16(r00, r01));
|
| 205 |
+
}
|
| 206 |
+
|
| 207 |
+
Vectorized<c10::BFloat16> map2_bitmask_with_vec_float_method(
|
| 208 |
+
const Vectorized<c10::BFloat16>& second,
|
| 209 |
+
Vectorized<float> (Vectorized<float>::*m)(const Vectorized<float>&)
|
| 210 |
+
const) const {
|
| 211 |
+
float32x4_t v00 = convert_f32_bf16(at_vget_low_bf16(values));
|
| 212 |
+
float32x4_t v01 = convert_f32_bf16(at_vget_high_bf16(values));
|
| 213 |
+
float32x4_t second_v00 = convert_f32_bf16(at_vget_low_bf16(second.values));
|
| 214 |
+
float32x4_t second_v01 = convert_f32_bf16(at_vget_high_bf16(second.values));
|
| 215 |
+
Vectorized<float> mv0 = (Vectorized<float>(v00).*m)(second_v00);
|
| 216 |
+
Vectorized<float> mv1 = (Vectorized<float>(v01).*m)(second_v01);
|
| 217 |
+
// Assume the operator returns a bitmask, not "real" floats, and
|
| 218 |
+
// just narrow the bits. All-ones is a NaN and will get mangled by conversion!
|
| 219 |
+
at_bfloat16x4_t r00 = at_vreinterpret_bf16_u16(vmovn_u32(vreinterpretq_u32_f32(mv0)));
|
| 220 |
+
at_bfloat16x4_t r01 = at_vreinterpret_bf16_u16(vmovn_u32(vreinterpretq_u32_f32(mv1)));
|
| 221 |
+
return Vectorized<c10::BFloat16>(at_vcombine_bf16(r00, r01));
|
| 222 |
+
}
|
| 223 |
+
|
| 224 |
+
public:
|
| 225 |
+
using Vectorized16::Vectorized16;
|
| 226 |
+
|
| 227 |
+
Vectorized() = default;
|
| 228 |
+
|
| 229 |
+
Vectorized(c10::BFloat16 val) : Vectorized16(at_vdupq_n_bf16(val.x)) {}
|
| 230 |
+
Vectorized(float val) : Vectorized(c10::BFloat16(val)) {}
|
| 231 |
+
Vectorized(
|
| 232 |
+
value_type val0,
|
| 233 |
+
value_type val1,
|
| 234 |
+
value_type val2,
|
| 235 |
+
value_type val3,
|
| 236 |
+
value_type val4,
|
| 237 |
+
value_type val5,
|
| 238 |
+
value_type val6,
|
| 239 |
+
value_type val7)
|
| 240 |
+
: Vectorized16(at_bfloat16x8_t{
|
| 241 |
+
c10::bit_cast<at_bfloat16_t>(val0.x),
|
| 242 |
+
c10::bit_cast<at_bfloat16_t>(val1.x),
|
| 243 |
+
c10::bit_cast<at_bfloat16_t>(val2.x),
|
| 244 |
+
c10::bit_cast<at_bfloat16_t>(val3.x),
|
| 245 |
+
c10::bit_cast<at_bfloat16_t>(val4.x),
|
| 246 |
+
c10::bit_cast<at_bfloat16_t>(val5.x),
|
| 247 |
+
c10::bit_cast<at_bfloat16_t>(val6.x),
|
| 248 |
+
c10::bit_cast<at_bfloat16_t>(val7.x)}) {}
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
static Vectorized<c10::BFloat16> blendv(
|
| 252 |
+
const Vectorized<c10::BFloat16>& a,
|
| 253 |
+
const Vectorized<c10::BFloat16>& b,
|
| 254 |
+
const Vectorized<c10::BFloat16>& mask) {
|
| 255 |
+
// NOTE: blendv has the same problems as it does for Half; see comments in vec128_half_neon.h.
|
| 256 |
+
Vectorized<c10::BFloat16> vec(mask.values);
|
| 257 |
+
vec.values = at_vreinterpretq_bf16_u16(
|
| 258 |
+
vbslq_u16(
|
| 259 |
+
at_vreinterpretq_u16_bf16(vec.values),
|
| 260 |
+
at_vreinterpretq_u16_bf16(b.values),
|
| 261 |
+
at_vreinterpretq_u16_bf16(a.values)));
|
| 262 |
+
return vec;
|
| 263 |
+
}
|
| 264 |
+
static Vectorized<c10::BFloat16> set(
|
| 265 |
+
const Vectorized<c10::BFloat16>& a,
|
| 266 |
+
const Vectorized<c10::BFloat16>& b,
|
| 267 |
+
int64_t count = size()) {
|
| 268 |
+
uint16_t pre_mask[size()] = {0};
|
| 269 |
+
for (int i = 0; i < count; i++) {
|
| 270 |
+
pre_mask[i] = 0xFFFF;
|
| 271 |
+
}
|
| 272 |
+
uint16x8_t mask = vld1q_u16(pre_mask);
|
| 273 |
+
|
| 274 |
+
Vectorized<c10::BFloat16> vec(
|
| 275 |
+
at_vreinterpretq_bf16_u16(
|
| 276 |
+
vbslq_u16(
|
| 277 |
+
at_vreinterpretq_u16_bf16(mask),
|
| 278 |
+
at_vreinterpretq_u16_bf16(b.values),
|
| 279 |
+
at_vreinterpretq_u16_bf16(a.values))));
|
| 280 |
+
|
| 281 |
+
return vec;
|
| 282 |
+
}
|
| 283 |
+
static Vectorized<c10::BFloat16> loadu(const void* ptr, int64_t count = size()) {
|
| 284 |
+
if (count == size()) {
|
| 285 |
+
return at_vld1q_bf16(reinterpret_cast<const at_bfloat16_t*>(ptr));
|
| 286 |
+
}
|
| 287 |
+
__at_align__ at_bfloat16_t tmp_values[size()];
|
| 288 |
+
for (const auto i : c10::irange(size())) {
|
| 289 |
+
tmp_values[i] = 0;
|
| 290 |
+
}
|
| 291 |
+
std::memcpy(
|
| 292 |
+
tmp_values,
|
| 293 |
+
reinterpret_cast<const at_bfloat16_t*>(ptr),
|
| 294 |
+
count * sizeof(at_bfloat16_t));
|
| 295 |
+
return at_vld1q_bf16(reinterpret_cast<const at_bfloat16_t*>(tmp_values));
|
| 296 |
+
}
|
| 297 |
+
void store(void* ptr, int64_t count = size()) const {
|
| 298 |
+
if (count == size()) {
|
| 299 |
+
at_vst1q_bf16(reinterpret_cast<at_bfloat16_t*>(ptr), values);
|
| 300 |
+
return;
|
| 301 |
+
} else {
|
| 302 |
+
at_bfloat16_t tmp_values[size()];
|
| 303 |
+
at_vst1q_bf16(reinterpret_cast<at_bfloat16_t*>(tmp_values), values);
|
| 304 |
+
std::memcpy(ptr, tmp_values, count * sizeof(at_bfloat16_t));
|
| 305 |
+
}
|
| 306 |
+
}
|
| 307 |
+
Vectorized<c10::BFloat16> isnan() const {
|
| 308 |
+
// NOTE: we could make this faster by doing vectorized checks of
|
| 309 |
+
// exponent/payload bits.
|
| 310 |
+
__at_align__ c10::BFloat16 tmp[size()];
|
| 311 |
+
__at_align__ c10::BFloat16 res[size()];
|
| 312 |
+
store(tmp);
|
| 313 |
+
for (const auto i : c10::irange(size())) {
|
| 314 |
+
if (_isnan(tmp[i])) {
|
| 315 |
+
std::memset(static_cast<void*>(&res[i]), 0xFF, sizeof(c10::BFloat16));
|
| 316 |
+
} else {
|
| 317 |
+
std::memset(static_cast<void*>(&res[i]), 0, sizeof(c10::BFloat16));
|
| 318 |
+
}
|
| 319 |
+
}
|
| 320 |
+
return loadu(res);
|
| 321 |
+
}
|
| 322 |
+
bool has_inf_nan() const {
|
| 323 |
+
__at_align__ c10::BFloat16 tmp[size()];
|
| 324 |
+
store(tmp);
|
| 325 |
+
for (const auto i : c10::irange(size())) {
|
| 326 |
+
if (_isnan(tmp[i]) || _isinf(tmp[i])) {
|
| 327 |
+
return true;
|
| 328 |
+
}
|
| 329 |
+
}
|
| 330 |
+
return false;
|
| 331 |
+
}
|
| 332 |
+
#define DEFINE_UNARY_ELEMENTWISE_FUNC_VIA_FLOAT_METHOD(name) \
|
| 333 |
+
Vectorized name() const { \
|
| 334 |
+
return map_with_vec_float_method(&Vectorized<float>::name); \
|
| 335 |
+
}
|
| 336 |
+
|
| 337 |
+
#define DEFINE_BINARY_COMPARISON_OPERATOR_VIA_FLOAT_METHOD(name) \
|
| 338 |
+
Vectorized name(const Vectorized& other) const { \
|
| 339 |
+
return map2_bitmask_with_vec_float_method(other, &Vectorized<float>::name); \
|
| 340 |
+
}
|
| 341 |
+
|
| 342 |
+
DEFINE_UNARY_ELEMENTWISE_FUNC_VIA_FLOAT_METHOD(abs)
|
| 343 |
+
Vectorized frac() const;
|
| 344 |
+
DEFINE_UNARY_ELEMENTWISE_FUNC_VIA_FLOAT_METHOD(neg)
|
| 345 |
+
DEFINE_UNARY_ELEMENTWISE_FUNC_VIA_FLOAT_METHOD(trunc)
|
| 346 |
+
DEFINE_UNARY_ELEMENTWISE_FUNC_VIA_FLOAT_METHOD(sqrt)
|
| 347 |
+
DEFINE_UNARY_ELEMENTWISE_FUNC_VIA_FLOAT_METHOD(reciprocal)
|
| 348 |
+
DEFINE_BINARY_COMPARISON_OPERATOR_VIA_FLOAT_METHOD(operator==)
|
| 349 |
+
DEFINE_BINARY_COMPARISON_OPERATOR_VIA_FLOAT_METHOD(operator!=)
|
| 350 |
+
DEFINE_BINARY_COMPARISON_OPERATOR_VIA_FLOAT_METHOD(operator<)
|
| 351 |
+
DEFINE_BINARY_COMPARISON_OPERATOR_VIA_FLOAT_METHOD(operator<=)
|
| 352 |
+
DEFINE_BINARY_COMPARISON_OPERATOR_VIA_FLOAT_METHOD(operator>)
|
| 353 |
+
DEFINE_BINARY_COMPARISON_OPERATOR_VIA_FLOAT_METHOD(operator>=)
|
| 354 |
+
|
| 355 |
+
#undef DEFINE_UNARY_ELEMENTWISE_FUNC_VIA_FLOAT_METHOD
|
| 356 |
+
#undef DEFINE_BINARY_ELEMENTWISE_FUNC_VIA_FLOAT_METHOD
|
| 357 |
+
|
| 358 |
+
Vectorized eq(const Vectorized& other) const;
|
| 359 |
+
Vectorized ne(const Vectorized& other) const;
|
| 360 |
+
Vectorized gt(const Vectorized& other) const;
|
| 361 |
+
Vectorized ge(const Vectorized& other) const;
|
| 362 |
+
Vectorized lt(const Vectorized& other) const;
|
| 363 |
+
Vectorized le(const Vectorized& other) const;
|
| 364 |
+
}; // Vectorized<c10::BFloat16>
|
| 365 |
+
|
| 366 |
+
inline std::tuple<Vectorized<float>, Vectorized<float>> convert_bfloat16_float(const Vectorized<c10::BFloat16>& a) {
|
| 367 |
+
static_assert(Vectorized<c10::BFloat16>::size() == 2 * Vectorized<float>::size());
|
| 368 |
+
at_bfloat16x8_t x = a;
|
| 369 |
+
float32x4_t x1 = Vectorized<c10::BFloat16>::convert_f32_bf16(at_vget_low_bf16(x));
|
| 370 |
+
float32x4_t x2 = Vectorized<c10::BFloat16>::convert_f32_bf16(at_vget_high_bf16(x));
|
| 371 |
+
return { Vectorized<float>(x1), Vectorized<float>(x2) };
|
| 372 |
+
}
|
| 373 |
+
inline Vectorized<c10::BFloat16> convert_float_bfloat16(const Vectorized<float>& a, const Vectorized<float>& b) {
|
| 374 |
+
static_assert(Vectorized<c10::BFloat16>::size() == 2 * Vectorized<float>::size());
|
| 375 |
+
at_bfloat16x4_t x1 = Vectorized<c10::BFloat16>::convert_bf16_f32(a);
|
| 376 |
+
at_bfloat16x4_t x2 = Vectorized<c10::BFloat16>::convert_bf16_f32(b);
|
| 377 |
+
return Vectorized<c10::BFloat16>(at_vcombine_bf16(x1, x2));
|
| 378 |
+
}
|
| 379 |
+
|
| 380 |
+
template <typename Op>
|
| 381 |
+
Vectorized<c10::BFloat16> binary_operator_via_float(
|
| 382 |
+
Op op,
|
| 383 |
+
const Vectorized<c10::BFloat16>& a,
|
| 384 |
+
const Vectorized<c10::BFloat16>& b) {
|
| 385 |
+
const auto [a_float_low, a_float_high] = convert_bfloat16_float(a);
|
| 386 |
+
const auto [b_float_low, b_float_high] = convert_bfloat16_float(b);
|
| 387 |
+
return convert_float_bfloat16(
|
| 388 |
+
op(a_float_low, b_float_low),
|
| 389 |
+
op(a_float_high, b_float_high));
|
| 390 |
+
}
|
| 391 |
+
|
| 392 |
+
template <>
|
| 393 |
+
Vectorized<c10::BFloat16> inline operator+(
|
| 394 |
+
const Vectorized<c10::BFloat16>& a,
|
| 395 |
+
const Vectorized<c10::BFloat16>& b) {
|
| 396 |
+
return binary_operator_via_float(std::plus<Vectorized<float>>(), a, b);
|
| 397 |
+
}
|
| 398 |
+
|
| 399 |
+
template <>
|
| 400 |
+
Vectorized<c10::BFloat16> inline operator-(
|
| 401 |
+
const Vectorized<c10::BFloat16>& a,
|
| 402 |
+
const Vectorized<c10::BFloat16>& b) {
|
| 403 |
+
return binary_operator_via_float(std::minus<Vectorized<float>>(), a, b);
|
| 404 |
+
}
|
| 405 |
+
|
| 406 |
+
template <>
|
| 407 |
+
Vectorized<c10::BFloat16> inline operator*(
|
| 408 |
+
const Vectorized<c10::BFloat16>& a,
|
| 409 |
+
const Vectorized<c10::BFloat16>& b) {
|
| 410 |
+
return binary_operator_via_float(std::multiplies<Vectorized<float>>(), a, b);
|
| 411 |
+
}
|
| 412 |
+
|
| 413 |
+
template <>
|
| 414 |
+
Vectorized<c10::BFloat16> inline operator/(
|
| 415 |
+
const Vectorized<c10::BFloat16>& a,
|
| 416 |
+
const Vectorized<c10::BFloat16>& b) {
|
| 417 |
+
return binary_operator_via_float(std::divides<Vectorized<float>>(), a, b);
|
| 418 |
+
}
|
| 419 |
+
|
| 420 |
+
// frac. Implement this here so we can use subtraction
|
| 421 |
+
inline Vectorized<c10::BFloat16> Vectorized<c10::BFloat16>::frac() const {
|
| 422 |
+
return *this - this->trunc();
|
| 423 |
+
}
|
| 424 |
+
|
| 425 |
+
template <>
|
| 426 |
+
Vectorized<c10::BFloat16> inline maximum(
|
| 427 |
+
const Vectorized<c10::BFloat16>& a,
|
| 428 |
+
const Vectorized<c10::BFloat16>& b) {
|
| 429 |
+
return binary_operator_via_float(
|
| 430 |
+
static_cast<Vectorized<float>(*)(const Vectorized<float>&, const Vectorized<float>&)>(&maximum),
|
| 431 |
+
a,
|
| 432 |
+
b);
|
| 433 |
+
}
|
| 434 |
+
|
| 435 |
+
template <>
|
| 436 |
+
Vectorized<c10::BFloat16> inline minimum(
|
| 437 |
+
const Vectorized<c10::BFloat16>& a,
|
| 438 |
+
const Vectorized<c10::BFloat16>& b) {
|
| 439 |
+
return binary_operator_via_float(
|
| 440 |
+
static_cast<Vectorized<float>(*)(const Vectorized<float>&, const Vectorized<float>&)>(&minimum),
|
| 441 |
+
a,
|
| 442 |
+
b);
|
| 443 |
+
}
|
| 444 |
+
|
| 445 |
+
template <>
|
| 446 |
+
Vectorized<c10::BFloat16> inline clamp(
|
| 447 |
+
const Vectorized<c10::BFloat16>& a,
|
| 448 |
+
const Vectorized<c10::BFloat16>& min,
|
| 449 |
+
const Vectorized<c10::BFloat16>& max) {
|
| 450 |
+
return minimum(max, maximum(min, a));
|
| 451 |
+
}
|
| 452 |
+
|
| 453 |
+
template <>
|
| 454 |
+
Vectorized<c10::BFloat16> inline clamp_max(
|
| 455 |
+
const Vectorized<c10::BFloat16>& a,
|
| 456 |
+
const Vectorized<c10::BFloat16>& max) {
|
| 457 |
+
return minimum(max, a);
|
| 458 |
+
}
|
| 459 |
+
|
| 460 |
+
template <>
|
| 461 |
+
Vectorized<c10::BFloat16> inline clamp_min(
|
| 462 |
+
const Vectorized<c10::BFloat16>& a,
|
| 463 |
+
const Vectorized<c10::BFloat16>& min) {
|
| 464 |
+
return maximum(min, a);
|
| 465 |
+
}
|
| 466 |
+
|
| 467 |
+
template <>
|
| 468 |
+
Vectorized<c10::BFloat16> inline operator&(
|
| 469 |
+
const Vectorized<c10::BFloat16>& a,
|
| 470 |
+
const Vectorized<c10::BFloat16>& b) {
|
| 471 |
+
return Vectorized<c10::BFloat16>(at_vreinterpretq_bf16_u16(vandq_u16(
|
| 472 |
+
at_vreinterpretq_u16_bf16(a), at_vreinterpretq_u16_bf16(b))));
|
| 473 |
+
}
|
| 474 |
+
|
| 475 |
+
template <>
|
| 476 |
+
Vectorized<c10::BFloat16> inline operator|(
|
| 477 |
+
const Vectorized<c10::BFloat16>& a,
|
| 478 |
+
const Vectorized<c10::BFloat16>& b) {
|
| 479 |
+
return Vectorized<c10::BFloat16>(at_vreinterpretq_bf16_u16(vorrq_u16(
|
| 480 |
+
at_vreinterpretq_u16_bf16(a), at_vreinterpretq_u16_bf16(b))));
|
| 481 |
+
}
|
| 482 |
+
|
| 483 |
+
template <>
|
| 484 |
+
Vectorized<c10::BFloat16> inline operator^(
|
| 485 |
+
const Vectorized<c10::BFloat16>& a,
|
| 486 |
+
const Vectorized<c10::BFloat16>& b) {
|
| 487 |
+
return Vectorized<c10::BFloat16>(at_vreinterpretq_bf16_u16(veorq_u16(
|
| 488 |
+
at_vreinterpretq_u16_bf16(a), at_vreinterpretq_u16_bf16(b))));
|
| 489 |
+
}
|
| 490 |
+
|
| 491 |
+
inline Vectorized<c10::BFloat16> Vectorized<c10::BFloat16>::eq(
|
| 492 |
+
const Vectorized<c10::BFloat16>& other) const {
|
| 493 |
+
return (*this == other) & Vectorized<c10::BFloat16>(1);
|
| 494 |
+
}
|
| 495 |
+
|
| 496 |
+
inline Vectorized<c10::BFloat16> Vectorized<c10::BFloat16>::ne(
|
| 497 |
+
const Vectorized<c10::BFloat16>& other) const {
|
| 498 |
+
return (*this != other) & Vectorized<c10::BFloat16>(1);
|
| 499 |
+
}
|
| 500 |
+
|
| 501 |
+
inline Vectorized<c10::BFloat16> Vectorized<c10::BFloat16>::gt(
|
| 502 |
+
const Vectorized<c10::BFloat16>& other) const {
|
| 503 |
+
return (*this > other) & Vectorized<c10::BFloat16>(1);
|
| 504 |
+
}
|
| 505 |
+
|
| 506 |
+
inline Vectorized<c10::BFloat16> Vectorized<c10::BFloat16>::ge(
|
| 507 |
+
const Vectorized<c10::BFloat16>& other) const {
|
| 508 |
+
return (*this >= other) & Vectorized<c10::BFloat16>(1);
|
| 509 |
+
}
|
| 510 |
+
|
| 511 |
+
inline Vectorized<c10::BFloat16> Vectorized<c10::BFloat16>::lt(
|
| 512 |
+
const Vectorized<c10::BFloat16>& other) const {
|
| 513 |
+
return (*this < other) & Vectorized<c10::BFloat16>(1);
|
| 514 |
+
}
|
| 515 |
+
|
| 516 |
+
inline Vectorized<c10::BFloat16> Vectorized<c10::BFloat16>::le(
|
| 517 |
+
const Vectorized<c10::BFloat16>& other) const {
|
| 518 |
+
return (*this <= other) & Vectorized<c10::BFloat16>(1);
|
| 519 |
+
}
|
| 520 |
+
|
| 521 |
+
template <>
|
| 522 |
+
Vectorized<c10::BFloat16> inline fmadd(
|
| 523 |
+
const Vectorized<c10::BFloat16>& a,
|
| 524 |
+
const Vectorized<c10::BFloat16>& b,
|
| 525 |
+
const Vectorized<c10::BFloat16>& c) {
|
| 526 |
+
// NOTE [BF16 FMA]: There isn't an FMA that accumulates into BF16! Also,
|
| 527 |
+
// vbfmlalbq_f32 and vbfmlaltq_f32 take the even and odd-numbered
|
| 528 |
+
// elements, not the bottom and top half, so they don't seem
|
| 529 |
+
// particularly useful here. Ideally we would include dot product in
|
| 530 |
+
// the Vectorized interface...
|
| 531 |
+
const auto [a_float_low, a_float_high] = convert_bfloat16_float(a);
|
| 532 |
+
const auto [b_float_low, b_float_high] = convert_bfloat16_float(b);
|
| 533 |
+
const auto [c_float_low, c_float_high] = convert_bfloat16_float(c);
|
| 534 |
+
return convert_float_bfloat16(
|
| 535 |
+
fmadd(a_float_low, b_float_low, c_float_low),
|
| 536 |
+
fmadd(a_float_high, b_float_high, c_float_high));
|
| 537 |
+
}
|
| 538 |
+
|
| 539 |
+
template <>
|
| 540 |
+
Vectorized<c10::BFloat16> inline fmsub(
|
| 541 |
+
const Vectorized<c10::BFloat16>& a,
|
| 542 |
+
const Vectorized<c10::BFloat16>& b,
|
| 543 |
+
const Vectorized<c10::BFloat16>& c) {
|
| 544 |
+
// See NOTE [BF16 FMA] above.
|
| 545 |
+
const auto [a_float_low, a_float_high] = convert_bfloat16_float(a);
|
| 546 |
+
const auto [b_float_low, b_float_high] = convert_bfloat16_float(b);
|
| 547 |
+
const auto [c_float_low, c_float_high] = convert_bfloat16_float(c);
|
| 548 |
+
return convert_float_bfloat16(
|
| 549 |
+
fmsub(a_float_low, b_float_low, c_float_low),
|
| 550 |
+
fmsub(a_float_high, b_float_high, c_float_high));
|
| 551 |
+
}
|
| 552 |
+
|
| 553 |
+
#endif // !defined(C10_MOBILE) && defined(__aarch64__)
|
| 554 |
+
|
| 555 |
+
} // namespace CPU_CAPABILITY
|
| 556 |
+
} // namespace at::vec
|
lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec128/vec128_convert.h
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 3 |
+
#include <ATen/cpu/vec/vec_convert.h>
|
| 4 |
+
|
| 5 |
+
namespace at::vec {
|
| 6 |
+
inline namespace CPU_CAPABILITY {
|
| 7 |
+
#if (defined(__aarch64__) && !defined(CPU_CAPABILITY_SVE256))
|
| 8 |
+
template <typename src_t>
|
| 9 |
+
struct VecConvert<
|
| 10 |
+
float,
|
| 11 |
+
1,
|
| 12 |
+
src_t,
|
| 13 |
+
1,
|
| 14 |
+
typename std::enable_if_t<is_8bit_integer_v<src_t>,
|
| 15 |
+
void>> {
|
| 16 |
+
static inline VectorizedN<float, 1> apply(const VectorizedN<src_t, 1>& src) {
|
| 17 |
+
return convert_int8_half_register_to_float(src[0]);
|
| 18 |
+
}
|
| 19 |
+
};
|
| 20 |
+
template <typename src_t>
|
| 21 |
+
struct VecConvert<
|
| 22 |
+
float,
|
| 23 |
+
2,
|
| 24 |
+
src_t,
|
| 25 |
+
1,
|
| 26 |
+
typename std::enable_if_t<is_8bit_integer_v<src_t>,
|
| 27 |
+
void>> {
|
| 28 |
+
static inline VectorizedN<float, 2> apply(const VectorizedN<src_t, 1>& src) {
|
| 29 |
+
const auto [v0, v1] = convert_int8_to_float(src[0]);
|
| 30 |
+
return VectorizedN<float, 2>(v0, v1);
|
| 31 |
+
}
|
| 32 |
+
};
|
| 33 |
+
|
| 34 |
+
template <>
|
| 35 |
+
struct VecConvert<float, 2, BFloat16, 1> {
|
| 36 |
+
static inline VectorizedN<float, 2> apply(
|
| 37 |
+
const VectorizedN<BFloat16, 1>& src) {
|
| 38 |
+
VectorizedN<float, 2> result;
|
| 39 |
+
uint16x8_t u16_8 = vld1q_u16(reinterpret_cast<const uint16_t*>(&src[0]));
|
| 40 |
+
auto u16_low1 = vget_low_u16(u16_8);
|
| 41 |
+
auto u16_high1 = vget_high_u16(u16_8);
|
| 42 |
+
float32x4_t f32x4_0 = vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(u16_low1), 16));
|
| 43 |
+
float32x4_t f32x4_1 = vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(u16_high1), 16));
|
| 44 |
+
result[0] = f32x4_0;
|
| 45 |
+
result[1] = f32x4_1;
|
| 46 |
+
return result;
|
| 47 |
+
}
|
| 48 |
+
};
|
| 49 |
+
// Half register to full register.
|
| 50 |
+
template <>
|
| 51 |
+
struct VecConvert<float, 1, BFloat16, 1> {
|
| 52 |
+
static inline VectorizedN<float, 1> apply(
|
| 53 |
+
const VectorizedN<BFloat16, 1>& src) {
|
| 54 |
+
VectorizedN<float, 1> result;
|
| 55 |
+
uint16x4_t u16_8 = vld1_u16(reinterpret_cast<const uint16_t*>(&src[0]));
|
| 56 |
+
float32x4_t f32x4_0 = vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(u16_8), 16));
|
| 57 |
+
result[0] = f32x4_0;
|
| 58 |
+
return result;
|
| 59 |
+
}
|
| 60 |
+
};
|
| 61 |
+
|
| 62 |
+
#endif // defined(__aarch64__) && !defined(CPU_CAPABILITY_SVE256)
|
| 63 |
+
} // namespace CPU_CAPABILITY
|
| 64 |
+
} // namespace at::vec
|
lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec128/vec128_float_neon.h
ADDED
|
@@ -0,0 +1,580 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
| 4 |
+
// See Note [Do not compile initializers with AVX]
|
| 5 |
+
|
| 6 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 7 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 8 |
+
#include <c10/util/irange.h>
|
| 9 |
+
|
| 10 |
+
#if defined(__aarch64__) && defined(AT_BUILD_ARM_VEC256_WITH_SLEEF)
|
| 11 |
+
#include <sleef.h>
|
| 12 |
+
#endif
|
| 13 |
+
|
| 14 |
+
// Sleef offers vectorized versions of some transcedentals
|
| 15 |
+
// such as sin, cos, tan etc..
|
| 16 |
+
// However for now opting for STL, since we are not building
|
| 17 |
+
// with Sleef for mobile yet.
|
| 18 |
+
|
| 19 |
+
namespace at::vec {
|
| 20 |
+
// See Note [CPU_CAPABILITY namespace]
|
| 21 |
+
inline namespace CPU_CAPABILITY {
|
| 22 |
+
|
| 23 |
+
// Right now contains only aarch64 implementation.
|
| 24 |
+
// Due to follow two reasons aarch32 is not currently supported.
|
| 25 |
+
// 1. Due to difference in ISA been aarch32 and aarch64, intrinsics
|
| 26 |
+
// that work for aarch64 dont work for aarch32.
|
| 27 |
+
// 2. Android NDK r21 has problems with compiling aarch32.
|
| 28 |
+
// Clang seg faults.
|
| 29 |
+
// https://github.com/android/ndk/issues/1248
|
| 30 |
+
// https://bugs.llvm.org/show_bug.cgi?id=45824
|
| 31 |
+
// Most likely we will do aarch32 support with inline asm.
|
| 32 |
+
#if defined(__aarch64__)
|
| 33 |
+
|
| 34 |
+
#ifdef __BIG_ENDIAN__
|
| 35 |
+
#error "Big endian is not supported."
|
| 36 |
+
#endif
|
| 37 |
+
|
| 38 |
+
#if defined(AT_BUILD_ARM_VEC256_WITH_SLEEF)
|
| 39 |
+
#define USE_SLEEF(sleef_code, non_sleef_code) sleef_code
|
| 40 |
+
#else
|
| 41 |
+
#define USE_SLEEF(sleef_code, non_sleef_code) non_sleef_code
|
| 42 |
+
#endif
|
| 43 |
+
|
| 44 |
+
template<int index, bool mask_val>
|
| 45 |
+
struct BlendRegs {
|
| 46 |
+
static float32x4_t impl(
|
| 47 |
+
const float32x4_t& a, const float32x4_t& b, float32x4_t& res);
|
| 48 |
+
};
|
| 49 |
+
|
| 50 |
+
template<int index>
|
| 51 |
+
struct BlendRegs<index, true>{
|
| 52 |
+
static float32x4_t impl(
|
| 53 |
+
const float32x4_t& a, const float32x4_t& b, float32x4_t& res) {
|
| 54 |
+
return vsetq_lane_f32(vgetq_lane_f32(b, index), res, index);
|
| 55 |
+
}
|
| 56 |
+
};
|
| 57 |
+
|
| 58 |
+
template<int index>
|
| 59 |
+
struct BlendRegs<index, false>{
|
| 60 |
+
static float32x4_t impl(
|
| 61 |
+
const float32x4_t& a, const float32x4_t& b, float32x4_t& res) {
|
| 62 |
+
return vsetq_lane_f32(vgetq_lane_f32(a, index), res, index);
|
| 63 |
+
}
|
| 64 |
+
};
|
| 65 |
+
|
| 66 |
+
template <> class Vectorized<float> {
|
| 67 |
+
private:
|
| 68 |
+
float32x4_t values;
|
| 69 |
+
public:
|
| 70 |
+
using value_type = float;
|
| 71 |
+
using size_type = int;
|
| 72 |
+
static constexpr size_type size() {
|
| 73 |
+
return 4;
|
| 74 |
+
}
|
| 75 |
+
Vectorized() {}
|
| 76 |
+
Vectorized(float32x4_t v) : values(v) {}
|
| 77 |
+
Vectorized(float val) : values{vdupq_n_f32(val)} {}
|
| 78 |
+
Vectorized(float val0, float val1, float val2, float val3) :
|
| 79 |
+
values{val0, val1, val2, val3} {}
|
| 80 |
+
Vectorized(float (&arr)[4]) : Vectorized(arr[0], arr[1], arr[2], arr[3]) {}
|
| 81 |
+
operator float32x4_t() const {
|
| 82 |
+
return values;
|
| 83 |
+
}
|
| 84 |
+
template <int64_t mask>
|
| 85 |
+
static Vectorized<float> blend(const Vectorized<float>& a, const Vectorized<float>& b) {
|
| 86 |
+
Vectorized<float> vec;
|
| 87 |
+
vec.values =
|
| 88 |
+
BlendRegs<0, (mask & 0x01)!=0>::impl(
|
| 89 |
+
a.values, b.values, vec.values);
|
| 90 |
+
vec.values =
|
| 91 |
+
BlendRegs<1, (mask & 0x02)!=0>::impl(
|
| 92 |
+
a.values, b.values, vec.values);
|
| 93 |
+
vec.values =
|
| 94 |
+
BlendRegs<2, (mask & 0x04)!=0>::impl(
|
| 95 |
+
a.values, b.values, vec.values);
|
| 96 |
+
vec.values =
|
| 97 |
+
BlendRegs<3, (mask & 0x08)!=0>::impl(
|
| 98 |
+
a.values, b.values, vec.values);
|
| 99 |
+
return vec;
|
| 100 |
+
}
|
| 101 |
+
static Vectorized<float> blendv(const Vectorized<float>& a, const Vectorized<float>& b,
|
| 102 |
+
const Vectorized<float>& mask) {
|
| 103 |
+
// TODO
|
| 104 |
+
// NB: This requires that each value, i.e., each uint value,
|
| 105 |
+
// of the mask either all be zeros or all be 1s.
|
| 106 |
+
// We perhaps need some kind of an assert?
|
| 107 |
+
// But that will affect performance.
|
| 108 |
+
Vectorized<float> vec(mask.values);
|
| 109 |
+
vec.values = vbslq_f32(
|
| 110 |
+
vreinterpretq_u32_f32(vec.values),
|
| 111 |
+
b.values,
|
| 112 |
+
a.values);
|
| 113 |
+
return vec;
|
| 114 |
+
}
|
| 115 |
+
template<typename step_t>
|
| 116 |
+
static Vectorized<float> arange(float base = 0.f, step_t step = static_cast<step_t>(1)) {
|
| 117 |
+
const Vectorized<float> base_vec(base);
|
| 118 |
+
const Vectorized<float> step_vec(step);
|
| 119 |
+
const Vectorized<float> step_sizes(0, 1, 2, 3);
|
| 120 |
+
return fmadd(step_sizes, step_vec, base_vec);
|
| 121 |
+
}
|
| 122 |
+
// Returns a vector whose first `count` lanes come from `b` and the remaining
// lanes from `a`. count == 0 yields `a`; count >= size() (or a negative
// count, matching the old switch's default) yields `b`.
//
// Fix: the previous implementation kept three near-identical cases, each with
// a function-local `static uint32x4_t` mask — static data in a header (which
// this header family explicitly forbids) and a thread-safe-init guard on
// every call. The mask is now computed with non-static locals.
static Vectorized<float> set(const Vectorized<float>& a, const Vectorized<float>& b,
    int64_t count = size()) {
  if (count == 0) {
    return a;
  }
  if (count >= size()) {
    return b;
  }
  // Lane i takes b iff i < count: compare lane ids against the splatted count.
  // (A negative count wraps to a huge uint32_t and selects all lanes of b,
  // preserving the old default-case behavior.)
  const uint32x4_t lane_ids = {0, 1, 2, 3};
  const uint32x4_t mask =
      vcltq_u32(lane_ids, vdupq_n_u32(static_cast<uint32_t>(count)));
  Vectorized<float> vec;
  vec.values = vbslq_f32(mask, b.values, a.values);
  return vec;
}
// Load `count` floats from (possibly unaligned) memory. For a full load this
// is a single vld1q; for a partial load the prefix is copied into a
// zero-initialized aligned stack buffer first so the unused lanes are 0.
// NOTE(review): assumes count <= size(); a larger count would overrun the
// stack buffer — TODO confirm callers guarantee this.
static Vectorized<float> loadu(const void* ptr, int64_t count = size()) {
  if (count == size()) {
    return vld1q_f32(reinterpret_cast<const float*>(ptr));
  } else {
    __at_align__ float tmp_values[size()];
    for (const auto i : c10::irange(size())) {
      tmp_values[i] = 0.0;
    }
    std::memcpy(
        tmp_values,
        reinterpret_cast<const float*>(ptr),
        count * sizeof(float));
    return vld1q_f32(reinterpret_cast<const float*>(tmp_values));
  }
}
// Store `count` lanes to (possibly unaligned) memory at `ptr`. For a partial
// store, spill all lanes to a stack buffer and memcpy the prefix.
// Fix: the spill buffer is now __at_align__, consistent with loadu() and the
// other scratch buffers in this class.
void store(void* ptr, int64_t count = size()) const {
  if (count == size()) {
    vst1q_f32(reinterpret_cast<float*>(ptr), values);
  } else {
    __at_align__ float tmp_values[size()];
    vst1q_f32(reinterpret_cast<float*>(tmp_values), values);
    std::memcpy(ptr, tmp_values, count * sizeof(float));
  }
}
// Very slow implementation of indexing.
// Only required because vec256_qint refers to this.
// Once we specialize that implementation for ARM
// this should be removed. TODO (kimishpatel)
// Returns lane `idx` by spilling the whole register to the stack.
float operator[](int idx) const {
  __at_align__ float tmp[size()];
  store(tmp);
  return tmp[idx];
}
// Non-const overload kept for interface parity with other backends; it is
// identical to the const version (both return by value).
float operator[](int idx) {
  __at_align__ float tmp[size()];
  store(tmp);
  return tmp[idx];
}
// For boolean version where we want to if any 1/all zero
// etc. can be done faster in a different way.
// Returns an int whose bit i is set iff lane i compares equal to 0.f.
int zero_mask() const {
  __at_align__ float lanes[size()];
  store(lanes);
  int bits = 0;
  // Build the mask from the highest lane down so each lane shifts into place.
  for (int lane = size() - 1; lane >= 0; --lane) {
    bits = (bits << 1) | (lanes[lane] == 0.f ? 1 : 0);
  }
  return bits;
}
// Lanewise NaN test: x != x only for NaN. Returns an all-ones (as bits)
// lane where the input is NaN, all-zeros otherwise, reinterpreted as float.
Vectorized<float> isnan() const {
  return vreinterpretq_f32_u32(vmvnq_u32(vceqq_f32(values, values)));
}
// True iff any lane is NaN or +/- infinity (scalar scan over a spill buffer).
bool has_inf_nan() const {
  __at_align__ float lanes[size()];
  store(lanes);
  for (int i = 0; i < size(); ++i) {
    if (_isnan(lanes[i]) || _isinf(lanes[i])) {
      return true;
    }
  }
  return false;
}
// Applies scalar function `f` to every lane (slow fallback path: spill,
// apply, reload).
Vectorized<float> map(float (*const f)(float)) const {
  __at_align__ float tmp[size()];
  store(tmp);
  for (const auto i : c10::irange(size())) {
    tmp[i] = f(tmp[i]);
  }
  return loadu(tmp);
}
// Applies scalar binary function `f` lanewise to (this, second) — the slow
// fallback used when no vectorized implementation is available.
Vectorized<float> map2(
    const Vectorized<float>& second,
    float (*const f)(float, float)) const {
  __at_align__ float tmp[size()];
  __at_align__ float tmp_second[size()];
  store(tmp);
  second.store(tmp_second);
  for (const auto i : c10::irange(size())) {
    tmp[i] = f(tmp[i], tmp_second[i]);
  }
  return loadu(tmp);
}
// Lanewise absolute value.
Vectorized<float> abs() const {
  return Vectorized<float>(vabsq_f32(values));
}
// Lanewise angle of a real number: 0 for x >= 0, pi for x < 0, and the NaN
// itself for NaN lanes (final blendv passes *this through where isnan()).
Vectorized<float> angle() const {
  auto zero = Vectorized<float>(0);
  auto pi = Vectorized<float>(c10::pi<float>);
  auto tmp = blendv(zero, pi, *this < zero);
  return blendv(tmp, *this, isnan());
}
// Real part of a real vector is the vector itself.
Vectorized<float> real() const {
  return *this;
}
// Imaginary part of a real vector is zero.
Vectorized<float> imag() const {
  return Vectorized<float>(0.f);
}
// Complex conjugate of a real vector is the vector itself.
Vectorized<float> conj() const {
  return *this;
}
// Defines a unary elementwise member `name()` that uses the SLEEF vector math
// library when USE_SLEEF selects its first argument, and otherwise falls back
// to a scalar std::name applied lanewise via map().
#define DEFINE_SLEEF_COMPATIBLE_UNARY_ELEMENTWISE_FUNC_WITH_SLEEF_NAME(name, sleef_name) \
  Vectorized<float> name() const {             \
    return USE_SLEEF(                          \
      Vectorized<float>(sleef_name(values)),   \
      map(std::name)                           \
    );                                         \
  }

// Common case: the SLEEF symbol follows the Sleef_<name>f4_u10 naming
// (u10 = 1.0 ULP maximum-error variant).
#define DEFINE_SLEEF_COMPATIBLE_UNARY_ELEMENTWISE_FUNC(name) \
  DEFINE_SLEEF_COMPATIBLE_UNARY_ELEMENTWISE_FUNC_WITH_SLEEF_NAME(name, Sleef_##name##f4_u10)

// Inverse / hyperbolic trigonometric functions.
DEFINE_SLEEF_COMPATIBLE_UNARY_ELEMENTWISE_FUNC(acos)
DEFINE_SLEEF_COMPATIBLE_UNARY_ELEMENTWISE_FUNC(acosh)
DEFINE_SLEEF_COMPATIBLE_UNARY_ELEMENTWISE_FUNC(asin)
DEFINE_SLEEF_COMPATIBLE_UNARY_ELEMENTWISE_FUNC(atan)
DEFINE_SLEEF_COMPATIBLE_UNARY_ELEMENTWISE_FUNC(atanh)

// Binary counterpart: `name(arg)` applies sleef_name / std::name lanewise to
// (this, arg).
#define DEFINE_SLEEF_COMPATIBLE_BINARY_ELEMENTWISE_FUNC_WITH_SLEEF_NAME(name, sleef_name) \
  Vectorized<float> name(const Vectorized<float> &arg) const {  \
    return USE_SLEEF(                                           \
      Vectorized<float>(sleef_name(values, arg.values)),        \
      map2(arg, std::name)                                      \
    );                                                          \
  }

#define DEFINE_SLEEF_COMPATIBLE_BINARY_ELEMENTWISE_FUNC(name) \
  DEFINE_SLEEF_COMPATIBLE_BINARY_ELEMENTWISE_FUNC_WITH_SLEEF_NAME(name, Sleef_##name##f4_u10)

DEFINE_SLEEF_COMPATIBLE_BINARY_ELEMENTWISE_FUNC(atan2)
// copysign is exact, so SLEEF provides it without an accuracy suffix.
DEFINE_SLEEF_COMPATIBLE_BINARY_ELEMENTWISE_FUNC_WITH_SLEEF_NAME(copysign, Sleef_copysignf4)
// erf has a custom polynomial implementation; defined out of line below.
Vectorized<float> erf() const;
// erfc: SLEEF u15 (1.5 ULP) variant.
DEFINE_SLEEF_COMPATIBLE_UNARY_ELEMENTWISE_FUNC_WITH_SLEEF_NAME(erfc, Sleef_erfcf4_u15)
// Inverse error function via the scalar ATen helper.
Vectorized<float> erfinv() const {
  return map(calc_erfinv);
}
// Exponentials.
DEFINE_SLEEF_COMPATIBLE_UNARY_ELEMENTWISE_FUNC(exp)
DEFINE_SLEEF_COMPATIBLE_UNARY_ELEMENTWISE_FUNC(exp2)
DEFINE_SLEEF_COMPATIBLE_UNARY_ELEMENTWISE_FUNC(expm1)
// exp_u20 is allowed a looser error bound; no faster path here, so it simply
// forwards to exp().
Vectorized<float> exp_u20() const {
  return exp();
}
// fmod / hypot are exact or near-exact SLEEF variants.
DEFINE_SLEEF_COMPATIBLE_BINARY_ELEMENTWISE_FUNC_WITH_SLEEF_NAME(fmod, Sleef_fmodf4)
DEFINE_SLEEF_COMPATIBLE_BINARY_ELEMENTWISE_FUNC_WITH_SLEEF_NAME(hypot, Sleef_hypotf4_u05)
// Modified Bessel functions and digamma via scalar ATen helpers.
Vectorized<float> i0() const {
  return map(calc_i0);
}
Vectorized<float> i0e() const {
  return map(calc_i0e);
}
Vectorized<float> digamma() const {
  return map(calc_digamma);
}
// Regularized incomplete gamma functions (binary, scalar fallback only).
Vectorized<float> igamma(const Vectorized<float> &x) const {
  return map2(x, calc_igamma);
}
Vectorized<float> igammac(const Vectorized<float> &x) const {
  return map2(x, calc_igammac);
}
// Logarithms.
DEFINE_SLEEF_COMPATIBLE_UNARY_ELEMENTWISE_FUNC(log)
DEFINE_SLEEF_COMPATIBLE_UNARY_ELEMENTWISE_FUNC(log10)
DEFINE_SLEEF_COMPATIBLE_UNARY_ELEMENTWISE_FUNC(log1p)
DEFINE_SLEEF_COMPATIBLE_UNARY_ELEMENTWISE_FUNC(log2)
DEFINE_SLEEF_COMPATIBLE_BINARY_ELEMENTWISE_FUNC_WITH_SLEEF_NAME(nextafter, Sleef_nextafterf4)
// frac needs operator- so it is defined out of line below.
Vectorized<float> frac() const;
// Trigonometric / hyperbolic functions.
DEFINE_SLEEF_COMPATIBLE_UNARY_ELEMENTWISE_FUNC(sin)
DEFINE_SLEEF_COMPATIBLE_UNARY_ELEMENTWISE_FUNC(sinh)
DEFINE_SLEEF_COMPATIBLE_UNARY_ELEMENTWISE_FUNC(cos)
DEFINE_SLEEF_COMPATIBLE_UNARY_ELEMENTWISE_FUNC(cosh)
// Rounding family uses the ATen scalar helpers for consistent semantics
// across backends.
Vectorized<float> ceil() const {
  return map(at::native::ceil_impl);
}
Vectorized<float> floor() const {
  return map(at::native::floor_impl);
}
// Lanewise negation.
Vectorized<float> neg() const {
  return Vectorized<float>(
      vnegq_f32(values));
}
Vectorized<float> round() const {
  // We do not use std::round because we would like to round midway numbers to the nearest even integer.
  return map(at::native::round_impl);
}
DEFINE_SLEEF_COMPATIBLE_UNARY_ELEMENTWISE_FUNC(tan)
DEFINE_SLEEF_COMPATIBLE_UNARY_ELEMENTWISE_FUNC(tanh)
// vrndq_f32 rounds toward zero, i.e. truncation.
Vectorized<float> trunc() const {
  return Vectorized<float>(vrndq_f32(values));
}
DEFINE_SLEEF_COMPATIBLE_UNARY_ELEMENTWISE_FUNC(lgamma)
Vectorized<float> sqrt() const {
  return Vectorized<float>(vsqrtq_f32(values));
}
// 1 / x via a full-precision divide (not the vrecpe estimate).
Vectorized<float> reciprocal() const {
  return Vectorized<float>(vdivq_f32(vdupq_n_f32(1.0f), values));
}
Vectorized<float> rsqrt() const {
  return this->sqrt().reciprocal();
}
DEFINE_SLEEF_COMPATIBLE_BINARY_ELEMENTWISE_FUNC(pow)
// Lanewise comparisons. Each returns a bitmask vector: all-ones bits in a
// lane where the predicate holds, all-zeros otherwise, reinterpreted as
// float (the convention blendv() expects).
Vectorized<float> operator==(const Vectorized<float>& other) const {
  return Vectorized<float>(vreinterpretq_f32_u32(vceqq_f32(values, other.values)));
}

// != is the bitwise complement of ==; NaN lanes therefore compare unequal.
Vectorized<float> operator!=(const Vectorized<float>& other) const {
  float32x4_t r0 = vreinterpretq_f32_u32(
      vmvnq_u32(vceqq_f32(values, other.values)));
  return Vectorized<float>(r0);
}

Vectorized<float> operator<(const Vectorized<float>& other) const {
  return Vectorized<float>(vreinterpretq_f32_u32(vcltq_f32(values, other.values)));
}

Vectorized<float> operator<=(const Vectorized<float>& other) const {
  return Vectorized<float>(vreinterpretq_f32_u32(vcleq_f32(values, other.values)));
}

Vectorized<float> operator>(const Vectorized<float>& other) const {
  return Vectorized<float>(vreinterpretq_f32_u32(vcgtq_f32(values, other.values)));
}

Vectorized<float> operator>=(const Vectorized<float>& other) const {
  return Vectorized<float>(vreinterpretq_f32_u32(vcgeq_f32(values, other.values)));
}
Vectorized<float> eq(const Vectorized<float>& other) const;
|
| 390 |
+
Vectorized<float> ne(const Vectorized<float>& other) const;
|
| 391 |
+
Vectorized<float> gt(const Vectorized<float>& other) const;
|
| 392 |
+
Vectorized<float> ge(const Vectorized<float>& other) const;
|
| 393 |
+
Vectorized<float> lt(const Vectorized<float>& other) const;
|
| 394 |
+
Vectorized<float> le(const Vectorized<float>& other) const;
|
| 395 |
+
};
|
| 396 |
+
|
| 397 |
+
// Lanewise arithmetic operators, specialized for the NEON float vector.
template <>
Vectorized<float> inline operator+(const Vectorized<float>& a, const Vectorized<float>& b) {
  return Vectorized<float>(vaddq_f32(a, b));
}

template <>
Vectorized<float> inline operator-(const Vectorized<float>& a, const Vectorized<float>& b) {
  return Vectorized<float>(vsubq_f32(a, b));
}

template <>
Vectorized<float> inline operator*(const Vectorized<float>& a, const Vectorized<float>& b) {
  return Vectorized<float>(vmulq_f32(a, b));
}

// Full-precision lanewise divide.
template <>
Vectorized<float> inline operator/(const Vectorized<float>& a, const Vectorized<float>& b) {
  return Vectorized<float>(vdivq_f32(a, b));
}
// frac. Implement this here so we can use subtraction
// (x - trunc(x), i.e. the fractional part with the sign of x).
inline Vectorized<float> Vectorized<float>::frac() const {
  return *this - this->trunc();
}
|
| 422 |
+
// Added SLEEF implementation for maximum.
// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
// either input is a NaN: when any lane is inf/NaN we take the vmaxq_f32 path
// (ARM FMAX propagates NaN); otherwise the SLEEF fmax may be used.
// Fix: added the missing `template <>` so this is the specialization of the
// generic maximum() from vec_base, consistent with minimum()/clamp() below
// (previously it was a plain overload).
template <>
Vectorized<float> inline maximum(const Vectorized<float>& a, const Vectorized<float>& b) {
  if (!a.has_inf_nan() && !b.has_inf_nan()) {
    return USE_SLEEF(
        Vectorized<float>(Sleef_fmaxf4(a, b)),
        Vectorized<float>(vmaxq_f32(a, b)));
  } else {
    return Vectorized<float>(vmaxq_f32(a, b));
  }
}
|
| 434 |
+
// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
|
| 435 |
+
// either input is a NaN.
|
| 436 |
+
template <>
|
| 437 |
+
Vectorized<float> inline minimum(const Vectorized<float>& a, const Vectorized<float>& b) {
|
| 438 |
+
return Vectorized<float>(vminq_f32(a, b));
|
| 439 |
+
}
|
| 440 |
+
|
| 441 |
+
// Lanewise clamp helpers built on minimum()/maximum(), so they inherit the
// NaN-propagating behavior of those operations.
template <>
Vectorized<float> inline clamp(const Vectorized<float>& a, const Vectorized<float>& min, const Vectorized<float>& max) {
  return minimum(max, maximum(min, a));
}

template <>
Vectorized<float> inline clamp_max(const Vectorized<float>& a, const Vectorized<float>& max) {
  return minimum(max, a);
}

template <>
Vectorized<float> inline clamp_min(const Vectorized<float>& a, const Vectorized<float>& min) {
  return maximum(min, a);
}
// Bitwise operators on the raw float bits (used mainly on comparison masks):
// reinterpret to u32, apply the integer op, reinterpret back.
template <>
Vectorized<float> inline operator&(const Vectorized<float>& a, const Vectorized<float>& b) {
  return Vectorized<float>(vreinterpretq_f32_u32(vandq_u32(
      vreinterpretq_u32_f32(a),
      vreinterpretq_u32_f32(b))));
}

template <>
Vectorized<float> inline operator|(const Vectorized<float>& a, const Vectorized<float>& b) {
  return Vectorized<float>(vreinterpretq_f32_u32(vorrq_u32(
      vreinterpretq_u32_f32(a),
      vreinterpretq_u32_f32(b))));
}

template <>
Vectorized<float> inline operator^(const Vectorized<float>& a, const Vectorized<float>& b) {
  return Vectorized<float>(vreinterpretq_f32_u32(veorq_u32(
      vreinterpretq_u32_f32(a),
      vreinterpretq_u32_f32(b))));
}
// Numeric comparison variants: unlike the operators (which return bitmasks),
// these return 1.0f where the predicate holds and 0.0f elsewhere, by masking
// a vector of ones with the comparison bitmask.
inline Vectorized<float> Vectorized<float>::eq(const Vectorized<float>& other) const {
  return (*this == other) & Vectorized<float>(1.0f);
}

inline Vectorized<float> Vectorized<float>::ne(const Vectorized<float>& other) const {
  return (*this != other) & Vectorized<float>(1.0f);
}

inline Vectorized<float> Vectorized<float>::gt(const Vectorized<float>& other) const {
  return (*this > other) & Vectorized<float>(1.0f);
}

inline Vectorized<float> Vectorized<float>::ge(const Vectorized<float>& other) const {
  return (*this >= other) & Vectorized<float>(1.0f);
}

inline Vectorized<float> Vectorized<float>::lt(const Vectorized<float>& other) const {
  return (*this < other) & Vectorized<float>(1.0f);
}

inline Vectorized<float> Vectorized<float>::le(const Vectorized<float>& other) const {
  return (*this <= other) & Vectorized<float>(1.0f);
}
// Converts n floats to int32 (vectorized body, scalar tail). vcvtq_s32_f32
// truncates toward zero, matching static_cast in the tail loop.
template <>
inline void convert(const float* src, int32_t* dst, int64_t n) {
  int64_t i;
#ifndef __msvc_cl__
#pragma unroll
#endif
  // Vector body: runs while a full register's worth of elements remains
  // (skipped entirely when n < size()).
  for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) {
    vst1q_s32(dst + i, vcvtq_s32_f32(vld1q_f32(src + i)));
  }
#ifndef __msvc_cl__
#pragma unroll
#endif
  // Scalar tail for the remaining 0..size()-1 elements.
  for (; i < n; i++) {
    dst[i] = static_cast<int32_t>(src[i]);
  }
}

// Converts n int32 values to float; same vector-body + scalar-tail structure.
template <>
inline void convert(const int32_t* src, float* dst, int64_t n) {
  int64_t i;
#ifndef __msvc_cl__
#pragma unroll
#endif
  for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) {
    vst1q_f32(dst + i, vcvtq_f32_s32(vld1q_s32(src + i)));
  }
#ifndef __msvc_cl__
#pragma unroll
#endif
  for (; i < n; i++) {
    dst[i] = static_cast<float>(src[i]);
  }
}
// Fused multiply-add: a * b + c per lane (vfmaq_f32(c, a, b) computes
// c + a * b with a single rounding).
template <>
Vectorized<float> inline fmadd(const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& c) {
  return Vectorized<float>(vfmaq_f32(c, a, b));
}
// Fused multiply-subtract: a * b - c per lane.
// Fix: the previous implementation returned vfmsq_f32(c, a, b), which per the
// ARM intrinsics reference computes c - a * b — the NEGATION of the fmsub
// contract (a * b - c) used by the generic vec_base fallback and the x86
// backends. vfmaq_f32(vnegq_f32(c), a, b) computes (-c) + a * b with a
// single fused multiply-add, matching the contract.
template <>
Vectorized<float> inline fmsub(const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& c) {
  return Vectorized<float>(vfmaq_f32(vnegq_f32(c), a, b));
}
// Vectorized error function using a rational polynomial approximation
// (Abramowitz & Stegun style: erf(x) ~= sign(x) * (1 - r*t*exp(-x*x)) with
// t = 1/(1 + p*|x|)). The exp() is still a scalar map; see comment below.
inline Vectorized<float> Vectorized<float>::erf() const{
  // constants
  const Vectorized<float> neg_zero_vec(-0.f);
  const Vectorized<float> one_vec(1.0f);
  const Vectorized<float> p(0.3275911f);
  const Vectorized<float> p1(0.254829592f);
  const Vectorized<float> p2(-0.284496736f);
  const Vectorized<float> p3(1.421413741f);
  const Vectorized<float> p4(-1.453152027f);
  const Vectorized<float> p5(1.061405429f);
  // sign(x): the sign bit of each lane, extracted by masking with -0.0f.
  auto sign_mask = neg_zero_vec & *this;
  auto abs_vec = this->abs();
  // t = 1 / (p * abs(x) + 1)
  auto tmp0 = fmadd(p, abs_vec, one_vec);
  auto t = one_vec / tmp0;
  // r = p5 * t ^ 4 + p4 * t ^ 3 + p3 * t ^ 2 + p2 * t + p1  (Horner's scheme)
  auto tmp1 = fmadd(p5, t, p4);
  auto tmp2 = fmadd(tmp1, t, p3);
  auto tmp3 = fmadd(tmp2, t, p2);
  auto r = fmadd(tmp3, t, p1);
  // - exp(- x * x)
  auto pow_2 = (*this) * (*this);
  auto neg_pow_2 = pow_2 ^ neg_zero_vec;  // flip sign bit: -x*x
  auto tmp4 = neg_pow_2.map(std::exp); // This can be swapped for a faster implementation of exp.
  auto tmp5 = tmp4 ^ neg_zero_vec;     // negate: -exp(-x*x)
  // erf(x) = sign(x) * (1 - r * t * exp(- x * x))
  auto tmp6 = t * tmp5;
  auto tmp7 = fmadd(tmp6, r, one_vec);
  // XOR with the sign bit restores the sign of the input.
  return tmp7 ^ sign_mask;
}
#undef DEFINE_SLEEF_COMPATIBLE_BINARY_ELEMENTWISE_FUNC
|
| 577 |
+
#undef DEFINE_SLEEF_COMPATIBLE_UNARY_ELEMENTWISE_FUNC
|
| 578 |
+
#endif /* defined(aarch64) */
|
| 579 |
+
|
| 580 |
+
}} // namespace at::vec::CPU_CAPABILITY
|
lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec128/vec128_half_neon.h
ADDED
|
@@ -0,0 +1,603 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
| 4 |
+
// See Note [Do not compile initializers with AVX]
|
| 5 |
+
|
| 6 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 7 |
+
#include <ATen/cpu/vec/vec128/vec128_convert.h>
|
| 8 |
+
#include <ATen/cpu/vec/vec128/vec128_float_neon.h>
|
| 9 |
+
#include <ATen/cpu/vec/vec128/vec128_reduced_precision_common_neon.h>
|
| 10 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 11 |
+
#include <c10/util/Half.h>
|
| 12 |
+
#include <c10/util/irange.h>
|
| 13 |
+
|
| 14 |
+
namespace at::vec {
|
| 15 |
+
// See Note [CPU_CAPABILITY namespace]
|
| 16 |
+
inline namespace CPU_CAPABILITY {
|
| 17 |
+
|
| 18 |
+
// Right now contains only aarch64 implementation.
|
| 19 |
+
// Due to follow two reasons aarch32 is not currently supported.
|
| 20 |
+
// 1. Due to difference in ISA been aarch32 and aarch64, intrinsics
|
| 21 |
+
// that work for aarch64 dont work for aarch32.
|
| 22 |
+
// 2. Android NDK r21 has problems with compiling aarch32.
|
| 23 |
+
// Clang seg faults.
|
| 24 |
+
// https://github.com/android/ndk/issues/1248
|
| 25 |
+
// https://bugs.llvm.org/show_bug.cgi?id=45824
|
| 26 |
+
// Most likely we will do aarch32 support with inline asm.
|
| 27 |
+
#if !defined(C10_MOBILE) && defined(__aarch64__)
|
| 28 |
+
|
| 29 |
+
#ifdef __BIG_ENDIAN__
|
| 30 |
+
#error "Big endian is not supported."
|
| 31 |
+
#endif
|
| 32 |
+
|
| 33 |
+
// Per-lane compile-time blend helper for float16x8_t: BlendHalfRegs<i, sel>
// copies lane i of `b` (sel == true) or of `a` (sel == false) into `res`.
// Primary template is declared only; the two bool specializations below
// provide the implementations.
template <int index, bool mask_val>
struct BlendHalfRegs {
  static float16x8_t impl(
      const float16x8_t& a,
      const float16x8_t& b,
      float16x8_t& res);
};

// mask bit set: take lane `index` from b.
template <int index>
struct BlendHalfRegs<index, true> {
  static float16x8_t impl(
      const float16x8_t& a,
      const float16x8_t& b,
      float16x8_t& res) {
    return vsetq_lane_f16(vgetq_lane_f16(b, index), res, index);
  }
};

// mask bit clear: take lane `index` from a.
template <int index>
struct BlendHalfRegs<index, false> {
  static float16x8_t impl(
      const float16x8_t& a,
      const float16x8_t& b,
      float16x8_t& res) {
    return vsetq_lane_f16(vgetq_lane_f16(a, index), res, index);
  }
};
// On ARM, Half type supports float16_t->Half constructor and Half->float16_t
|
| 62 |
+
// conversion
|
| 63 |
+
template <>
|
| 64 |
+
class Vectorized<c10::Half> : public Vectorized16<float16x8_t, c10::Half, BlendHalfRegs, Vectorized<c10::Half>> {
|
| 65 |
+
using Base = Vectorized16<float16x8_t, c10::Half, BlendHalfRegs, Vectorized<c10::Half>>;
|
| 66 |
+
friend Base;
|
| 67 |
+
private:
|
| 68 |
+
// We use these private map functions to implement various methods.
// Strategy: widen the 8 half lanes to two float32x4 halves, run the
// Vectorized<float> member function on each, then narrow back to f16.
Vectorized<c10::Half> map_with_vec_float_method(
    Vectorized<float> (Vectorized<float>::*m)() const) const {
  float32x4_t v00 = vcvt_f32_f16(vget_low_f16(values));
  float32x4_t v01 = vcvt_f32_f16(vget_high_f16(values));
  Vectorized<float> mv0 = (Vectorized<float>(v00).*m)();
  Vectorized<float> mv1 = (Vectorized<float>(v01).*m)();
  float16x4_t r00 = vcvt_f16_f32(mv0);
  float16x4_t r01 = vcvt_f16_f32(mv1);
  return Vectorized<c10::Half>(vcombine_f16(r00, r01));
}

// Binary variant: widens both operands, applies the Vectorized<float>
// member function pairwise, and narrows the result.
Vectorized<c10::Half> map2_with_vec_float_method(
    const Vectorized<c10::Half>& second,
    Vectorized<float> (Vectorized<float>::*m)(const Vectorized<float>&)
        const) const {
  float32x4_t v00 = vcvt_f32_f16(vget_low_f16(values));
  float32x4_t v01 = vcvt_f32_f16(vget_high_f16(values));
  float32x4_t second_v00 = vcvt_f32_f16(vget_low_f16(second.values));
  float32x4_t second_v01 = vcvt_f32_f16(vget_high_f16(second.values));
  Vectorized<float> mv0 = (Vectorized<float>(v00).*m)(Vectorized<float>(second_v00));
  Vectorized<float> mv1 = (Vectorized<float>(v01).*m)(Vectorized<float>(second_v01));
  float16x4_t r00 = vcvt_f16_f32(mv0);
  float16x4_t r01 = vcvt_f16_f32(mv1);

  // Pack result into Vectorized<c10::Half>
  return Vectorized<c10::Half>(vcombine_f16(r00, r01));
}

// Like map2_with_vec_float_method, but for operators that return lane
// bitmasks rather than numeric floats: the 32-bit masks are narrowed with an
// integer vmovn instead of a float conversion, since converting an all-ones
// bitmask as a float (a NaN) would mangle it.
Vectorized<c10::Half> map2_bitmask_with_vec_float_method(
    const Vectorized<c10::Half>& second,
    Vectorized<float> (Vectorized<float>::*m)(const Vectorized<float>&)
        const) const {
  float32x4_t v00 = vcvt_f32_f16(vget_low_f16(values));
  float32x4_t v01 = vcvt_f32_f16(vget_high_f16(values));
  float32x4_t second_v00 = vcvt_f32_f16(vget_low_f16(second.values));
  float32x4_t second_v01 = vcvt_f32_f16(vget_high_f16(second.values));
  Vectorized<float> mv0 = (Vectorized<float>(v00).*m)(Vectorized<float>(second_v00));
  Vectorized<float> mv1 = (Vectorized<float>(v01).*m)(Vectorized<float>(second_v01));
  // Assume the operator returns a bitmask, not "real" floats, and
  // just narrow the bits. All-ones is a NaN and will get mangled by conversion!
  float16x4_t r00 = vreinterpret_f16_u16(vmovn_u32(vreinterpretq_u32_f32(mv0)));
  float16x4_t r01 = vreinterpret_f16_u16(vmovn_u32(vreinterpretq_u32_f32(mv1)));

  // Pack result into Vectorized<c10::Half>
  return Vectorized<c10::Half>(vcombine_f16(r00, r01));
}
|
| 116 |
+
public:
|
| 117 |
+
// Inherit the register-wrapping constructor from the Vectorized16 base.
using Vectorized16::Vectorized16;

Vectorized() = default;

// A ctor that accepts c10::Half is needed to fit interface with vec_base.h
// A second constructor that takes float16_t is also included
Vectorized(c10::Half val)
    : Vectorized((float16_t)val) {}
// Broadcast a single value to all 8 lanes.
Vectorized(float16_t val)
    : Vectorized16(vdupq_n_f16(val)) {}
// Per-lane initialization (lane 0 .. lane 7).
Vectorized(
    value_type val0,
    value_type val1,
    value_type val2,
    value_type val3,
    value_type val4,
    value_type val5,
    value_type val6,
    value_type val7)
    : Vectorized16(float16x8_t{
          val0,
          val1,
          val2,
          val3,
          val4,
          val5,
          val6,
          val7}) {}
// Runtime blend: lane i comes from `b` where the mask lane is all-ones and
// from `a` where all-zeros.
static Vectorized<c10::Half> blendv(
    const Vectorized<c10::Half>& a,
    const Vectorized<c10::Half>& b,
    const Vectorized<c10::Half>& mask) {
  // Note: using blendv is very awkward because 0xFFFF is one of
  // many NaN's in FP16 It's unfortunate that the mask has type Half
  // (required from vec_base)

  // TODO
  // NB: This requires that each value, i.e., each uint value,
  // of the mask either all be zeros or all be 1s.
  // We perhaps need some kind of an assert?
  // But that will affect performance.

  // NOTE [vbslq_f16]: vbslq_f16 doesn't work on clang without
  // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC. vbslq_u16 generates the
  // same instruction anyway. see https://godbolt.org/z/cY4a55Y7P
  Vectorized<c10::Half> vec(mask.values);
  vec.values = vreinterpretq_f16_u16(
      vbslq_u16(
          vreinterpretq_u16_f16(vec.values),
          vreinterpretq_u16_f16(b.values),
          vreinterpretq_u16_f16(a.values)));
  return vec;
}
// Returns a vector whose first `count` lanes come from `b` and the remaining
// lanes from `a`, by building a per-lane 0xFFFF/0x0000 select mask at runtime.
// NOTE(review): assumes 0 <= count <= size(); a larger count would write past
// the pre_mask stack buffer — TODO confirm callers guarantee this.
static Vectorized<c10::Half> set(
    const Vectorized<c10::Half>& a,
    const Vectorized<c10::Half>& b,
    int64_t count = size()) {
  uint16_t pre_mask[size()] = {0};
  for (int i = 0; i < count; i++) {
    pre_mask[i] = 0xFFFF;
  }
  uint16x8_t mask = vld1q_u16(pre_mask);

  // Using blendv is awkward because 0xFFFF is one of many NaN's in FP16
  // so we directly use vbslq_u16 instead. (See NOTE [vbslq_f16] above.)
  Vectorized<c10::Half> vec(
      vreinterpretq_f16_u16(
          vbslq_u16(
              mask,
              vreinterpretq_u16_f16(b.values),
              vreinterpretq_u16_f16(a.values))));

  return vec;
}
static Vectorized<c10::Half> loadu(const void* ptr, int64_t count = size()) {
|
| 194 |
+
if (count == size()) {
|
| 195 |
+
return vld1q_f16(reinterpret_cast<const float16_t*>(ptr));
|
| 196 |
+
}
|
| 197 |
+
__at_align__ float16_t tmp_values[size()];
|
| 198 |
+
for (const auto i : c10::irange(size())) {
|
| 199 |
+
tmp_values[i] = 0;
|
| 200 |
+
}
|
| 201 |
+
std::memcpy(
|
| 202 |
+
tmp_values,
|
| 203 |
+
reinterpret_cast<const float16_t*>(ptr),
|
| 204 |
+
count * sizeof(float16_t));
|
| 205 |
+
return vld1q_f16(reinterpret_cast<const float16_t*>(tmp_values));
|
| 206 |
+
}
|
| 207 |
+
// Stores the first `count` lanes to `ptr` (which need not be aligned).
// A partial store goes through a stack buffer so only `count` elements
// of the destination are written.
void store(void* ptr, int64_t count = size()) const {
  if (count == size()) {
    vst1q_f16(reinterpret_cast<float16_t*>(ptr), values);
    return;
  } else {
    float16_t tmp_values[size()];
    vst1q_f16(reinterpret_cast<float16_t*>(tmp_values), values);
    std::memcpy(ptr, tmp_values, count * sizeof(float16_t));
  }
}
|
| 217 |
+
// For a boolean version (any-one / all-zero style checks) this could be
// done faster in a different way.
//
// Returns a per-lane mask: all bits set where the lane is NaN, zero
// otherwise (the all-ones pattern is itself a NaN payload in FP16).
Vectorized<c10::Half> isnan() const {
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  // NaN != NaN, so self-equality is false only on NaN lanes; invert it.
  return vreinterpretq_f16_u16(vmvnq_u16(vceqq_f16(values, values)));
#else
  // NOTE: we could make this faster by doing vectorized checks of
  // exponent/payload bits.
  __at_align__ c10::Half tmp[size()];
  __at_align__ c10::Half res[size()];
  store(tmp);
  for (const auto i : c10::irange(size())) {
    if (_isnan(tmp[i])) {
      std::memset(static_cast<void*>(&res[i]), 0xFF, sizeof(c10::Half));
    } else {
      std::memset(static_cast<void*>(&res[i]), 0, sizeof(c10::Half));
    }
  }
  return loadu(res);
#endif
}
|
| 238 |
+
bool has_inf_nan() const {
|
| 239 |
+
__at_align__ c10::Half tmp[size()];
|
| 240 |
+
store(tmp);
|
| 241 |
+
for (const auto i : c10::irange(size())) {
|
| 242 |
+
if (_isnan(tmp[i]) || _isinf(tmp[i])) {
|
| 243 |
+
return true;
|
| 244 |
+
}
|
| 245 |
+
}
|
| 246 |
+
return false;
|
| 247 |
+
}
|
| 248 |
+
// Lane-wise absolute value.
Vectorized<c10::Half> abs() const {
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  return Vectorized<c10::Half>(vabsq_f16(values));
#else
  // No native fp16 arithmetic: widen to float, apply, narrow back.
  return map_with_vec_float_method(&Vectorized<float>::abs);
#endif
}
|
| 255 |
+
// Fractional part (self - trunc(self)); defined out of line below, after
// operator- is available.
Vectorized<c10::Half> frac() const;
// Lane-wise negation.
Vectorized<c10::Half> neg() const {
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  return Vectorized<c10::Half>(vnegq_f16(values));
#else
  return map_with_vec_float_method(&Vectorized<float>::neg);
#endif
}
// Lane-wise truncation (round toward zero); vrndq_f16 rounds toward zero.
Vectorized<c10::Half> trunc() const {
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  return Vectorized<c10::Half>(vrndq_f16(values));
#else
  return map_with_vec_float_method(&Vectorized<float>::trunc);
#endif
}
// Lane-wise square root.
Vectorized<c10::Half> sqrt() const {
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  return Vectorized<c10::Half>(vsqrtq_f16(values));
#else
  return map_with_vec_float_method(&Vectorized<float>::sqrt);
#endif
}
// Lane-wise reciprocal computed as an exact 1/x division (not the
// low-precision vrecpeq estimate).
Vectorized<c10::Half> reciprocal() const {
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  auto ones = vdupq_n_f16(1.0f);
  return Vectorized<c10::Half>(vdivq_f16(ones, values));
#else
  return map_with_vec_float_method(&Vectorized<float>::reciprocal);
#endif
}
|
| 285 |
+
// Comparison operators return a lane-wise bitmask re-encoded as FP16:
// all bits set for true (a NaN payload), all zero for false. Callers
// reinterpret the bits (e.g. via blendv) rather than read them as numbers.
Vectorized<c10::Half> operator==(const Vectorized<c10::Half>& other) const {
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  return Vectorized<c10::Half>(vreinterpretq_f16_u16(vceqq_f16(values, other.values)));
#else
  return map2_bitmask_with_vec_float_method(other, &Vectorized<float>::operator==);
#endif
}

Vectorized<c10::Half> operator!=(const Vectorized<c10::Half>& other) const {
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  // There is no vcneq; compute equality and invert the mask.
  return Vectorized<c10::Half>(vreinterpretq_f16_u16(
      vmvnq_u16(vceqq_f16(values, other.values))));
#else
  return map2_bitmask_with_vec_float_method(other, &Vectorized<float>::operator!=);
#endif
}

Vectorized<c10::Half> operator<(const Vectorized<c10::Half>& other) const {
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  return Vectorized<c10::Half>(vreinterpretq_f16_u16(vcltq_f16(values, other.values)));
#else
  return map2_bitmask_with_vec_float_method(other, &Vectorized<float>::operator<);
#endif
}

Vectorized<c10::Half> operator<=(const Vectorized<c10::Half>& other) const {
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  return Vectorized<c10::Half>(vreinterpretq_f16_u16(vcleq_f16(values, other.values)));
#else
  return map2_bitmask_with_vec_float_method(other, &Vectorized<float>::operator<=);
#endif
}

Vectorized<c10::Half> operator>(const Vectorized<c10::Half>& other) const {
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  return Vectorized<c10::Half>(vreinterpretq_f16_u16(vcgtq_f16(values, other.values)));
#else
  return map2_bitmask_with_vec_float_method(other, &Vectorized<float>::operator>);
#endif
}

Vectorized<c10::Half> operator>=(const Vectorized<c10::Half>& other) const {
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  return Vectorized<c10::Half>(vreinterpretq_f16_u16(vcgeq_f16(values, other.values)));
#else
  return map2_bitmask_with_vec_float_method(other, &Vectorized<float>::operator>=);
#endif
}
|
| 333 |
+
|
| 334 |
+
// Boolean-valued comparisons: 1.0 for true, 0.0 for false per lane.
// Defined out of line below because they need operator& on this type.
Vectorized<c10::Half> eq(const Vectorized<c10::Half>& other) const;
Vectorized<c10::Half> ne(const Vectorized<c10::Half>& other) const;
Vectorized<c10::Half> gt(const Vectorized<c10::Half>& other) const;
Vectorized<c10::Half> ge(const Vectorized<c10::Half>& other) const;
Vectorized<c10::Half> lt(const Vectorized<c10::Half>& other) const;
Vectorized<c10::Half> le(const Vectorized<c10::Half>& other) const;
}; // Vectorized<Half>
|
| 341 |
+
|
| 342 |
+
// Widens one vector of 8 halfs into two vectors of 4 floats
// (low lanes first, then high lanes).
inline std::tuple<Vectorized<float>, Vectorized<float>> convert_half_float(const Vectorized<Half>& a) {
  static_assert(Vectorized<Half>::size() == 2 * Vectorized<float>::size());
  const float16x8_t halves = a;
  const Vectorized<float> lo(vcvt_f32_f16(vget_low_f16(halves)));
  const Vectorized<float> hi(vcvt_f32_f16(vget_high_f16(halves)));
  return std::make_tuple(lo, hi);
}
|
| 349 |
+
// Narrows two vectors of 4 floats back into one vector of 8 halfs;
// `a` supplies the low lanes, `b` the high lanes.
inline Vectorized<Half> convert_float_half(const Vectorized<float>& a, const Vectorized<float>& b) {
  static_assert(Vectorized<Half>::size() == 2 * Vectorized<float>::size());
  const float16x4_t lo = vcvt_f16_f32(static_cast<float32x4_t>(a));
  const float16x4_t hi = vcvt_f16_f32(static_cast<float32x4_t>(b));
  return Vectorized<Half>(vcombine_f16(lo, hi));
}
|
| 357 |
+
|
| 358 |
+
// Applies a binary float op to two Half vectors by widening both to two
// float vectors, applying `op` lane-wise, and narrowing the result.
// Fallback path used when native fp16 arithmetic is unavailable.
template <typename Op>
Vectorized<c10::Half> binary_operator_via_float(
    Op op,
    const Vectorized<c10::Half>& a,
    const Vectorized<c10::Half>& b) {
  const auto [a_float_low, a_float_high] = convert_half_float(a);
  const auto [b_float_low, b_float_high] = convert_half_float(b);
  return convert_float_half(
      op(a_float_low, b_float_low),
      op(a_float_high, b_float_high));
}
|
| 369 |
+
|
| 370 |
+
// Arithmetic operators: use native fp16 NEON instructions when available,
// otherwise round-trip through float (which changes rounding slightly but
// matches the generic fallback behavior).
template <>
Vectorized<c10::Half> inline operator+(
    const Vectorized<c10::Half>& a,
    const Vectorized<c10::Half>& b) {
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  return Vectorized<c10::Half>(vaddq_f16(a, b));
#else
  return binary_operator_via_float(std::plus<Vectorized<float>>(), a, b);
#endif
}

template <>
Vectorized<c10::Half> inline operator-(
    const Vectorized<c10::Half>& a,
    const Vectorized<c10::Half>& b) {
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  return Vectorized<c10::Half>(vsubq_f16(a, b));
#else
  return binary_operator_via_float(std::minus<Vectorized<float>>(), a, b);
#endif
}

template <>
Vectorized<c10::Half> inline operator*(
    const Vectorized<c10::Half>& a,
    const Vectorized<c10::Half>& b) {
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  return Vectorized<c10::Half>(vmulq_f16(a, b));
#else
  return binary_operator_via_float(std::multiplies<Vectorized<float>>(), a, b);
#endif
}

template <>
Vectorized<c10::Half> inline operator/(
    const Vectorized<c10::Half>& a,
    const Vectorized<c10::Half>& b) {
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  return Vectorized<c10::Half>(vdivq_f16(a, b));
#else
  return binary_operator_via_float(std::divides<Vectorized<float>>(), a, b);
#endif
}
|
| 413 |
+
|
| 414 |
+
// frac. Implement this here (out of line) so we can use the subtraction
// operator defined above. Lane-wise fractional part: x - trunc(x).
inline Vectorized<c10::Half> Vectorized<c10::Half>::frac() const {
  return *this - this->trunc();
}
|
| 418 |
+
|
| 419 |
+
// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
// either input is a NaN. (vmaxq_f16 has exactly these NaN semantics.)
template <>
Vectorized<c10::Half> inline maximum(
    const Vectorized<c10::Half>& a,
    const Vectorized<c10::Half>& b) {
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  return Vectorized<c10::Half>(vmaxq_f16(a, b));
#else
  // The cast picks the Vectorized<float> overload of the maximum template.
  return binary_operator_via_float(
      static_cast<Vectorized<float>(*)(const Vectorized<float>&, const Vectorized<float>&)>(&maximum),
      a,
      b);
#endif
}

// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
// either input is a NaN.
template <>
Vectorized<c10::Half> inline minimum(
    const Vectorized<c10::Half>& a,
    const Vectorized<c10::Half>& b) {
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  return Vectorized<c10::Half>(vminq_f16(a, b));
#else
  return binary_operator_via_float(
      static_cast<Vectorized<float>(*)(const Vectorized<float>&, const Vectorized<float>&)>(&minimum),
      a,
      b);
#endif
}
|
| 450 |
+
|
| 451 |
+
// Lane-wise clamp to [min, max], built from the NaN-propagating
// minimum/maximum above (so NaN in any input propagates).
template <>
Vectorized<c10::Half> inline clamp(
    const Vectorized<c10::Half>& a,
    const Vectorized<c10::Half>& min,
    const Vectorized<c10::Half>& max) {
  return minimum(max, maximum(min, a));
}

// Lane-wise upper clamp: min(a, max).
template <>
Vectorized<c10::Half> inline clamp_max(
    const Vectorized<c10::Half>& a,
    const Vectorized<c10::Half>& max) {
  return minimum(max, a);
}

// Lane-wise lower clamp: max(a, min).
template <>
Vectorized<c10::Half> inline clamp_min(
    const Vectorized<c10::Half>& a,
    const Vectorized<c10::Half>& min) {
  return maximum(min, a);
}
|
| 472 |
+
|
| 473 |
+
// Bitwise ops operate on the raw 16-bit lane patterns (reinterpret to
// u16, combine, reinterpret back); used mainly to combine comparison masks.
template <>
Vectorized<c10::Half> inline operator&(
    const Vectorized<c10::Half>& a,
    const Vectorized<c10::Half>& b) {
  return Vectorized<c10::Half>(vreinterpretq_f16_u16(vandq_u16(
      vreinterpretq_u16_f16(a), vreinterpretq_u16_f16(b))));
}

template <>
Vectorized<c10::Half> inline operator|(
    const Vectorized<c10::Half>& a,
    const Vectorized<c10::Half>& b) {
  return Vectorized<c10::Half>(vreinterpretq_f16_u16(vorrq_u16(
      vreinterpretq_u16_f16(a), vreinterpretq_u16_f16(b))));
}

template <>
Vectorized<c10::Half> inline operator^(
    const Vectorized<c10::Half>& a,
    const Vectorized<c10::Half>& b) {
  return Vectorized<c10::Half>(vreinterpretq_f16_u16(veorq_u16(
      vreinterpretq_u16_f16(a), vreinterpretq_u16_f16(b))));
}
|
| 496 |
+
|
| 497 |
+
// Boolean-valued comparisons: AND the all-ones/all-zeros comparison mask
// with the bit pattern of 1.0 so true lanes become 1.0 and false lanes 0.0.
inline Vectorized<c10::Half> Vectorized<c10::Half>::eq(
    const Vectorized<c10::Half>& other) const {
  return (*this == other) & Vectorized<c10::Half>(1);
}

inline Vectorized<c10::Half> Vectorized<c10::Half>::ne(
    const Vectorized<c10::Half>& other) const {
  return (*this != other) & Vectorized<c10::Half>(1);
}

inline Vectorized<c10::Half> Vectorized<c10::Half>::gt(
    const Vectorized<c10::Half>& other) const {
  return (*this > other) & Vectorized<c10::Half>(1);
}

inline Vectorized<c10::Half> Vectorized<c10::Half>::ge(
    const Vectorized<c10::Half>& other) const {
  return (*this >= other) & Vectorized<c10::Half>(1);
}

inline Vectorized<c10::Half> Vectorized<c10::Half>::lt(
    const Vectorized<c10::Half>& other) const {
  return (*this < other) & Vectorized<c10::Half>(1);
}

inline Vectorized<c10::Half> Vectorized<c10::Half>::le(
    const Vectorized<c10::Half>& other) const {
  return (*this <= other) & Vectorized<c10::Half>(1);
}
|
| 526 |
+
|
| 527 |
+
// These are global functions, so the defaults in vec_base.h should
// work fine if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC is not available.
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
// Converts n float16 values to int16 (truncating toward zero), 8 lanes
// per iteration with a scalar tail loop for the remainder.
template <>
inline void convert(const float16_t* src, int16_t* dst, int64_t n) {
  int64_t i;
#ifndef __msvc_cl__
#pragma unroll
#endif
  // Vector body: safe while a full 8-lane load starting at i is in bounds.
  for (i = 0; i <= (n - Vectorized<c10::Half>::size());
       i += Vectorized<c10::Half>::size()) {
    vst1q_s16(dst + i, vcvtq_s16_f16(vld1q_f16(src + i)));
  }
#ifndef __msvc_cl__
#pragma unroll
#endif
  for (; i < n; i++) {
    dst[i] = static_cast<int16_t>(src[i]);
  }
}

// Converts n int16 values to float16, 8 lanes per iteration with a
// scalar tail loop for the remainder.
template <>
inline void convert(const int16_t* src, float16_t* dst, int64_t n) {
  int64_t i;
#ifndef __msvc_cl__
#pragma unroll
#endif
  for (i = 0; i <= (n - Vectorized<c10::Half>::size());
       i += Vectorized<c10::Half>::size()) {
    vst1q_f16(dst + i, vcvtq_f16_s16(vld1q_s16(src + i)));
  }
#ifndef __msvc_cl__
#pragma unroll
#endif
  for (; i < n; i++) {
    dst[i] = static_cast<float16_t>(src[i]);
  }
}
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
|
| 566 |
+
|
| 567 |
+
// Fused multiply-add: a * b + c per lane (single rounding on the native
// path via vfmaq_f16; the fallback widens to float and back).
template <>
Vectorized<c10::Half> inline fmadd(
    const Vectorized<c10::Half>& a,
    const Vectorized<c10::Half>& b,
    const Vectorized<c10::Half>& c) {
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  // vfmaq_f16(acc, x, y) computes acc + x * y.
  return Vectorized<c10::Half>(vfmaq_f16(c, a, b));
#else
  const auto [a_float_low, a_float_high] = convert_half_float(a);
  const auto [b_float_low, b_float_high] = convert_half_float(b);
  const auto [c_float_low, c_float_high] = convert_half_float(c);
  return convert_float_half(
      fmadd(a_float_low, b_float_low, c_float_low),
      fmadd(a_float_high, b_float_high, c_float_high));
#endif
}
|
| 583 |
+
|
| 584 |
+
template <>
|
| 585 |
+
Vectorized<c10::Half> inline fmsub(
|
| 586 |
+
const Vectorized<c10::Half>& a,
|
| 587 |
+
const Vectorized<c10::Half>& b,
|
| 588 |
+
const Vectorized<c10::Half>& c) {
|
| 589 |
+
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
|
| 590 |
+
return Vectorized<c10::Half>(vfmsq_f16(c, a, b));
|
| 591 |
+
#else
|
| 592 |
+
const auto [a_float_low, a_float_high] = convert_half_float(a);
|
| 593 |
+
const auto [b_float_low, b_float_high] = convert_half_float(b);
|
| 594 |
+
const auto [c_float_low, c_float_high] = convert_half_float(c);
|
| 595 |
+
return convert_float_half(
|
| 596 |
+
fmsub(a_float_low, b_float_low, c_float_low),
|
| 597 |
+
fmsub(a_float_high, b_float_high, c_float_high));
|
| 598 |
+
#endif
|
| 599 |
+
}
|
| 600 |
+
#endif // !defined(C10_MOBILE) && defined(__aarch64__)
|
| 601 |
+
|
| 602 |
+
} // namespace CPU_CAPABILITY
|
| 603 |
+
} // namespace at::vec
|
lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec128/vec128_reduced_precision_common_neon.h
ADDED
|
@@ -0,0 +1,263 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// Shared code for bfloat16 and float16.
|
| 3 |
+
|
| 4 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
| 5 |
+
// See Note [Do not compile initializers with AVX]
|
| 6 |
+
|
| 7 |
+
namespace at::vec {
|
| 8 |
+
inline namespace CPU_CAPABILITY {
|
| 9 |
+
|
| 10 |
+
// Shared implementation between Vectorized<c10::Half> and
|
| 11 |
+
// Vectorized<c10::BFloat16>. Uses CRTP to allow derived class
|
| 12 |
+
// customization.
|
| 13 |
+
// Shared implementation between Vectorized<c10::Half> and
// Vectorized<c10::BFloat16>. Uses CRTP to allow derived class
// customization: VecT is the raw NEON register type, ValueT the scalar
// element type, BlendRegs a per-lane selection helper, and Derived the
// concrete subclass whose store/loadu/blendv/*_with_vec_float_method
// members are used here.
template <typename VecT, typename ValueT, template <int, bool> typename BlendRegs, typename Derived>
struct Vectorized16 {
 protected:
  VecT values; // underlying 128-bit register: 8 x 16-bit lanes
 public:
  using value_type = ValueT;
  using size_type = int;
  // Number of lanes: fixed at 8 (128 bits / 16-bit elements).
  static constexpr size_type size() {
    static_assert(sizeof(VecT) == 8 * sizeof(value_type));
    return 8;
  }

 protected:
  // Scalar fallback for binary ops: spill both operands to aligned
  // buffers, apply f lane by lane, reload the result.
  Derived map2(
      const Derived& second,
      value_type (*const f)(value_type, value_type)) const {
    __at_align__ value_type tmp_first[size()];
    __at_align__ value_type tmp_second[size()];
    static_cast<const Derived*>(this)->store(tmp_first); // store this to tmp_first
    second.store(tmp_second);
    for (const auto i : c10::irange(size())) {
      tmp_first[i] = f(tmp_first[i], tmp_second[i]);
    }
    return Derived::loadu(tmp_first);
  }

 public:
  Vectorized16() = default;
  Vectorized16(VecT v) : values(v) {}

  // Implicit conversion to the raw register type, for use with intrinsics.
  operator VecT() const {
    return values;
  }

  // Compile-time blend: lane i is taken from b when bit i of `mask` is
  // set, otherwise from a. BlendRegs<i, pick_b>::impl does the per-lane
  // selection into vec.values.
  template <int64_t mask>
  static Derived blend(const Derived& a, const Derived& b) {
    Derived vec;
    vec.values = BlendRegs<0, (mask & 0x01) != 0>::impl(
        a.values, b.values, vec.values);
    vec.values = BlendRegs<1, (mask & 0x02) != 0>::impl(
        a.values, b.values, vec.values);
    vec.values = BlendRegs<2, (mask & 0x04) != 0>::impl(
        a.values, b.values, vec.values);
    vec.values = BlendRegs<3, (mask & 0x08) != 0>::impl(
        a.values, b.values, vec.values);

    vec.values = BlendRegs<4, (mask & 0x10) != 0>::impl(
        a.values, b.values, vec.values);
    vec.values = BlendRegs<5, (mask & 0x20) != 0>::impl(
        a.values, b.values, vec.values);
    vec.values = BlendRegs<6, (mask & 0x40) != 0>::impl(
        a.values, b.values, vec.values);
    vec.values = BlendRegs<7, (mask & 0x80) != 0>::impl(
        a.values, b.values, vec.values);

    return vec;
  }

  // Returns {base, base+step, ..., base+7*step}, computed as a single
  // fused multiply-add of lane indices with the step.
  template <typename step_t>
  static Derived arange(
      value_type base = 0,
      step_t step = static_cast<step_t>(1)) {
    const Derived base_vec(base);
    const Derived step_vec(step);
    const Derived step_sizes(
        value_type(0),
        value_type(1),
        value_type(2),
        value_type(3),
        value_type(4),
        value_type(5),
        value_type(6),
        value_type(7));
    return fmadd(step_sizes, step_vec, base_vec);
  }

  // Very slow implementation of indexing.
  // Only required because vec256_qint refers to this.
  // Once we specialize that implementation for ARM
  // this should be removed. TODO (kimishpatel)
  value_type operator[](int idx) const {
    __at_align__ value_type tmp[size()];
    static_cast<const Derived*>(this)->store(tmp);
    return tmp[idx];
  }

  // Returns an integer bitmask with bit i set when lane i compares equal
  // to zero (scalar scan).
  int zero_mask() const {
    __at_align__ value_type tmp[size()];
    static_cast<const Derived*>(this)->store(tmp);
    int mask = 0;
    for (int i = 0; i < size(); ++i) {
      if (tmp[i] == 0) {
        mask |= (1 << i);
      }
    }
    return mask;
  }

  // Scalar fallback for unary ops: spill, apply f per lane, reload.
  Derived map(value_type (*const f)(value_type)) const {
    __at_align__ value_type tmp[size()];
    static_cast<const Derived*>(this)->store(tmp);
    for (const auto i : c10::irange(size())) {
      tmp[i] = f(tmp[i]);
    }
    return Derived::loadu(tmp);
  }

  // angle(): 0 for non-negative lanes, pi for negative lanes; NaN lanes
  // pass through unchanged (matching the generic Vectorized contract).
  Derived angle() const {
    auto zero = Derived(0);
    auto pi = Derived(c10::pi<value_type>);
    auto tmp = Derived::blendv(zero, pi, *static_cast<const Derived*>(this) < zero);
    return Derived::blendv(tmp, *static_cast<const Derived*>(this), static_cast<const Derived*>(this)->isnan());
  }
  // real/imag/conj are trivial for a real-valued vector type.
  Derived real() const {
    return *this;
  }
  Derived imag() const {
    return Derived(0);
  }
  Derived conj() const {
    return *this;
  }

  // Sleef does not support FP16/BF16, so many math functions are applied by
  // converting to FP32, applying the math function, and then converting back to
  // FP16/BF16 (via the derived class's map_with_vec_float_method /
  // map2_with_vec_float_method helpers).
  Derived acos() const {
    return static_cast<const Derived*>(this)->map_with_vec_float_method(&Vectorized<float>::acos);
  }
  Derived acosh() const {
    return static_cast<const Derived*>(this)->map_with_vec_float_method(&Vectorized<float>::acosh);
  }
  Derived asin() const {
    return static_cast<const Derived*>(this)->map_with_vec_float_method(&Vectorized<float>::asin);
  }
  Derived atan() const {
    return static_cast<const Derived*>(this)->map_with_vec_float_method(&Vectorized<float>::atan);
  }
  Derived atanh() const {
    return static_cast<const Derived*>(this)->map_with_vec_float_method(&Vectorized<float>::atanh);
  }
  Derived atan2(const Derived& exp) const {
    return static_cast<const Derived*>(this)->map2_with_vec_float_method(exp, &Vectorized<float>::atan2);
  }
  Derived copysign(const Derived& sign) const {
    return static_cast<const Derived*>(this)->map2_with_vec_float_method(sign, &Vectorized<float>::copysign);
  }
  Derived erf() const {
    return static_cast<const Derived*>(this)->map_with_vec_float_method(&Vectorized<float>::erf);
  }
  Derived erfc() const {
    return static_cast<const Derived*>(this)->map_with_vec_float_method(&Vectorized<float>::erfc);
  }
  Derived erfinv() const {
    return static_cast<const Derived*>(this)->map_with_vec_float_method(&Vectorized<float>::erfinv);
  }
  Derived exp() const {
    return static_cast<const Derived*>(this)->map_with_vec_float_method(&Vectorized<float>::exp);
  }
  Derived exp2() const {
    return static_cast<const Derived*>(this)->map_with_vec_float_method(&Vectorized<float>::exp2);
  }
  Derived expm1() const {
    return static_cast<const Derived*>(this)->map_with_vec_float_method(&Vectorized<float>::expm1);
  }
  Derived exp_u20() const {
    return static_cast<const Derived*>(this)->map_with_vec_float_method(&Vectorized<float>::exp_u20);
  }
  Derived fmod(const Derived& q) const {
    // This function is questionable with a conversion, so we use map2
    // (scalar std::fmod per lane) rather than the float round-trip.
    return map2(q, std::fmod);
  }
  Derived hypot(const Derived& b) const {
    return static_cast<const Derived*>(this)->map2_with_vec_float_method(b, &Vectorized<float>::hypot);
  }
  Derived i0() const {
    return static_cast<const Derived*>(this)->map_with_vec_float_method(&Vectorized<float>::i0);
  }
  Derived i0e() const {
    return static_cast<const Derived*>(this)->map_with_vec_float_method(&Vectorized<float>::i0e);
  }
  Derived digamma() const {
    return static_cast<const Derived*>(this)->map_with_vec_float_method(&Vectorized<float>::digamma);
  }
  Derived igamma(const Derived& x) const {
    return static_cast<const Derived*>(this)->map2_with_vec_float_method(x, &Vectorized<float>::igamma);
  }
  Derived igammac(const Derived& x) const {
    return static_cast<const Derived*>(this)->map2_with_vec_float_method(x, &Vectorized<float>::igammac);
  }
  Derived log() const {
    return static_cast<const Derived*>(this)->map_with_vec_float_method(&Vectorized<float>::log);
  }
  Derived log10() const {
    return static_cast<const Derived*>(this)->map_with_vec_float_method(&Vectorized<float>::log10);
  }
  Derived log1p() const {
    return static_cast<const Derived*>(this)->map_with_vec_float_method(&Vectorized<float>::log1p);
  }
  Derived log2() const {
    return static_cast<const Derived*>(this)->map_with_vec_float_method(&Vectorized<float>::log2);
  }
  Derived nextafter(const Derived& b) const {
    // This function does not make sense with conversion, so we use map2
    // (scalar std::nextafter per lane preserves 16-bit ULP semantics).
    return map2(b, std::nextafter);
  }
  Derived sin() const {
    return static_cast<const Derived*>(this)->map_with_vec_float_method(&Vectorized<float>::sin);
  }
  Derived sinh() const {
    return static_cast<const Derived*>(this)->map_with_vec_float_method(&Vectorized<float>::sinh);
  }
  Derived cos() const {
    return static_cast<const Derived*>(this)->map_with_vec_float_method(&Vectorized<float>::cos);
  }
  Derived cosh() const {
    return static_cast<const Derived*>(this)->map_with_vec_float_method(&Vectorized<float>::cosh);
  }
  Derived ceil() const {
    // This function is questionable with a conversion, so we use map
    // (ATen's scalar ceil_impl per lane).
    return map(at::native::ceil_impl);
  }
  Derived floor() const {
    // This function is questionable with a conversion, so we use map
    return map(at::native::floor_impl);
  }
  Derived round() const {
    // This function is questionable with a conversion, so we use map
    return map(at::native::round_impl);
  }
  Derived tan() const {
    return static_cast<const Derived*>(this)->map_with_vec_float_method(&Vectorized<float>::tan);
  }
  Derived tanh() const {
    return static_cast<const Derived*>(this)->map_with_vec_float_method(&Vectorized<float>::tanh);
  }
  Derived lgamma() const {
    return static_cast<const Derived*>(this)->map_with_vec_float_method(&Vectorized<float>::lgamma);
  }
  Derived rsqrt() const {
    // 1/sqrt(x) via the derived class's sqrt and reciprocal.
    return static_cast<const Derived*>(this)->sqrt().reciprocal();
  }
  Derived pow(const Derived& exp) const {
    return static_cast<const Derived*>(this)->map2_with_vec_float_method(exp, &Vectorized<float>::pow);
  }

};
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
} // namespace CPU_CAPABILITY
|
| 263 |
+
} // namespace at::vec
|
lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_bfloat16.h
ADDED
|
@@ -0,0 +1,1670 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
| 4 |
+
// See Note [Do not compile initializers with AVX]
|
| 5 |
+
|
| 6 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 7 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 8 |
+
#include <c10/util/irange.h>
|
| 9 |
+
|
| 10 |
+
#if defined(CPU_CAPABILITY_AVX512)
|
| 11 |
+
#define SLEEF_STATIC_LIBS
|
| 12 |
+
#include <sleef.h>
|
| 13 |
+
#endif
|
| 14 |
+
|
| 15 |
+
namespace at {
|
| 16 |
+
namespace vec {
|
| 17 |
+
// See Note [CPU_CAPABILITY namespace]
|
| 18 |
+
inline namespace CPU_CAPABILITY {
|
| 19 |
+
|
| 20 |
+
#if defined(CPU_CAPABILITY_AVX512)
|
| 21 |
+
|
| 22 |
+
#ifndef SLEEF_CONST
|
| 23 |
+
#if (defined(__GNUC__) || defined(__CLANG__)) && !defined(__INTEL_COMPILER)
|
| 24 |
+
#define SLEEF_CONST const
|
| 25 |
+
#else
|
| 26 |
+
#define SLEEF_CONST
|
| 27 |
+
#endif
|
| 28 |
+
#define SLEEF_CONST_OLD SLEEF_CONST
|
| 29 |
+
#else
|
| 30 |
+
#define SLEEF_CONST_OLD
|
| 31 |
+
#endif
|
| 32 |
+
|
| 33 |
+
// bfloat16 conversion
|
| 34 |
+
static inline void cvtbf16_fp32(const __m256i& a, __m512& o) {
|
| 35 |
+
o = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_cvtepu16_epi32(a), 16));
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
static inline void cvtbf16_fp32(const __m512i& a, __m512& o1, __m512& o2) {
|
| 39 |
+
__m256i lo = _mm512_extracti32x8_epi32(a, 0);
|
| 40 |
+
__m256i hi = _mm512_extracti32x8_epi32(a, 1);
|
| 41 |
+
cvtbf16_fp32(lo, o1);
|
| 42 |
+
cvtbf16_fp32(hi, o2);
|
| 43 |
+
}
|
| 44 |
+
|
| 45 |
+
static inline __m256i cvtfp32_bf16(const __m512& src) {
|
| 46 |
+
__m512i value = _mm512_castps_si512(src);
|
| 47 |
+
__m512i nan = _mm512_set1_epi32(0xffff);
|
| 48 |
+
auto mask_value = _mm512_cmp_ps_mask(src, src, _CMP_ORD_Q);
|
| 49 |
+
__m512i ones = _mm512_set1_epi32(0x1);
|
| 50 |
+
__m512i vec_bias = _mm512_set1_epi32(0x7fff);
|
| 51 |
+
// uint32_t lsb = (input >> 16) & 1;
|
| 52 |
+
auto t_value = _mm512_and_si512(_mm512_srli_epi32(value, 16), ones);
|
| 53 |
+
// uint32_t rounding_bias = 0x7fff + lsb;
|
| 54 |
+
t_value = _mm512_add_epi32(t_value, vec_bias);
|
| 55 |
+
// input += rounding_bias;
|
| 56 |
+
t_value = _mm512_add_epi32(t_value, value);
|
| 57 |
+
// input = input >> 16;
|
| 58 |
+
t_value = _mm512_srli_epi32(t_value, 16);
|
| 59 |
+
// Check NaN before converting back to bf16
|
| 60 |
+
t_value = _mm512_mask_blend_epi32(mask_value, nan, t_value);
|
| 61 |
+
return _mm512_cvtusepi32_epi16(t_value);
|
| 62 |
+
}
|
| 63 |
+
|
| 64 |
+
static inline __m512i cvtfp32_bf16(const __m512& a, const __m512& b) {
|
| 65 |
+
__m512i lo = _mm512_castps_si512(a);
|
| 66 |
+
__m512i hi = _mm512_castps_si512(b);
|
| 67 |
+
__m512i nan = _mm512_set1_epi32(0xffff);
|
| 68 |
+
auto mask_lo = _mm512_cmp_ps_mask(a, a, _CMP_ORD_Q);
|
| 69 |
+
auto mask_hi = _mm512_cmp_ps_mask(b, b, _CMP_ORD_Q);
|
| 70 |
+
__m512i ones = _mm512_set1_epi32(0x1);
|
| 71 |
+
__m512i vec_bias = _mm512_set1_epi32(0x7fff);
|
| 72 |
+
// uint32_t lsb = (input >> 16) & 1;
|
| 73 |
+
auto t_lo = _mm512_and_si512(_mm512_srli_epi32(lo, 16), ones);
|
| 74 |
+
auto t_hi = _mm512_and_si512(_mm512_srli_epi32(hi, 16), ones);
|
| 75 |
+
// uint32_t rounding_bias = 0x7fff + lsb;
|
| 76 |
+
t_lo = _mm512_add_epi32(t_lo, vec_bias);
|
| 77 |
+
t_hi = _mm512_add_epi32(t_hi, vec_bias);
|
| 78 |
+
// input += rounding_bias;
|
| 79 |
+
t_lo = _mm512_add_epi32(t_lo, lo);
|
| 80 |
+
t_hi = _mm512_add_epi32(t_hi, hi);
|
| 81 |
+
// input = input >> 16;
|
| 82 |
+
t_lo = _mm512_srli_epi32(t_lo, 16);
|
| 83 |
+
t_hi = _mm512_srli_epi32(t_hi, 16);
|
| 84 |
+
// Check NaN before converting back to bf16
|
| 85 |
+
t_lo = _mm512_mask_blend_epi32(mask_lo, nan, t_lo);
|
| 86 |
+
t_hi = _mm512_mask_blend_epi32(mask_hi, nan, t_hi);
|
| 87 |
+
|
| 88 |
+
t_lo = _mm512_packus_epi32(t_lo, t_hi); // t_hi[4-7] t_lo[4-7] t_hi[0-4] t_lo[0-4]
|
| 89 |
+
__m512i idx = _mm512_set_epi64(7, 5, 3, 1, 6, 4, 2, 0);
|
| 90 |
+
return _mm512_permutexvar_epi64(idx, t_lo);
|
| 91 |
+
}
|
| 92 |
+
|
| 93 |
+
static inline __m512i merge_compare_result(const __m512& a, const __m512& b) {
|
| 94 |
+
__m512i lo = _mm512_castps_si512(a);
|
| 95 |
+
__m512i hi = _mm512_castps_si512(b);
|
| 96 |
+
lo = _mm512_srli_epi32(lo, 16);
|
| 97 |
+
hi = _mm512_srli_epi32(hi, 16);
|
| 98 |
+
auto out = _mm512_packus_epi32(lo, hi);
|
| 99 |
+
__m512i idx = _mm512_set_epi64(7, 5, 3, 1, 6, 4, 2, 0);
|
| 100 |
+
return _mm512_permutexvar_epi64(idx, out);
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
// float16 conversion
|
| 104 |
+
static inline void cvtfp16_fp32(const __m256i& a, __m512& o) {
|
| 105 |
+
o = _mm512_cvtph_ps(a);
|
| 106 |
+
}
|
| 107 |
+
|
| 108 |
+
static inline void cvtfp16_fp32(const __m512i& a, __m512& o1, __m512& o2) {
|
| 109 |
+
__m256i lo = _mm512_extracti32x8_epi32(a, 0);
|
| 110 |
+
__m256i hi = _mm512_extracti32x8_epi32(a, 1);
|
| 111 |
+
cvtfp16_fp32(lo, o1);
|
| 112 |
+
cvtfp16_fp32(hi, o2);
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
static inline __m256i cvtfp32_fp16(const __m512& src) {
|
| 116 |
+
return _mm512_cvtps_ph(
|
| 117 |
+
src, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
static inline __m512i cvtfp32_fp16(const __m512& a, const __m512& b) {
|
| 121 |
+
__m256i lo = _mm512_cvtps_ph(
|
| 122 |
+
a, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
|
| 123 |
+
__m256i hi = _mm512_cvtps_ph(
|
| 124 |
+
b, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
|
| 125 |
+
__m512 t_lo = _mm512_castsi512_ps(_mm512_castsi256_si512(lo));
|
| 126 |
+
__m256 t_hi = _mm256_castsi256_ps(hi);
|
| 127 |
+
return _mm512_castps_si512(_mm512_insertf32x8(t_lo, t_hi, 1));
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
// dtype conversion between float16/bfloat16 and float32
|
| 131 |
+
template <typename T, typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
|
| 132 |
+
inline void cvt_to_fp32(const __m256i& a, __m512& o);
|
| 133 |
+
template <> inline void cvt_to_fp32<BFloat16>(const __m256i& a, __m512& o) {
|
| 134 |
+
cvtbf16_fp32(a, o);
|
| 135 |
+
}
|
| 136 |
+
template <> inline void cvt_to_fp32<Half>(const __m256i& a, __m512& o) {
|
| 137 |
+
cvtfp16_fp32(a, o);
|
| 138 |
+
}
|
| 139 |
+
|
| 140 |
+
template <typename T, typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
|
| 141 |
+
inline void cvt_to_fp32(const __m512i& a, __m512& o1, __m512& o2);
|
| 142 |
+
template <> inline void cvt_to_fp32<BFloat16>(const __m512i& a, __m512& o1, __m512& o2) {
|
| 143 |
+
cvtbf16_fp32(a, o1, o2);
|
| 144 |
+
}
|
| 145 |
+
template <> inline void cvt_to_fp32<Half>(const __m512i& a, __m512& o1, __m512& o2) {
|
| 146 |
+
cvtfp16_fp32(a, o1, o2);
|
| 147 |
+
}
|
| 148 |
+
|
| 149 |
+
template <typename T, bool is_compare_op = false,
|
| 150 |
+
typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
|
| 151 |
+
inline __m512i cvt_from_fp32(const __m512& a, const __m512& b);
|
| 152 |
+
template <> inline __m512i cvt_from_fp32<BFloat16, false>(const __m512& a, const __m512& b) {
|
| 153 |
+
return cvtfp32_bf16(a, b);
|
| 154 |
+
}
|
| 155 |
+
template <> inline __m512i cvt_from_fp32<BFloat16, true>(const __m512& a, const __m512& b) {
|
| 156 |
+
return merge_compare_result(a, b);
|
| 157 |
+
}
|
| 158 |
+
template <> inline __m512i cvt_from_fp32<Half, false>(const __m512& a, const __m512& b) {
|
| 159 |
+
return cvtfp32_fp16(a, b);
|
| 160 |
+
}
|
| 161 |
+
template <> inline __m512i cvt_from_fp32<Half, true>(const __m512& a, const __m512& b) {
|
| 162 |
+
return cvtfp32_fp16(a, b);
|
| 163 |
+
}
|
| 164 |
+
|
| 165 |
+
template <typename T>
|
| 166 |
+
class Vectorized16 {
|
| 167 |
+
static_assert(
|
| 168 |
+
is_reduced_floating_point_v<T>,
|
| 169 |
+
"Support only float16 and bfloat16.");
|
| 170 |
+
private:
|
| 171 |
+
__m512i values;
|
| 172 |
+
public:
|
| 173 |
+
using value_type = uint16_t;
|
| 174 |
+
using size_type = int;
|
| 175 |
+
static constexpr size_type size() {
|
| 176 |
+
return 32;
|
| 177 |
+
}
|
| 178 |
+
Vectorized16() {}
|
| 179 |
+
Vectorized16(__m512i v) : values(v) {}
|
| 180 |
+
Vectorized16(T val) {
|
| 181 |
+
value_type uw = val.x;
|
| 182 |
+
values = _mm512_set1_epi16(uw);
|
| 183 |
+
}
|
| 184 |
+
Vectorized16(T val1, T val2, T val3, T val4,
|
| 185 |
+
T val5, T val6, T val7, T val8,
|
| 186 |
+
T val9, T val10, T val11, T val12,
|
| 187 |
+
T val13, T val14, T val15, T val16,
|
| 188 |
+
T val17, T val18, T val19, T val20,
|
| 189 |
+
T val21, T val22, T val23, T val24,
|
| 190 |
+
T val25, T val26, T val27, T val28,
|
| 191 |
+
T val29, T val30, T val31, T val32) {
|
| 192 |
+
values = _mm512_set_epi16(
|
| 193 |
+
val32.x, val31.x, val30.x, val29.x, val28.x, val27.x, val26.x, val25.x,
|
| 194 |
+
val24.x, val23.x, val22.x, val21.x, val20.x, val19.x, val18.x, val17.x,
|
| 195 |
+
val16.x, val15.x, val14.x, val13.x, val12.x, val11.x, val10.x, val9.x,
|
| 196 |
+
val8.x, val7.x, val6.x, val5.x, val4.x, val3.x, val2.x, val1.x);
|
| 197 |
+
}
|
| 198 |
+
operator __m512i() const {
|
| 199 |
+
return values;
|
| 200 |
+
}
|
| 201 |
+
T& operator[](int idx) = delete;
|
| 202 |
+
const T& operator[](int idx) const = delete;
|
| 203 |
+
int zero_mask() const {
|
| 204 |
+
// returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit
|
| 205 |
+
return _mm512_cmpeq_epi16_mask(values, _mm512_set1_epi16(0));
|
| 206 |
+
}
|
| 207 |
+
static Vectorized<T> loadu(const void* ptr, int16_t count = size()) {
|
| 208 |
+
if (count == size())
|
| 209 |
+
return _mm512_loadu_si512(reinterpret_cast<const __m512i*>(ptr));
|
| 210 |
+
|
| 211 |
+
__mmask32 mask = (1ULL << count) - 1;
|
| 212 |
+
return _mm512_maskz_loadu_epi16(mask, ptr);
|
| 213 |
+
}
|
| 214 |
+
void store(void* ptr, int count = size()) const {
|
| 215 |
+
if (count == size()) {
|
| 216 |
+
_mm512_storeu_si512(reinterpret_cast<__m512i*>(ptr), values);
|
| 217 |
+
} else if (count > 0) {
|
| 218 |
+
__mmask32 mask = (1ULL << count) - 1;
|
| 219 |
+
_mm512_mask_storeu_epi16(ptr, mask, values);
|
| 220 |
+
}
|
| 221 |
+
}
|
| 222 |
+
template <int64_t mask>
|
| 223 |
+
static Vectorized<T> blend(const Vectorized<T>& a, const Vectorized<T>& b) {
|
| 224 |
+
return _mm512_mask_blend_epi16(mask, a.values, b.values);
|
| 225 |
+
}
|
| 226 |
+
static Vectorized<T> blendv(const Vectorized<T>& a,
|
| 227 |
+
const Vectorized<T>& b, const Vectorized<T>& mask) {
|
| 228 |
+
auto all_ones = _mm512_set1_epi16(0xFFFF);
|
| 229 |
+
auto mask_ = _mm512_cmp_epi16_mask(mask, all_ones, _MM_CMPINT_EQ);
|
| 230 |
+
return _mm512_mask_blend_epi16(mask_, a.values, b.values);
|
| 231 |
+
}
|
| 232 |
+
template<typename step_t>
|
| 233 |
+
static Vectorized<T> arange(T base = 0.f, step_t step = static_cast<step_t>(1)) {
|
| 234 |
+
return Vectorized<T>(
|
| 235 |
+
base, base + step, base + 2 * step, base + 3 * step,
|
| 236 |
+
base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step,
|
| 237 |
+
base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step,
|
| 238 |
+
base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step,
|
| 239 |
+
base + 16 * step, base + 17 * step, base + 18 * step, base + 19 * step,
|
| 240 |
+
base + 20 * step, base + 21 * step, base + 22 * step, base + 23 * step,
|
| 241 |
+
base + 24 * step, base + 25 * step, base + 26 * step, base + 27 * step,
|
| 242 |
+
base + 28 * step, base + 29 * step, base + 30 * step, base + 31 * step);
|
| 243 |
+
}
|
| 244 |
+
static Vectorized<T> set(const Vectorized<T>& a,
|
| 245 |
+
const Vectorized<T>& b, int64_t count = size()) {
|
| 246 |
+
switch (count) {
|
| 247 |
+
case 0:
|
| 248 |
+
return a;
|
| 249 |
+
case 1:
|
| 250 |
+
return blend<1>(a, b);
|
| 251 |
+
case 2:
|
| 252 |
+
return blend<3>(a, b);
|
| 253 |
+
case 3:
|
| 254 |
+
return blend<7>(a, b);
|
| 255 |
+
case 4:
|
| 256 |
+
return blend<15>(a, b);
|
| 257 |
+
case 5:
|
| 258 |
+
return blend<31>(a, b);
|
| 259 |
+
case 6:
|
| 260 |
+
return blend<63>(a, b);
|
| 261 |
+
case 7:
|
| 262 |
+
return blend<127>(a, b);
|
| 263 |
+
case 8:
|
| 264 |
+
return blend<255>(a, b);
|
| 265 |
+
case 9:
|
| 266 |
+
return blend<511>(a, b);
|
| 267 |
+
case 10:
|
| 268 |
+
return blend<1023>(a, b);
|
| 269 |
+
case 11:
|
| 270 |
+
return blend<2047>(a, b);
|
| 271 |
+
case 12:
|
| 272 |
+
return blend<4095>(a, b);
|
| 273 |
+
case 13:
|
| 274 |
+
return blend<8191>(a, b);
|
| 275 |
+
case 14:
|
| 276 |
+
return blend<16383>(a, b);
|
| 277 |
+
case 15:
|
| 278 |
+
return blend<32767>(a, b);
|
| 279 |
+
case 16:
|
| 280 |
+
return blend<65535>(a, b);
|
| 281 |
+
case 17:
|
| 282 |
+
return blend<131071>(a, b);
|
| 283 |
+
case 18:
|
| 284 |
+
return blend<262143>(a, b);
|
| 285 |
+
case 19:
|
| 286 |
+
return blend<524287>(a, b);
|
| 287 |
+
case 20:
|
| 288 |
+
return blend<1048575>(a, b);
|
| 289 |
+
case 21:
|
| 290 |
+
return blend<2097151>(a, b);
|
| 291 |
+
case 22:
|
| 292 |
+
return blend<4194303>(a, b);
|
| 293 |
+
case 23:
|
| 294 |
+
return blend<8388607>(a, b);
|
| 295 |
+
case 24:
|
| 296 |
+
return blend<16777215>(a, b);
|
| 297 |
+
case 25:
|
| 298 |
+
return blend<33554431>(a, b);
|
| 299 |
+
case 26:
|
| 300 |
+
return blend<67108863>(a, b);
|
| 301 |
+
case 27:
|
| 302 |
+
return blend<134217727>(a, b);
|
| 303 |
+
case 28:
|
| 304 |
+
return blend<268435455>(a, b);
|
| 305 |
+
case 29:
|
| 306 |
+
return blend<536870911>(a, b);
|
| 307 |
+
case 30:
|
| 308 |
+
return blend<1073741823>(a, b);
|
| 309 |
+
case 31:
|
| 310 |
+
return blend<2147483647>(a, b);
|
| 311 |
+
}
|
| 312 |
+
return b;
|
| 313 |
+
}
|
| 314 |
+
#pragma clang diagnostic push
|
| 315 |
+
#pragma clang diagnostic ignored "-Wignored-qualifiers"
|
| 316 |
+
|
| 317 |
+
Vectorized<T> map(SLEEF_CONST __m512 (*SLEEF_CONST_OLD vop)(__m512)) const {
|
| 318 |
+
__m512 lo, hi;
|
| 319 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 320 |
+
const auto o1 = vop(lo);
|
| 321 |
+
const auto o2 = vop(hi);
|
| 322 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 323 |
+
}
|
| 324 |
+
Vectorized<T> isnan() const {
|
| 325 |
+
__m512 lo, hi;
|
| 326 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 327 |
+
__mmask16 lo_mask, hi_mask;
|
| 328 |
+
__m512 zero = _mm512_set1_ps(0.0);
|
| 329 |
+
__m512i zeroi = _mm512_castps_si512(zero);
|
| 330 |
+
lo_mask = _mm512_cmp_ps_mask(lo, zero, _CMP_UNORD_Q);
|
| 331 |
+
lo = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zeroi, lo_mask, 0xFFFF'FFFF));
|
| 332 |
+
hi_mask = _mm512_cmp_ps_mask(hi, zero, _CMP_UNORD_Q);
|
| 333 |
+
hi = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zeroi, hi_mask, 0xFFFF'FFFF));
|
| 334 |
+
return merge_compare_result(lo, hi);
|
| 335 |
+
}
|
| 336 |
+
#pragma clang diagnostic pop
|
| 337 |
+
Vectorized<T> abs() const {
|
| 338 |
+
return _mm512_andnot_si512(_mm512_set1_epi16(0x8000), values);
|
| 339 |
+
}
|
| 340 |
+
Vectorized<T> angle() const {
|
| 341 |
+
__m512 lo, hi;
|
| 342 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 343 |
+
auto angle_lambda = [](__m512 values) {
|
| 344 |
+
const auto zero_vec = _mm512_set1_ps(0.f);
|
| 345 |
+
const auto nan_vec = _mm512_set1_ps(NAN);
|
| 346 |
+
const auto not_nan_mask = _mm512_cmp_ps_mask(values, values, _CMP_EQ_OQ);
|
| 347 |
+
const auto non_nan_mask_vec = _mm512_mask_set1_epi32(_mm512_castps_si512(zero_vec),
|
| 348 |
+
not_nan_mask, 0xFFFFFFFF);
|
| 349 |
+
const auto nan_mask = _mm512_cmp_ps_mask(_mm512_castsi512_ps(non_nan_mask_vec),
|
| 350 |
+
zero_vec, _CMP_EQ_OQ);
|
| 351 |
+
const auto pi = _mm512_set1_ps(c10::pi<float>);
|
| 352 |
+
|
| 353 |
+
const auto neg_mask = _mm512_cmp_ps_mask(values, zero_vec, _CMP_LT_OQ);
|
| 354 |
+
auto angle = _mm512_mask_blend_ps(neg_mask, zero_vec, pi);
|
| 355 |
+
angle = _mm512_mask_blend_ps(nan_mask, angle, nan_vec);
|
| 356 |
+
return angle;
|
| 357 |
+
};
|
| 358 |
+
auto o1 = angle_lambda(lo);
|
| 359 |
+
auto o2 = angle_lambda(hi);
|
| 360 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 361 |
+
}
|
| 362 |
+
Vectorized<T> real() const {
|
| 363 |
+
return *this;
|
| 364 |
+
}
|
| 365 |
+
Vectorized<T> imag() const {
|
| 366 |
+
return _mm512_set1_epi16(0);
|
| 367 |
+
}
|
| 368 |
+
Vectorized<T> conj() const {
|
| 369 |
+
return *this;
|
| 370 |
+
}
|
| 371 |
+
Vectorized<T> acos() const {
|
| 372 |
+
return map(Sleef_acosf16_u10);
|
| 373 |
+
}
|
| 374 |
+
Vectorized<T> acosh() const {
|
| 375 |
+
return map(Sleef_acoshf16_u10);
|
| 376 |
+
}
|
| 377 |
+
Vectorized<T> asin() const {
|
| 378 |
+
return map(Sleef_asinf16_u10);
|
| 379 |
+
}
|
| 380 |
+
Vectorized<T> atan() const {
|
| 381 |
+
return map(Sleef_atanf16_u10);
|
| 382 |
+
}
|
| 383 |
+
Vectorized<T> atanh() const {
|
| 384 |
+
return map(Sleef_atanhf16_u10);
|
| 385 |
+
}
|
| 386 |
+
Vectorized<T> atan2(const Vectorized<T> &b) const {
|
| 387 |
+
__m512 lo, hi;
|
| 388 |
+
__m512 b1, b2;
|
| 389 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 390 |
+
cvt_to_fp32<T>(b.values, b1, b2);
|
| 391 |
+
auto o1 = Sleef_atan2f16_u10(lo, b1);
|
| 392 |
+
auto o2 = Sleef_atan2f16_u10(hi, b2);
|
| 393 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 394 |
+
}
|
| 395 |
+
Vectorized<T> copysign(const Vectorized<T> &sign) const {
|
| 396 |
+
// copy sign bit (0x8000) from sign and remaining bits from values
|
| 397 |
+
__m512i mask_value = _mm512_set1_epi32(~0x80008000);
|
| 398 |
+
__m512i mask_signbit = _mm512_set1_epi32(0x80008000);
|
| 399 |
+
return Vectorized<T>(
|
| 400 |
+
_mm512_or_si512(
|
| 401 |
+
_mm512_and_si512(values, mask_value),
|
| 402 |
+
_mm512_and_si512(sign, mask_signbit)));
|
| 403 |
+
}
|
| 404 |
+
Vectorized<T> erf() const {
|
| 405 |
+
return map(Sleef_erff16_u10);
|
| 406 |
+
}
|
| 407 |
+
Vectorized<T> erfc() const {
|
| 408 |
+
return map(Sleef_erfcf16_u15);
|
| 409 |
+
}
|
| 410 |
+
Vectorized<T> erfinv() const {
|
| 411 |
+
__m512 lo, hi;
|
| 412 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 413 |
+
__at_align__ float tmp1[size() / 2], tmp2[size() / 2];
|
| 414 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
|
| 415 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
|
| 416 |
+
for (int64_t i = 0; i < size() / 2; i++) {
|
| 417 |
+
tmp1[i] = calc_erfinv(tmp1[i]);
|
| 418 |
+
tmp2[i] = calc_erfinv(tmp2[i]);
|
| 419 |
+
}
|
| 420 |
+
auto o1 = _mm512_loadu_ps(tmp1);
|
| 421 |
+
auto o2 = _mm512_loadu_ps(tmp2);
|
| 422 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 423 |
+
}
|
| 424 |
+
Vectorized<T> exp() const {
|
| 425 |
+
return map(Sleef_expf16_u10);
|
| 426 |
+
}
|
| 427 |
+
Vectorized<T> exp2() const {
|
| 428 |
+
return map(Sleef_exp2f16_u10);
|
| 429 |
+
}
|
| 430 |
+
Vectorized<T> expm1() const {
|
| 431 |
+
return map(Sleef_expm1f16_u10);
|
| 432 |
+
}
|
| 433 |
+
Vectorized<T> exp_u20() const {
|
| 434 |
+
return exp();
|
| 435 |
+
}
|
| 436 |
+
Vectorized<T> fmod(const Vectorized<T> & q) const {
|
| 437 |
+
__m512 x_lo, x_hi;
|
| 438 |
+
cvt_to_fp32<T>(values, x_lo, x_hi);
|
| 439 |
+
__m512 q_lo, q_hi;
|
| 440 |
+
cvtbf16_fp32(q.values, q_lo, q_hi);
|
| 441 |
+
auto o1 = Sleef_fmodf16(x_lo, q_lo);
|
| 442 |
+
auto o2 = Sleef_fmodf16(x_hi, q_hi);
|
| 443 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 444 |
+
}
|
| 445 |
+
Vectorized<T> hypot(const Vectorized<T> &b) const {
|
| 446 |
+
__m512 lo, hi;
|
| 447 |
+
__m512 b1, b2;
|
| 448 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 449 |
+
cvt_to_fp32<T>(b.values, b1, b2);
|
| 450 |
+
auto o1 = Sleef_hypotf16_u05(lo, b1);
|
| 451 |
+
auto o2 = Sleef_hypotf16_u05(hi, b2);
|
| 452 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 453 |
+
}
|
| 454 |
+
Vectorized<T> i0() const {
|
| 455 |
+
__m512 lo, hi;
|
| 456 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 457 |
+
__at_align__ float tmp1[size() / 2], tmp2[size() / 2];
|
| 458 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
|
| 459 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
|
| 460 |
+
for (int64_t i = 0; i < size() / 2; i++) {
|
| 461 |
+
tmp1[i] = calc_i0(tmp1[i]);
|
| 462 |
+
tmp2[i] = calc_i0(tmp2[i]);
|
| 463 |
+
}
|
| 464 |
+
auto o1 = _mm512_loadu_ps(tmp1);
|
| 465 |
+
auto o2 = _mm512_loadu_ps(tmp2);
|
| 466 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 467 |
+
}
|
| 468 |
+
Vectorized<T> i0e() const {
|
| 469 |
+
__m512 lo, hi;
|
| 470 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 471 |
+
constexpr auto sz = size();
|
| 472 |
+
__at_align__ float tmp1[sz / 2], tmp2[sz / 2];
|
| 473 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
|
| 474 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
|
| 475 |
+
|
| 476 |
+
for (auto i = decltype(sz){0}; i < sz / 2; i++) {
|
| 477 |
+
tmp1[i] = calc_i0e(tmp1[i]);
|
| 478 |
+
tmp2[i] = calc_i0e(tmp2[i]);
|
| 479 |
+
}
|
| 480 |
+
const auto o1 = _mm512_loadu_ps(tmp1);
|
| 481 |
+
const auto o2 = _mm512_loadu_ps(tmp2);
|
| 482 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 483 |
+
}
|
| 484 |
+
Vectorized<T> digamma() const {
|
| 485 |
+
__m512 lo, hi;
|
| 486 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 487 |
+
constexpr auto sz = size();
|
| 488 |
+
__at_align__ float tmp1[sz / 2], tmp2[sz / 2];
|
| 489 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
|
| 490 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
|
| 491 |
+
|
| 492 |
+
for (auto i = decltype(sz){0}; i < sz / 2; i++) {
|
| 493 |
+
tmp1[i] = calc_digamma(tmp1[i]);
|
| 494 |
+
tmp2[i] = calc_digamma(tmp2[i]);
|
| 495 |
+
}
|
| 496 |
+
const auto o1 = _mm512_loadu_ps(tmp1);
|
| 497 |
+
const auto o2 = _mm512_loadu_ps(tmp2);
|
| 498 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 499 |
+
}
|
| 500 |
+
Vectorized<T> igamma(const Vectorized<T> &x) const {
|
| 501 |
+
__m512 lo, hi;
|
| 502 |
+
__m512 xlo, xhi;
|
| 503 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 504 |
+
cvt_to_fp32<T>(x.values, xlo, xhi);
|
| 505 |
+
__at_align__ float tmp1[size() / 2], tmp2[size() / 2];
|
| 506 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
|
| 507 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
|
| 508 |
+
__at_align__ float tmpx1[size() / 2], tmpx2[size() / 2];
|
| 509 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmpx1), xlo);
|
| 510 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmpx2), xhi);
|
| 511 |
+
for (int64_t i = 0; i < size() / 2; ++i) {
|
| 512 |
+
tmp1[i] = calc_igamma(tmp1[i], tmpx1[i]);
|
| 513 |
+
tmp2[i] = calc_igamma(tmp2[i], tmpx2[i]);
|
| 514 |
+
}
|
| 515 |
+
auto o1 = _mm512_loadu_ps(tmp1);
|
| 516 |
+
auto o2 = _mm512_loadu_ps(tmp2);
|
| 517 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 518 |
+
}
|
| 519 |
+
|
| 520 |
+
Vectorized<T> igammac(const Vectorized<T> &x) const {
|
| 521 |
+
__m512 lo, hi;
|
| 522 |
+
__m512 xlo, xhi;
|
| 523 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 524 |
+
cvt_to_fp32<T>(x.values, xlo, xhi);
|
| 525 |
+
__at_align__ float tmp1[size() / 2], tmp2[size() / 2];
|
| 526 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
|
| 527 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
|
| 528 |
+
__at_align__ float tmpx1[size() / 2], tmpx2[size() / 2];
|
| 529 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmpx1), xlo);
|
| 530 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmpx2), xhi);
|
| 531 |
+
for (int64_t i = 0; i < size() / 2; ++i) {
|
| 532 |
+
tmp1[i] = calc_igammac(tmp1[i], tmpx1[i]);
|
| 533 |
+
tmp2[i] = calc_igammac(tmp2[i], tmpx2[i]);
|
| 534 |
+
}
|
| 535 |
+
auto o1 = _mm512_loadu_ps(tmp1);
|
| 536 |
+
auto o2 = _mm512_loadu_ps(tmp2);
|
| 537 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 538 |
+
}
|
| 539 |
+
  // Natural logarithm, lane-wise via SLEEF (u10 = <= 1.0 ulp error bound).
  Vectorized<T> log() const {
    return map(Sleef_logf16_u10);
  }
  // Base-2 logarithm, lane-wise via SLEEF.
  Vectorized<T> log2() const {
    return map(Sleef_log2f16_u10);
  }
  // Base-10 logarithm, lane-wise via SLEEF.
  Vectorized<T> log10() const {
    return map(Sleef_log10f16_u10);
  }
  // log(1 + x), lane-wise via SLEEF (accurate near zero).
  Vectorized<T> log1p() const {
    return map(Sleef_log1pf16_u10);
  }
|
| 551 |
+
  // Sine, lane-wise via SLEEF (u10 accuracy).
  Vectorized<T> sin() const {
    return map(Sleef_sinf16_u10);
  }
  // Hyperbolic sine, lane-wise via SLEEF.
  Vectorized<T> sinh() const {
    return map(Sleef_sinhf16_u10);
  }
  // Cosine, lane-wise via SLEEF.
  Vectorized<T> cos() const {
    return map(Sleef_cosf16_u10);
  }
  // Hyperbolic cosine, lane-wise via SLEEF.
  Vectorized<T> cosh() const {
    return map(Sleef_coshf16_u10);
  }
|
| 563 |
+
  // Round each lane up to the nearest integer, computed in fp32.
  Vectorized<T> ceil() const {
    __m512 lo, hi;
    cvt_to_fp32<T>(values, lo, hi);
    auto o1 = _mm512_ceil_ps(lo);
    auto o2 = _mm512_ceil_ps(hi);
    return cvt_from_fp32<T>(o1, o2);
  }
|
| 570 |
+
  // Round each lane down to the nearest integer, computed in fp32.
  Vectorized<T> floor() const {
    __m512 lo, hi;
    cvt_to_fp32<T>(values, lo, hi);
    auto o1 = _mm512_floor_ps(lo);
    auto o2 = _mm512_floor_ps(hi);
    return cvt_from_fp32<T>(o1, o2);
  }
|
| 577 |
+
  // Negate each lane by flipping the sign bit (bit 15) of every 16-bit
  // element; no fp32 round-trip needed.
  Vectorized<T> neg() const {
    return _mm512_xor_si512(values, _mm512_set1_epi16(0x8000));
  }
|
| 580 |
+
  // Round each lane to the nearest integer (ties handled by the
  // round-to-nearest mode of _MM_FROUND_TO_NEAREST_INT), computed in fp32.
  Vectorized<T> round() const {
    __m512 lo, hi;
    cvt_to_fp32<T>(values, lo, hi);
    auto o1 = _mm512_roundscale_ps(lo, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
    auto o2 = _mm512_roundscale_ps(hi, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
    return cvt_from_fp32<T>(o1, o2);
  }
|
| 587 |
+
  // Tangent, lane-wise via SLEEF (u10 accuracy).
  Vectorized<T> tan() const {
    return map(Sleef_tanf16_u10);
  }
  // Hyperbolic tangent, lane-wise via SLEEF.
  Vectorized<T> tanh() const {
    return map(Sleef_tanhf16_u10);
  }
|
| 593 |
+
  // Truncate each lane toward zero, computed in fp32.
  Vectorized<T> trunc() const {
    __m512 lo, hi;
    cvt_to_fp32<T>(values, lo, hi);
    auto o1 = _mm512_roundscale_ps(lo, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
    auto o2 = _mm512_roundscale_ps(hi, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
    return cvt_from_fp32<T>(o1, o2);
  }
|
| 600 |
+
  // Log-gamma, lane-wise via SLEEF (u10 accuracy).
  Vectorized<T> lgamma() const {
    return map(Sleef_lgammaf16_u10);
  }
|
| 603 |
+
  // Square root of each lane, computed in fp32.
  Vectorized<T> sqrt() const {
    __m512 lo, hi;
    cvt_to_fp32<T>(values, lo, hi);
    auto o1 = _mm512_sqrt_ps(lo);
    auto o2 = _mm512_sqrt_ps(hi);
    return cvt_from_fp32<T>(o1, o2);
  }
|
| 610 |
+
  // 1/x per lane, computed in fp32 with a full-precision divide (not the
  // approximate rcp instruction).
  Vectorized<T> reciprocal() const {
    __m512 lo, hi;
    cvt_to_fp32<T>(values, lo, hi);
    auto ones = _mm512_set1_ps(1);
    auto o1 = _mm512_div_ps(ones, lo);
    auto o2 = _mm512_div_ps(ones, hi);
    return cvt_from_fp32<T>(o1, o2);
  }
|
| 618 |
+
  // 1/sqrt(x) per lane, computed in fp32 as divide-by-sqrt (full precision,
  // not the approximate rsqrt instruction).
  Vectorized<T> rsqrt() const {
    __m512 lo, hi;
    cvt_to_fp32<T>(values, lo, hi);
    auto ones = _mm512_set1_ps(1);
    auto o1 = _mm512_div_ps(ones, _mm512_sqrt_ps(lo));
    auto o2 = _mm512_div_ps(ones, _mm512_sqrt_ps(hi));
    return cvt_from_fp32<T>(o1, o2);
  }
|
| 626 |
+
  // this^b per lane, via SLEEF pow in fp32 (u10 accuracy).
  Vectorized<T> pow(const Vectorized<T> &b) const {
    __m512 lo, hi;
    __m512 b1, b2;
    cvt_to_fp32<T>(values, lo, hi);
    cvt_to_fp32<T>(b.values, b1, b2);
    auto o1 = Sleef_powf16_u10(lo, b1);
    auto o2 = Sleef_powf16_u10(hi, b2);
    return cvt_from_fp32<T>(o1, o2);
  }
|
| 635 |
+
 private:
  // Shared implementation for the comparison operators below: widen both
  // operands to fp32, apply the comparison `op` on each half, and narrow back
  // with is_compare_op=true (which preserves the all-ones/all-zeros lane
  // patterns produced by `op` instead of doing a numeric round).
  template<typename Op>
  Vectorized<T> inline binary_compare(const Vectorized<T>& b, Op op) const {
    __m512 a_lo, a_hi;
    __m512 b_lo, b_hi;
    cvt_to_fp32<T>(values, a_lo, a_hi);
    cvt_to_fp32<T>(b.values, b_lo, b_hi);
    auto o1 = op(a_lo, b_lo);
    auto o2 = op(a_hi, b_hi);
    return cvt_from_fp32<T, /*is_compare_op*/true>(o1, o2);
  }

 public:
  // Each operator yields all-ones bits in lanes where the predicate holds
  // and zero elsewhere. The ordered (_OQ) predicates are false when either
  // operand is NaN; != uses the unordered (_UQ) predicate so NaN != NaN.
  Vectorized<T> inline operator>(const Vectorized<T>& other) const {
    return binary_compare(other, [](__m512 x, __m512 y) {
      auto zero_vec = _mm512_set1_epi32(0);
      auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_GT_OQ);
      return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
    });
  }
  Vectorized<T> inline operator<(const Vectorized<T>& other) const {
    return binary_compare(other, [](__m512 x, __m512 y) {
      auto zero_vec = _mm512_set1_epi32(0);
      auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_LT_OQ);
      return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
    });
  }
  Vectorized<T> inline operator>=(const Vectorized<T>& other) const {
    return binary_compare(other, [](__m512 x, __m512 y) {
      auto zero_vec = _mm512_set1_epi32(0);
      auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_GE_OQ);
      return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
    });
  }
  Vectorized<T> inline operator<=(const Vectorized<T>& other) const {
    return binary_compare(other, [](__m512 x, __m512 y) {
      auto zero_vec = _mm512_set1_epi32(0);
      auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_LE_OQ);
      return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
    });
  }
  Vectorized<T> inline operator==(const Vectorized<T>& other) const {
    return binary_compare(other, [](__m512 x, __m512 y) {
      auto zero_vec = _mm512_set1_epi32(0);
      auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_EQ_OQ);
      return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
    });
  }
  Vectorized<T> inline operator!=(const Vectorized<T>& other) const {
    return binary_compare(other, [](__m512 x, __m512 y) {
      auto zero_vec = _mm512_set1_epi32(0);
      auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_NEQ_UQ);
      return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
    });
  }
};
|
| 691 |
+
|
| 692 |
+
template<typename T, typename Op>
|
| 693 |
+
static inline Vectorized<T> binary_op_as_fp32(const Vectorized<T>& a, const Vectorized<T>& b, Op op) {
|
| 694 |
+
__m512 a_lo, a_hi;
|
| 695 |
+
__m512 b_lo, b_hi;
|
| 696 |
+
cvt_to_fp32<T>(__m512i(a), a_lo, a_hi);
|
| 697 |
+
cvt_to_fp32<T>(__m512i(b), b_lo, b_hi);
|
| 698 |
+
auto o1 = op(a_lo, b_lo);
|
| 699 |
+
auto o2 = op(a_hi, b_hi);
|
| 700 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 701 |
+
}
|
| 702 |
+
|
| 703 |
+
// BFloat16 specialization: inherits all the widen-to-fp32 math from
// Vectorized16 and declares the BFloat16-specific members defined out of
// line below.
template <>
class Vectorized<BFloat16>: public Vectorized16<BFloat16> {
public:
  using Vectorized16::Vectorized16;

  using value_type = BFloat16;

  // Fractional part (implemented below as x - trunc(x)).
  Vectorized<BFloat16> frac() const;

  // Comparison helpers that return 1.0/0.0 per lane (unlike the operators,
  // which return all-ones/all-zeros bit masks); defined out of line below.
  Vectorized<BFloat16> eq(const Vectorized<BFloat16>& other) const;
  Vectorized<BFloat16> ne(const Vectorized<BFloat16>& other) const;
  Vectorized<BFloat16> gt(const Vectorized<BFloat16>& other) const;
  Vectorized<BFloat16> ge(const Vectorized<BFloat16>& other) const;
  Vectorized<BFloat16> lt(const Vectorized<BFloat16>& other) const;
  Vectorized<BFloat16> le(const Vectorized<BFloat16>& other) const;
};
|
| 719 |
+
|
| 720 |
+
// Arithmetic operators: computed in fp32 via binary_op_as_fp32 and rounded
// back to bf16.
Vectorized<BFloat16> inline operator+(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
  return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_add_ps(x, y); });
}
Vectorized<BFloat16> inline operator-(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
  return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_sub_ps(x, y); });
}
Vectorized<BFloat16> inline operator*(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
  return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_mul_ps(x, y); });
}
Vectorized<BFloat16> inline operator/(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
  return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_div_ps(x, y); });
}
// Bitwise operators act directly on the raw 512-bit representation; no fp32
// round-trip.
Vectorized<BFloat16> inline operator&(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
  return _mm512_and_si512(a, b);
}
Vectorized<BFloat16> inline operator|(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
  return _mm512_or_si512(a, b);
}
Vectorized<BFloat16> inline operator^(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
  return _mm512_xor_si512(a, b);
}
|
| 741 |
+
|
| 742 |
+
// Comparison helpers returning 1.0 in lanes where the predicate holds and
// 0.0 elsewhere: AND the operator's all-ones mask with a vector of 1.0f.
inline Vectorized<BFloat16> Vectorized<BFloat16>::eq(const Vectorized<BFloat16>& other) const {
  return (*this == other) & Vectorized<BFloat16>(1.0f);
}

inline Vectorized<BFloat16> Vectorized<BFloat16>::ne(const Vectorized<BFloat16>& other) const {
  return (*this != other) & Vectorized<BFloat16>(1.0f);
}

inline Vectorized<BFloat16> Vectorized<BFloat16>::gt(const Vectorized<BFloat16>& other) const {
  return (*this > other) & Vectorized<BFloat16>(1.0f);
}

inline Vectorized<BFloat16> Vectorized<BFloat16>::ge(const Vectorized<BFloat16>& other) const {
  return (*this >= other) & Vectorized<BFloat16>(1.0f);
}

inline Vectorized<BFloat16> Vectorized<BFloat16>::lt(const Vectorized<BFloat16>& other) const {
  return (*this < other) & Vectorized<BFloat16>(1.0f);
}

inline Vectorized<BFloat16> Vectorized<BFloat16>::le(const Vectorized<BFloat16>& other) const {
  return (*this <= other) & Vectorized<BFloat16>(1.0f);
}
|
| 765 |
+
|
| 766 |
+
// frac. Implement this here so we can use subtraction
// (operator- is only declared above this point).
inline Vectorized<BFloat16> Vectorized<BFloat16>::frac() const {
  return *this - this->trunc();
}
|
| 770 |
+
|
| 771 |
+
// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
|
| 772 |
+
// either input is a NaN.
|
| 773 |
+
template <>
|
| 774 |
+
Vectorized<BFloat16> inline maximum(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
|
| 775 |
+
__m512 a_lo, a_hi;
|
| 776 |
+
__m512 b_lo, b_hi;
|
| 777 |
+
cvtbf16_fp32(__m512i(a), a_lo, a_hi);
|
| 778 |
+
cvtbf16_fp32(__m512i(b), b_lo, b_hi);
|
| 779 |
+
auto max_lo = _mm512_max_ps(a_lo, b_lo);
|
| 780 |
+
auto max_hi = _mm512_max_ps(a_hi, b_hi);
|
| 781 |
+
auto nan_lo_mask = _mm512_cmp_ps_mask(a_lo, b_lo, _CMP_UNORD_Q);
|
| 782 |
+
auto nan_hi_mask = _mm512_cmp_ps_mask(a_hi, b_hi, _CMP_UNORD_Q);
|
| 783 |
+
auto nan_lo = _mm512_castsi512_ps(_mm512_set1_epi32(nan_lo_mask));
|
| 784 |
+
auto nan_hi = _mm512_castsi512_ps(_mm512_set1_epi32(nan_hi_mask));
|
| 785 |
+
// Exploit the fact that all-ones is a NaN.
|
| 786 |
+
auto o1 = _mm512_or_ps(max_lo, nan_lo);
|
| 787 |
+
auto o2 = _mm512_or_ps(max_hi, nan_hi);
|
| 788 |
+
return cvtfp32_bf16(o1, o2);
|
| 789 |
+
}
|
| 790 |
+
|
| 791 |
+
// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
// either input is a NaN.
template <>
Vectorized<BFloat16> inline minimum(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
  __m512 a_lo, a_hi;
  __m512 b_lo, b_hi;
  __m512i zero_vec = _mm512_set1_epi32(0);
  cvtbf16_fp32(__m512i(a), a_lo, a_hi);
  cvtbf16_fp32(__m512i(b), b_lo, b_hi);
  auto min_lo = _mm512_min_ps(a_lo, b_lo);
  auto min_hi = _mm512_min_ps(a_hi, b_hi);
  // Unordered compare: mask bit set where either operand is NaN.
  auto nan_lo_mask = _mm512_cmp_ps_mask(a_lo, b_lo, _CMP_UNORD_Q);
  auto nan_hi_mask = _mm512_cmp_ps_mask(a_hi, b_hi, _CMP_UNORD_Q);
  // All-ones in NaN lanes, zero elsewhere.
  auto nan_lo = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, nan_lo_mask,
                                                           0xFFFFFFFF));
  auto nan_hi = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, nan_hi_mask,
                                                           0xFFFFFFFF));
  // Exploit the fact that all-ones is a NaN.
  auto o1 = _mm512_or_ps(min_lo, nan_lo);
  auto o2 = _mm512_or_ps(min_hi, nan_hi);
  return cvtfp32_bf16(o1, o2);
}
|
| 813 |
+
|
| 814 |
+
template <>
|
| 815 |
+
Vectorized<BFloat16> inline clamp(const Vectorized<BFloat16>& a,
|
| 816 |
+
const Vectorized<BFloat16>& min, const Vectorized<BFloat16>& max) {
|
| 817 |
+
__m512 a_lo, a_hi;
|
| 818 |
+
__m512 min_lo, min_hi;
|
| 819 |
+
__m512 max_lo, max_hi;
|
| 820 |
+
cvtbf16_fp32(__m512i(a), a_lo, a_hi);
|
| 821 |
+
cvtbf16_fp32(__m512i(min), min_lo, min_hi);
|
| 822 |
+
cvtbf16_fp32(__m512i(max), max_lo, max_hi);
|
| 823 |
+
auto o1 = _mm512_min_ps(max_lo, _mm512_max_ps(min_lo, a_lo));
|
| 824 |
+
auto o2 = _mm512_min_ps(max_hi, _mm512_max_ps(min_hi, a_hi));
|
| 825 |
+
return cvtfp32_bf16(o1, o2);
|
| 826 |
+
}
|
| 827 |
+
|
| 828 |
+
// Cap every lane of `a` at `max`, computed in fp32.
template <>
Vectorized<BFloat16> inline clamp_max(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& max) {
  __m512 a_lo, a_hi;
  __m512 max_lo, max_hi;
  cvtbf16_fp32(__m512i(a), a_lo, a_hi);
  cvtbf16_fp32(__m512i(max), max_lo, max_hi);
  auto o1 = _mm512_min_ps(max_lo, a_lo);
  auto o2 = _mm512_min_ps(max_hi, a_hi);
  return cvtfp32_bf16(o1, o2);
}
|
| 838 |
+
|
| 839 |
+
// Raise every lane of `a` to at least `min`, computed in fp32.
template <>
Vectorized<BFloat16> inline clamp_min(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& min) {
  __m512 a_lo, a_hi;
  __m512 min_lo, min_hi;
  cvtbf16_fp32(__m512i(a), a_lo, a_hi);
  cvtbf16_fp32(__m512i(min), min_lo, min_hi);
  auto o1 = _mm512_max_ps(min_lo, a_lo);
  auto o2 = _mm512_max_ps(min_hi, a_hi);
  return cvtfp32_bf16(o1, o2);
}
|
| 849 |
+
|
| 850 |
+
template <>
|
| 851 |
+
inline void convert(const BFloat16* src, BFloat16* dst, int64_t n) {
|
| 852 |
+
int64_t i;
|
| 853 |
+
#ifndef __msvc_cl__
|
| 854 |
+
#pragma unroll
|
| 855 |
+
#endif
|
| 856 |
+
for (i = 0; i <= (n - Vectorized<BFloat16>::size()); i += Vectorized<BFloat16>::size()) {
|
| 857 |
+
auto vsrc = _mm512_loadu_si512(reinterpret_cast<__m512i*>((void*)(src + i)));
|
| 858 |
+
_mm512_storeu_si512(reinterpret_cast<__m512i*>((void*)(dst + i)), vsrc);
|
| 859 |
+
}
|
| 860 |
+
#ifndef __msvc_cl__
|
| 861 |
+
#pragma unroll
|
| 862 |
+
#endif
|
| 863 |
+
for (; i < n; i++) {
|
| 864 |
+
dst[i] = src[i];
|
| 865 |
+
}
|
| 866 |
+
}
|
| 867 |
+
|
| 868 |
+
// Bulk float -> bf16 conversion: 32 elements per iteration (two fp32 vectors
// narrow into one 512-bit bf16 vector), then a scalar tail.
template <>
inline void convert(const float* src, BFloat16* dst, int64_t n) {
  int64_t i;
  for (i = 0; i + Vectorized<BFloat16>::size() <= n; i += Vectorized<BFloat16>::size()) {
    __m512 a = _mm512_loadu_ps(&src[i]);
    __m512 b = _mm512_loadu_ps(&src[i + 16]);

    __m512i bf = cvtfp32_bf16(a, b);
    _mm512_storeu_si512(reinterpret_cast<__m512i*>(&dst[i]), bf);
  }
  // Scalar tail for the remaining elements.
  for (; i < n; i++) {
    dst[i] = c10::convert<BFloat16>(src[i]);
  }
}
|
| 882 |
+
|
| 883 |
+
// Bulk double -> bf16 conversion: each iteration narrows 16 doubles into one
// fp32 vector (twice), then 32 fp32 values into one bf16 vector.
template <>
inline void convert(const double* src, BFloat16* dst, int64_t n) {
  auto load_float = [](const double *src) -> __m512 {
    // Load one float vector from an array of doubles
    // (8 doubles -> 8 floats per half, concatenated into one __m512).
    __m256 a = _mm512_cvtpd_ps(_mm512_loadu_pd(src));
    __m256 b = _mm512_cvtpd_ps(_mm512_loadu_pd(src + 8));
    return _mm512_insertf32x8(_mm512_castps256_ps512(a), b, 1);
  };

  int64_t i;
  for (i = 0; i + Vectorized<BFloat16>::size() <= n; i += Vectorized<BFloat16>::size()) {
    __m512 a = load_float(&src[i]);
    __m512 b = load_float(&src[i + 16]);

    __m512i bf = cvtfp32_bf16(a, b);
    _mm512_storeu_si512(reinterpret_cast<__m512i*>(&dst[i]), bf);
  }
  // Scalar tail for the remaining elements.
  for (; i < n; i++) {
    dst[i] = c10::convert<BFloat16>(src[i]);
  }
}
|
| 904 |
+
|
| 905 |
+
// Fused multiply-add a*b + c per lane, computed in fp32 with a single-rounding
// FMA instruction, then narrowed back to bf16.
template <>
Vectorized<BFloat16> inline fmadd(const Vectorized<BFloat16>& a,
    const Vectorized<BFloat16>& b, const Vectorized<BFloat16>& c) {
  __m512 a_lo, a_hi;
  __m512 b_lo, b_hi;
  __m512 c_lo, c_hi;
  cvtbf16_fp32(__m512i(a), a_lo, a_hi);
  cvtbf16_fp32(__m512i(b), b_lo, b_hi);
  cvtbf16_fp32(__m512i(c), c_lo, c_hi);
  auto o1 = _mm512_fmadd_ps(a_lo, b_lo, c_lo);
  auto o2 = _mm512_fmadd_ps(a_hi, b_hi, c_hi);
  return cvtfp32_bf16(o1, o2);
}
|
| 918 |
+
|
| 919 |
+
// Transposes a 16x16 matrix of 16-bit elements held in t[0..15] (one row per
// __m256i), writing the result into u[0..7] (two transposed rows per __m512i).
// Works by successive interleaves (32-bit, then 64-bit granularity) followed
// by a final cross-lane permute; the numbered comments track element
// positions through each stage.
static inline void _transpose_mxn_half_16_16(__m256i t[], __m512i u[]) {
  __m512i r[8];
  // a0a1 a2a3 a4a5 a6a7 a8a9 a10a11 a12a13 a14a15 e0e1 e2e3 e4e5 e6e7 e8e9 e10e11 e12e13 e14e15
  // b0-b15 f0-f15
  // c0-c15 g0-g15
  // d0-d15 h0-h15
  // i0-i15 m0-m15
  // j0-j15 n0-n15
  // k0-k15 o0-o15
  // l0-l15 p0-p15
#ifndef __msvc_cl__
#pragma unroll(4)
#endif
  // Pack pairs of 256-bit rows into 512-bit registers.
  for (int i = 0; i < 4; i++) {
    r[i] = _mm512_inserti64x4(_mm512_castsi256_si512(t[i]), t[i + 4], 0x01);
    r[i + 4] = _mm512_inserti64x4(_mm512_castsi256_si512(t[i + 8]), t[i + 12], 0x01);
  }

  // u0: a0a1 b0b1 a2a3 b2b3 a8a9 b8b9 a10a11 b10b11 e0e1 f0f1 e2e3 f2f3 e8e9 f8f9 e10e11 f10f11
  // u1: a4a5 b4b5 a6a7 b6b7 a12a13 b12b13 a14a15 b14b15 e4e5 f4f5 e6e7 f6f7 e12e13 f12f13 e14e15 f14f15
  // u2: c0c1 d0d1 c2c3 d2d3 c8c9 d8d9 c10c11 d10d11 g0g1 h0h1 g2g3 h2h3 g8g9 h8h9 g10g11 h10h11
  // u3: c4c5 d4b5 c6c7 d6b7 c12c13 d12d13 c14c15 d14d15 g4g5 h4h5 g6g7 h6h7 g12g13 h12h13 g14g15 h14h15
  // i j m n
  // k l o p
#ifndef __msvc_cl__
#pragma unroll(4)
#endif
  // Interleave at 32-bit granularity.
  for (int i = 0; i < 8; i += 2) {
    u[i] = _mm512_unpacklo_epi32(r[i], r[i + 1]);
    u[i + 1] = _mm512_unpackhi_epi32(r[i], r[i + 1]);
  }

  // r0: a0a1 b0b1 c0c1 d0d1 a8a9 b8b9 c8c9 d8d9 e0e1 f0f1 g0g1 h0h1 e8e9 f8f9 g8g9 h8h9
  // r1: a2a3 b2b3 c2c3 d2d3 a10a11 b10b11 c10c11 d10d11 e2e3 f2f3 g2g3 h2h3 e10e11 f10f11 g10g11 h10h11
  // r2: a4a5 b4b5 c4c5 d4b5 a12a13 b12b13 c12c13 d12d13
  // r3: a6a7 b6b7 c6c7 d6b7 a14a15 b14b15 c14c15 d14d15
  // r4: i j k l m n o p
  // Interleave at 64-bit granularity.
  r[0] = _mm512_unpacklo_epi64(u[0], u[2]);
  r[1] = _mm512_unpackhi_epi64(u[0], u[2]);
  r[2] = _mm512_unpacklo_epi64(u[1], u[3]);
  r[3] = _mm512_unpackhi_epi64(u[1], u[3]);
  r[4] = _mm512_unpacklo_epi64(u[4], u[6]);
  r[5] = _mm512_unpackhi_epi64(u[4], u[6]);
  r[6] = _mm512_unpacklo_epi64(u[5], u[7]);
  r[7] = _mm512_unpackhi_epi64(u[5], u[7]);

  // Cross-register 16-bit permute indices (bit 5 selects between the two
  // source registers of _mm512_permutex2var_epi16).
  __m512i const1 = _mm512_set_epi32(
      0x00370035,
      0x00330031,
      0x00270025,
      0x00230021,
      0x00170015,
      0x00130011,
      0x00070005,
      0x00030001,
      0x00360034,
      0x00320030,
      0x00260024,
      0x00220020,
      0x00160014,
      0x00120010,
      0x00060004,
      0x00020000);
  __m512i const2 = _mm512_set_epi32(
      0x003f003d,
      0x003b0039,
      0x002f002d,
      0x002b0029,
      0x001f001d,
      0x001b0019,
      0x000f000d,
      0x000b0009,
      0x003e003c,
      0x003a0038,
      0x002e002c,
      0x002a0028,
      0x001e001c,
      0x001a0018,
      0x000e000c,
      0x000a0008);
  // merge values from two regs
  // 0-- 1--
  // 8-- 9--
  // 2-- 3--
  // 10-- 11--
  // 4-- 5--
  // 12-- 13--
  // 6-- 7--
  // 14-- 15--
#ifndef __msvc_cl__
#pragma unroll(4)
#endif
  for (int i = 0; i < 4; i++) {
    u[i] = _mm512_permutex2var_epi16(r[i], const1, r[i + 4]);
    u[i + 4] = _mm512_permutex2var_epi16(r[i], const2, r[i + 4]);
  }
}
|
| 1016 |
+
|
| 1017 |
+
// TODO(Leslie): Add the AVX2 Version of transpose_mxn for BFloat16 and Float16
|
| 1018 |
+
// Code referred to FBGEMM:
|
| 1019 |
+
// https://github.com/pytorch/FBGEMM/blob/39a423e4ad1a04b77fea81c7d09c3e6f8984fae9/src/UtilsAvx512.cc#L1483-L1607
|
| 1020 |
+
// 16x16 bf16 transpose: load 16 rows, transpose in registers via
// _transpose_mxn_half_16_16, store 16 transposed rows (two per __m512i).
template<>
inline void transpose_mxn<BFloat16, 16, 16>(
    const BFloat16* src,
    int64_t ld_src,
    BFloat16* dst,
    int64_t ld_dst) {
  __m256i t[16];
  // load from src to registers
  // a: a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 a11 a12 a13 a14 a15
  // b: b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 b12 b13 b14 b15
  // c: c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15
  // d: d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15
  // e: e0 e1 e2 e3 e4 e5 e6 e7 e8 e9 e10 e11 e12 e13 e14 e15
  // f: f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13 f14 f15
  // g: g0 g1 g2 g3 g4 g5 g6 g7 g8 g9 g10 g11 g12 g13 g14 g15
  // h: h0 h1 h2 h3 h4 h5 h6 h7 h8 h9 h10 h11 h12 h13 h14 h15
  // i: i0 i1 i2 i3 i4 i5 i6 i7 i8 i9 i10 i11 i12 i13 i14 i15
  // j: j0 j1 j2 j3 j4 j5 j6 j7 j8 j9 j10 j11 j12 j13 j14 j15
  // k: k0 k1 k2 k3 k4 k5 k6 k7 k8 k9 k10 k11 k12 k13 k14 k15
  // l: l0 l1 l2 l3 l4 l5 l6 l7 l8 l9 l10 l11 l12 l13 l14 l15
  // m: m0 m1 m2 m3 m4 m5 m6 m7 m8 m9 m10 m11 m12 m13 m14 m15
  // n: n0 n1 n2 n3 n4 n5 n6 n7 n8 n9 n10 n11 n12 n13 n14 n15
  // o: o0 o1 o2 o3 o4 o5 o6 o7 o8 o9 o10 o11 o12 o13 o14 o15
  // p: p0 p1 p2 p3 p4 p5 p6 p7 p8 p9 p10 p11 p12 p13 p14 p15
#ifndef __msvc_cl__
#pragma unroll(16)
#endif
  for (int i = 0; i < 16; i++) {
    t[i] = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src + i * ld_src));
  }

  __m512i u[8];
  _transpose_mxn_half_16_16(t, u);

#ifndef __msvc_cl__
#pragma unroll(8)
#endif
  // Each u[i] holds two transposed rows: the low 256 bits go to row 2i,
  // the high 256 bits to row 2i+1.
  for (int i = 0; i < 8; i++) {
    _mm256_storeu_si256(
        reinterpret_cast<__m256i*>(dst + (i * 2) * ld_dst),
        _mm512_extracti32x8_epi32(u[i], 0x0));
    _mm256_storeu_si256(
        reinterpret_cast<__m256i*>(dst + (i * 2 + 1) * ld_dst),
        _mm512_extracti32x8_epi32(u[i], 0x01));
  }
}
|
| 1066 |
+
|
| 1067 |
+
// Code referred to FBGEMM:
|
| 1068 |
+
// https://github.com/pytorch/FBGEMM/blob/39a423e4ad1a04b77fea81c7d09c3e6f8984fae9/src/UtilsAvx512.cc#L1483-L1607
|
| 1069 |
+
// 16x16 fp16 transpose: identical register scheme to the BFloat16 version
// above, since the transpose only moves 16-bit lanes and never interprets
// their values.
template<>
inline void transpose_mxn<Half, 16, 16>(
    const Half* src,
    int64_t ld_src,
    Half* dst,
    int64_t ld_dst) {
  __m256i t[16];
  // load from src to registers
  // Same matrix indices as above transpose_mxn<BFloat16, 16, 16>
#ifndef __msvc_cl__
#pragma unroll(16)
#endif
  for (int i = 0; i < 16; i++) {
    t[i] = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src + i * ld_src));
  }

  __m512i u[8];
  _transpose_mxn_half_16_16(t, u);

#ifndef __msvc_cl__
#pragma unroll(8)
#endif
  // Each u[i] holds two transposed rows: low 256 bits -> row 2i,
  // high 256 bits -> row 2i+1.
  for (int i = 0; i < 8; i++) {
    _mm256_storeu_si256(
        reinterpret_cast<__m256i*>(dst + (i * 2) * ld_dst),
        _mm512_extracti32x8_epi32(u[i], 0x0));
    _mm256_storeu_si256(
        reinterpret_cast<__m256i*>(dst + (i * 2 + 1) * ld_dst),
        _mm512_extracti32x8_epi32(u[i], 0x01));
  }
}
|
| 1100 |
+
|
| 1101 |
+
static inline void _transpose_mxn_half_32_32(__m512i r[], __m512i d[]) {
|
| 1102 |
+
// t[0]: 0 32 1 33 2 34 3 35 8 40 9 41 10 42 11 43 16 ... 59
|
| 1103 |
+
// t[1]: 4 36 5 37 6 38 7 39 12 44 13 45 14 46 15 47 20 ... 63
|
| 1104 |
+
// t[2]: 64 96 65 97 66 98 67 99 72 104 73 105 74 106 75 ... 123
|
| 1105 |
+
// t[3]: 68 100 69 101 70 102 71 103 76 108 77 109 78 110 79 111 84 ... 127
|
| 1106 |
+
// t[4]: 128 160 129 161 130 162 131 163 136 168 137 169 138 170 139 171 144 ... 187
|
| 1107 |
+
// t[5]: 132 164 133 165 134 166 135 167 140 172 141 173 142 174 143 175 148 ... 191
|
| 1108 |
+
// t[6]: 192 224 193 225 194 226 195 227 200 232 201 233 202 234 203 235 208 ... 251
|
| 1109 |
+
// t[7]: 196 228 197 229 198 230 199 231 204 236 205 237 206 238 207 239 212 ... 255
|
| 1110 |
+
// t[8]: 256 288 257 289 258 290 259 291 264 296 265 297 266 298 267 299 272 ... 315
|
| 1111 |
+
// t[9]: 260 292 261 293 262 294 263 295 268 300 269 301 270 302 271 303 276 ... 319
|
| 1112 |
+
// t[10]: 320 352 321 353 322 354 323 355 328 360 329 361 330 362 331 363 336 ... 379
|
| 1113 |
+
// t[11]: 324 356 325 357 326 358 327 359 332 364 333 365 334 366 335 367 340 ... 383
|
| 1114 |
+
// t[12]: 384 416 385 417 386 418 387 419 392 424 393 425 394 426 395 427 400 ... 443
|
| 1115 |
+
// t[13]: 388 420 389 421 390 422 391 423 396 428 397 429 398 430 399 431 404 ... 447
|
| 1116 |
+
// t[14]: 448 480 449 481 450 482 451 483 456 488 457 489 458 490 459 491 464 ... 507
|
| 1117 |
+
// t[15]: 452 484 453 485 454 486 455 487 460 492 461 493 462 494 463 495 468 ... 511
|
| 1118 |
+
// t[16]: 512 544 513 545 514 546 515 547 520 552 521 553 522 554 523 555 528 ... 571
|
| 1119 |
+
// ...
|
| 1120 |
+
// t[31]: 964 996 965 997 966 998 967 999 972 1004 973 1005 974 1006 975 1007 980 ... 1023
|
| 1121 |
+
#ifndef __msvc_cl__
|
| 1122 |
+
#pragma unroll(16)
|
| 1123 |
+
#endif
|
| 1124 |
+
for (int i = 0; i < 16; ++i) {
|
| 1125 |
+
d[i * 2] = _mm512_unpacklo_epi16(r[i * 2], r[i * 2 + 1]);
|
| 1126 |
+
d[i * 2 + 1] = _mm512_unpackhi_epi16(r[i * 2], r[i * 2 + 1]);
|
| 1127 |
+
}
|
| 1128 |
+
|
| 1129 |
+
// t[0]: 0 32 64 96 1 33 65 97 8 40 72 104 9 41 73 105 16 ... 121
|
| 1130 |
+
// t[1]: 2 34 66 98 3 35 67 99 10 42 74 106 11 43 75 107 18 ... 123
|
| 1131 |
+
// t[2]: 4 36 68 100 5 37 69 101 12 44 76 108 13 45 77 109 20 ... 125
|
| 1132 |
+
// t[3]: 6 38 70 102 7 39 71 103 14 46 78 110 15 47 79 111 22 ... 127
|
| 1133 |
+
// t[4]: 128 160 192 224 129 161 193 225 136 168 200 232 137 169 201 233 144 ... 249
|
| 1134 |
+
// t[5]: 130 162 194 226 131 163 195 227 138 170 202 234 139 171 203 235 146 ... 251
|
| 1135 |
+
// t[6]: 132 164 196 228 133 165 197 229 140 172 204 236 141 173 205 237 148 ... 253
|
| 1136 |
+
// t[7]: 134 166 198 230 135 167 199 231 142 174 206 238 143 175 207 239 150 ... 255
|
| 1137 |
+
// t[8]: 256 288 320 352 257 289 321 353 264 296 328 360 265 297 329 361 272 ... 377
|
| 1138 |
+
// t[9]: 258 290 322 354 259 291 323 355 266 298 330 362 267 299 331 363 274 ... 379
|
| 1139 |
+
// t[10]: 260 292 324 356 261 293 325 357 268 300 332 364 269 301 333 365 276 ... 381
|
| 1140 |
+
// t[11]: 262 294 326 358 263 295 327 359 270 302 334 366 271 303 335 367 278 ... 383
|
| 1141 |
+
// t[12]: 384 416 448 480 385 417 449 481 392 424 456 488 393 425 457 489 400 ... 505
|
| 1142 |
+
// t[13]: 386 418 450 482 387 419 451 483 394 426 458 490 395 427 459 491 402 ... 507
|
| 1143 |
+
// t[14]: 388 420 452 484 389 421 453 485 396 428 460 492 397 429 461 493 404 ... 509
|
| 1144 |
+
// t[15]: 390 422 454 486 391 423 455 487 398 430 462 494 399 431 463 495 406 ... 511
|
| 1145 |
+
// t[16]: 512 544 576 608 513 545 577 609 520 552 584 616 521 553 585 617 528 ... 633
|
| 1146 |
+
// ...
|
| 1147 |
+
// t[31]: 902 934 966 998 903 935 967 999 910 942 974 1006 911 943 975 1007 918 ... 1023
|
| 1148 |
+
#ifndef __msvc_cl__
|
| 1149 |
+
#pragma unroll(8)
|
| 1150 |
+
#endif
|
| 1151 |
+
for (int i = 0; i < 8; ++i) {
|
| 1152 |
+
r[i * 4] = _mm512_unpacklo_epi32(d[i * 4], d[i * 4 + 2]);
|
| 1153 |
+
r[i * 4 + 1] = _mm512_unpackhi_epi32(d[i * 4], d[i * 4 + 2]);
|
| 1154 |
+
r[i * 4 + 2] = _mm512_unpacklo_epi32(d[i * 4 + 1], d[i * 4 + 3]);
|
| 1155 |
+
r[i * 4 + 3] = _mm512_unpackhi_epi32(d[i * 4 + 1], d[i * 4 + 3]);
|
| 1156 |
+
}
|
| 1157 |
+
|
| 1158 |
+
// t[0]: 0 32 64 96 128 160 192 224 8 40 72 104 136 168 200 232 16 ... 248
|
| 1159 |
+
// t[1]: 1 33 65 97 129 161 193 225 9 41 73 105 137 169 201 233 17 ... 249
|
| 1160 |
+
// t[2]: 2 34 66 98 130 162 194 226 10 42 74 106 138 170 202 234 18 ... 250
|
| 1161 |
+
// t[3]: 3 35 67 99 131 163 195 227 11 43 75 107 139 171 203 235 19 ... 251
|
| 1162 |
+
// t[4]: 4 36 68 100 132 164 196 228 12 44 76 108 140 172 204 236 20 ... 252
|
| 1163 |
+
// t[5]: 5 37 69 101 133 165 197 229 13 45 77 109 141 173 205 237 21 ... 253
|
| 1164 |
+
// t[6]: 6 38 70 102 134 166 198 230 14 46 78 110 142 174 206 238 22 ... 254
|
| 1165 |
+
// t[7]: 7 39 71 103 135 167 199 231 15 47 79 111 143 175 207 239 23 ... 255
|
| 1166 |
+
// t[8]: 256 288 320 352 384 416 448 480 264 296 328 360 392 424 456 488 272 ... 504
|
| 1167 |
+
// t[9]: 257 289 321 353 385 417 449 481 265 297 329 361 393 425 457 489 273 ... 505
|
| 1168 |
+
// t[10]: 258 290 322 354 386 418 450 482 266 298 330 362 394 426 458 490 274 ... 506
|
| 1169 |
+
// t[11]: 259 291 323 355 387 419 451 483 267 299 331 363 395 427 459 491 275 ... 507
|
| 1170 |
+
// t[12]: 260 292 324 356 388 420 452 484 268 300 332 364 396 428 460 492 276 ... 508
|
| 1171 |
+
// t[13]: 261 293 325 357 389 421 453 485 269 301 333 365 397 429 461 493 277 ... 509
|
| 1172 |
+
// t[14]: 262 294 326 358 390 422 454 486 270 302 334 366 398 430 462 494 278 ... 510
|
| 1173 |
+
// t[15]: 263 295 327 359 391 423 455 487 271 303 335 367 399 431 463 495 279 ... 511
|
| 1174 |
+
// t[16]: 512 544 576 608 640 672 704 736 520 552 584 616 648 680 712 744 528 ... 760
|
| 1175 |
+
// ...
|
| 1176 |
+
// t[31]: 775 807 839 871 903 935 967 999 783 815 847 879 911 943 975 1007 791 ... 1023
|
| 1177 |
+
#ifndef __msvc_cl__
|
| 1178 |
+
#pragma unroll(4)
|
| 1179 |
+
#endif
|
| 1180 |
+
for (int i = 0; i < 4; ++i) {
|
| 1181 |
+
d[i * 8] = _mm512_unpacklo_epi64(r[i * 8], r[i * 8 + 4]);
|
| 1182 |
+
d[i * 8 + 1] = _mm512_unpackhi_epi64(r[i * 8], r[i * 8 + 4]);
|
| 1183 |
+
d[i * 8 + 2] = _mm512_unpacklo_epi64(r[i * 8 + 1], r[i * 8 + 5]);
|
| 1184 |
+
d[i * 8 + 3] = _mm512_unpackhi_epi64(r[i * 8 + 1], r[i * 8 + 5]);
|
| 1185 |
+
d[i * 8 + 4] = _mm512_unpacklo_epi64(r[i * 8 + 2], r[i * 8 + 6]);
|
| 1186 |
+
d[i * 8 + 5] = _mm512_unpackhi_epi64(r[i * 8 + 2], r[i * 8 + 6]);
|
| 1187 |
+
d[i * 8 + 6] = _mm512_unpacklo_epi64(r[i * 8 + 3], r[i * 8 + 7]);
|
| 1188 |
+
d[i * 8 + 7] = _mm512_unpackhi_epi64(r[i * 8 + 3], r[i * 8 + 7]);
|
| 1189 |
+
}
|
| 1190 |
+
|
| 1191 |
+
// t[0]: 0 32 64 96 128 160 192 224 256 288 320 352 384 416 448 480 16 ... 496
|
| 1192 |
+
// t[1]: 1 33 65 97 129 161 193 225 257 289 321 353 385 417 449 481 17 ... 497
|
| 1193 |
+
// t[2]: 2 34 66 98 130 162 194 226 258 290 322 354 386 418 450 482 18 ... 498
|
| 1194 |
+
// t[3]: 3 35 67 99 131 163 195 227 259 291 323 355 387 419 451 483 19 ... 499
|
| 1195 |
+
// t[4]: 4 36 68 100 132 164 196 228 260 292 324 356 388 420 452 484 20 ... 500
|
| 1196 |
+
// t[5]: 5 37 69 101 133 165 197 229 261 293 325 357 389 421 453 485 21 ... 501
|
| 1197 |
+
// t[6]: 6 38 70 102 134 166 198 230 262 294 326 358 390 422 454 486 22 ... 502
|
| 1198 |
+
// t[7]: 7 39 71 103 135 167 199 231 263 295 327 359 391 423 455 487 23 ... 503
|
| 1199 |
+
// t[8]: 8 40 72 104 136 168 200 232 264 296 328 360 392 424 456 488 24 ... 504
|
| 1200 |
+
// t[9]: 9 41 73 105 137 169 201 233 265 297 329 361 393 425 457 489 25 ... 505
|
| 1201 |
+
// t[10]: 10 42 74 106 138 170 202 234 266 298 330 362 394 426 458 490 26 ... 506
|
| 1202 |
+
// t[11]: 11 43 75 107 139 171 203 235 267 299 331 363 395 427 459 491 27 ... 507
|
| 1203 |
+
// t[12]: 12 44 76 108 140 172 204 236 268 300 332 364 396 428 460 492 28 ... 508
|
| 1204 |
+
// t[13]: 13 45 77 109 141 173 205 237 269 301 333 365 397 429 461 493 29 ... 509
|
| 1205 |
+
// t[14]: 14 46 78 110 142 174 206 238 270 302 334 366 398 430 462 494 30 ... 510
|
| 1206 |
+
// t[15]: 15 47 79 111 143 175 207 239 271 303 335 367 399 431 463 495 31 ... 511
|
| 1207 |
+
// t[16]: 512 544 576 608 640 672 704 736 768 800 832 864 896 928 960 992 528 ... 1008
|
| 1208 |
+
// ...
|
| 1209 |
+
// t[31]: 527 559 591 623 655 687 719 751 783 815 847 879 911 943 975 1007 543 ... 1023
|
| 1210 |
+
__m512i const1 = _mm512_set_epi64(
|
| 1211 |
+
0x000000000000000d,
|
| 1212 |
+
0x000000000000000c,
|
| 1213 |
+
0x0000000000000005,
|
| 1214 |
+
0x0000000000000004,
|
| 1215 |
+
0x0000000000000009,
|
| 1216 |
+
0x0000000000000008,
|
| 1217 |
+
0x0000000000000001,
|
| 1218 |
+
0x0000000000000000);
|
| 1219 |
+
__m512i const2 = _mm512_set_epi64(
|
| 1220 |
+
0x000000000000000f,
|
| 1221 |
+
0x000000000000000e,
|
| 1222 |
+
0x0000000000000007,
|
| 1223 |
+
0x0000000000000006,
|
| 1224 |
+
0x000000000000000b,
|
| 1225 |
+
0x000000000000000a,
|
| 1226 |
+
0x0000000000000003,
|
| 1227 |
+
0x0000000000000002);
|
| 1228 |
+
#ifndef __msvc_cl__
|
| 1229 |
+
#pragma unroll(8)
|
| 1230 |
+
#endif
|
| 1231 |
+
for (int i = 0; i < 8; ++i) {
|
| 1232 |
+
r[i] = _mm512_permutex2var_epi64(d[i], /*idx*/const1, d[i + 8]);
|
| 1233 |
+
r[i + 8] = _mm512_permutex2var_epi64(d[i], /*idx*/const2, d[i + 8]);
|
| 1234 |
+
r[i + 16] = _mm512_permutex2var_epi64(d[i + 16], /*idx*/const1, d[i + 24]);
|
| 1235 |
+
r[i + 24] = _mm512_permutex2var_epi64(d[i + 16], /*idx*/const2, d[i + 24]);
|
| 1236 |
+
}
|
| 1237 |
+
|
| 1238 |
+
// t[0]: 0 32 64 96 128 160 192 224 256 288 320 352 384 416 448 480 512 544 ... 992
|
| 1239 |
+
// t[1]: 1 33 65 97 129 161 193 225 257 289 321 353 385 417 449 481 513 545 ... 993
|
| 1240 |
+
// t[2]: 2 34 66 98 130 162 194 226 258 290 322 354 386 418 450 482 514 546 ... 994
|
| 1241 |
+
// t[3]: 3 35 67 99 131 163 195 227 259 291 323 355 387 419 451 483 515 547 ... 995
|
| 1242 |
+
// t[4]: 4 36 68 100 132 164 196 228 260 292 324 356 388 420 452 484 516 548 ... 996
|
| 1243 |
+
// t[5]: 5 37 69 101 133 165 197 229 261 293 325 357 389 421 453 485 517 549 ... 997
|
| 1244 |
+
// t[6]: 6 38 70 102 134 166 198 230 262 294 326 358 390 422 454 486 518 550 ... 998
|
| 1245 |
+
// t[7]: 7 39 71 103 135 167 199 231 263 295 327 359 391 423 455 487 519 551 ... 999
|
| 1246 |
+
// t[8]: 8 40 72 104 136 168 200 232 264 296 328 360 392 424 456 488 520 552 ... 1000
|
| 1247 |
+
// t[9]: 9 41 73 105 137 169 201 233 265 297 329 361 393 425 457 489 521 553 ... 1001
|
| 1248 |
+
// t[10]: 10 42 74 106 138 170 202 234 266 298 330 362 394 426 458 490 522 554 ... 1002
|
| 1249 |
+
// t[11]: 11 43 75 107 139 171 203 235 267 299 331 363 395 427 459 491 523 555 ... 1003
|
| 1250 |
+
// t[12]: 12 44 76 108 140 172 204 236 268 300 332 364 396 428 460 492 524 556 ... 1004
|
| 1251 |
+
// t[13]: 13 45 77 109 141 173 205 237 269 301 333 365 397 429 461 493 525 557 ... 1005
|
| 1252 |
+
// t[14]: 14 46 78 110 142 174 206 238 270 302 334 366 398 430 462 494 526 558 ... 1006
|
| 1253 |
+
// t[15]: 15 47 79 111 143 175 207 239 271 303 335 367 399 431 463 495 527 559 ... 1007
|
| 1254 |
+
// t[16]: 16 48 80 112 144 176 208 240 272 304 336 368 400 432 464 496 528 560 ... 1008
|
| 1255 |
+
// ...
|
| 1256 |
+
// t[31]: 31 63 95 127 159 191 223 255 287 319 351 383 415 447 479 511 543 575 ... 1023
|
| 1257 |
+
__m512i const3 = _mm512_set_epi64(
|
| 1258 |
+
0x000000000000000b,
|
| 1259 |
+
0x000000000000000a,
|
| 1260 |
+
0x0000000000000009,
|
| 1261 |
+
0x0000000000000008,
|
| 1262 |
+
0x0000000000000003,
|
| 1263 |
+
0x0000000000000002,
|
| 1264 |
+
0x0000000000000001,
|
| 1265 |
+
0x0000000000000000);
|
| 1266 |
+
__m512i const4 = _mm512_set_epi64(
|
| 1267 |
+
0x000000000000000f,
|
| 1268 |
+
0x000000000000000e,
|
| 1269 |
+
0x000000000000000d,
|
| 1270 |
+
0x000000000000000c,
|
| 1271 |
+
0x0000000000000007,
|
| 1272 |
+
0x0000000000000006,
|
| 1273 |
+
0x0000000000000005,
|
| 1274 |
+
0x0000000000000004);
|
| 1275 |
+
#ifndef __msvc_cl__
|
| 1276 |
+
#pragma unroll(16)
|
| 1277 |
+
#endif
|
| 1278 |
+
for (int i = 0; i < 16; ++i) {
|
| 1279 |
+
d[i] = _mm512_permutex2var_epi64(r[i], /*idx*/const3, r[i + 16]);
|
| 1280 |
+
d[i + 16] = _mm512_permutex2var_epi64(r[i], /*idx*/const4, r[i + 16]);
|
| 1281 |
+
}
|
| 1282 |
+
}
|
| 1283 |
+
|
| 1284 |
+
// Code referred to FBGEMM:
|
| 1285 |
+
// https://github.com/pytorch/FBGEMM/blob/39a423e4ad1a04b77fea81c7d09c3e6f8984fae9/src/UtilsAvx512.cc#LL19C6-L19C6
|
| 1286 |
+
template<>
|
| 1287 |
+
inline void transpose_mxn<BFloat16>(const BFloat16* src, int64_t ld_src, BFloat16* dst, int64_t ld_dst, int M, int N) {
|
| 1288 |
+
// load from src
|
| 1289 |
+
TORCH_CHECK(M <= 32 && N <= 32, "transpose_mxn<BFloat16> expects M, N <= 32.");
|
| 1290 |
+
__m512i r[32];
|
| 1291 |
+
int i;
|
| 1292 |
+
if (N == 32) {
|
| 1293 |
+
for (i = 0; i < M; ++i) {
|
| 1294 |
+
r[i] = _mm512_loadu_si512(&src[i * ld_src]);
|
| 1295 |
+
}
|
| 1296 |
+
} else {
|
| 1297 |
+
__mmask32 src_mask = (1 << N) - 1;
|
| 1298 |
+
for (i = 0; i < M; ++i) {
|
| 1299 |
+
r[i] = _mm512_maskz_loadu_epi16(src_mask, &src[i * ld_src]);
|
| 1300 |
+
}
|
| 1301 |
+
}
|
| 1302 |
+
for (; i < 32; ++i) {
|
| 1303 |
+
r[i] = _mm512_setzero_si512();
|
| 1304 |
+
}
|
| 1305 |
+
|
| 1306 |
+
__m512i d[32];
|
| 1307 |
+
_transpose_mxn_half_32_32(r, d);
|
| 1308 |
+
|
| 1309 |
+
// store to dst
|
| 1310 |
+
if (M == 32) {
|
| 1311 |
+
for (i = 0; i < N; ++i) {
|
| 1312 |
+
_mm512_storeu_si512(&dst[i * ld_dst], d[i]);
|
| 1313 |
+
}
|
| 1314 |
+
} else {
|
| 1315 |
+
__mmask32 dst_mask = (1 << M) - 1;
|
| 1316 |
+
for (i = 0; i < N; ++i) {
|
| 1317 |
+
_mm512_mask_storeu_epi16(&dst[i * ld_dst], dst_mask, d[i]);
|
| 1318 |
+
}
|
| 1319 |
+
}
|
| 1320 |
+
}
|
| 1321 |
+
|
| 1322 |
+
// Compile-time-shaped entry point for BFloat16: forwards M, N as runtime
// arguments to the dynamic overload above. The (M == 16 && N == 16) case is
// excluded by SFINAE — presumably handled by a dedicated specialization not
// visible in this chunk; confirm against the rest of the header.
template <typename T, int M, int N,
          typename std::enable_if_t<std::is_same_v<T, BFloat16> && ((M <= 32 && M != 16) || (N <= 32 && N != 16)), int> = 0>
inline void transpose_mxn(const BFloat16* src, int64_t ld_src, BFloat16* dst, int64_t ld_dst) {
  transpose_mxn<BFloat16>(src, ld_src, dst, ld_dst, M, N);
}
|
| 1327 |
+
|
| 1328 |
+
template<>
|
| 1329 |
+
inline void transpose_mxn<Half>(const Half* src, int64_t ld_src, Half* dst, int64_t ld_dst, int M, int N) {
|
| 1330 |
+
TORCH_CHECK(M <= 32 && N <= 32, "transpose_mxn<Half> expects M, N <= 32.");
|
| 1331 |
+
// load from src
|
| 1332 |
+
__m512i r[32];
|
| 1333 |
+
int i;
|
| 1334 |
+
if (N == 32) {
|
| 1335 |
+
for (i = 0; i < M; ++i) {
|
| 1336 |
+
r[i] = _mm512_loadu_si512(&src[i * ld_src]);
|
| 1337 |
+
}
|
| 1338 |
+
} else {
|
| 1339 |
+
__mmask32 src_mask = (1 << N) - 1;
|
| 1340 |
+
for (i = 0; i < M; ++i) {
|
| 1341 |
+
r[i] = _mm512_maskz_loadu_epi16(src_mask, &src[i * ld_src]);
|
| 1342 |
+
}
|
| 1343 |
+
}
|
| 1344 |
+
for (; i < 32; ++i) {
|
| 1345 |
+
r[i] = _mm512_setzero_si512();
|
| 1346 |
+
}
|
| 1347 |
+
|
| 1348 |
+
__m512i d[32];
|
| 1349 |
+
_transpose_mxn_half_32_32(r, d);
|
| 1350 |
+
|
| 1351 |
+
// store to dst
|
| 1352 |
+
if (M == 32) {
|
| 1353 |
+
for (i = 0; i < N; ++i) {
|
| 1354 |
+
_mm512_storeu_si512(&dst[i * ld_dst], d[i]);
|
| 1355 |
+
}
|
| 1356 |
+
} else {
|
| 1357 |
+
__mmask32 dst_mask = (1 << M) - 1;
|
| 1358 |
+
for (i = 0; i < N; ++i) {
|
| 1359 |
+
_mm512_mask_storeu_epi16(&dst[i * ld_dst], dst_mask, d[i]);
|
| 1360 |
+
}
|
| 1361 |
+
}
|
| 1362 |
+
}
|
| 1363 |
+
|
| 1364 |
+
// Compile-time-shaped entry point for Half: forwards M, N as runtime
// arguments to the dynamic overload above. The (M == 16 && N == 16) case is
// excluded by SFINAE — presumably handled by a dedicated specialization not
// visible in this chunk; confirm against the rest of the header.
template <typename T, int M, int N,
          typename std::enable_if_t<std::is_same_v<T, Half> && ((M <= 32 && M != 16) || (N <= 32 && N != 16)), int> = 0>
inline void transpose_mxn(const Half* src, int64_t ld_src, Half* dst, int64_t ld_dst) {
  transpose_mxn<Half>(src, ld_src, dst, ld_dst, M, N);
}
|
| 1369 |
+
|
| 1370 |
+
// AVX512 vectorization of Half (fp16). Storage/load/store come from the
// Vectorized16 base; arithmetic is implemented by the free operator
// overloads below, which widen to two fp32 __m512 halves, compute, and
// narrow back.
template <>
class Vectorized<Half>: public Vectorized16<Half> {
public:
  using Vectorized16::Vectorized16;

  using value_type = Half;

  // Fractional part, x - trunc(x); defined after operator- is available.
  Vectorized<Half> frac() const;

  // Numeric comparisons: each lane yields 1.0 where the predicate holds and
  // 0.0 elsewhere (unlike operator== etc., which yield all-ones bit masks).
  Vectorized<Half> eq(const Vectorized<Half>& other) const;
  Vectorized<Half> ne(const Vectorized<Half>& other) const;
  Vectorized<Half> gt(const Vectorized<Half>& other) const;
  Vectorized<Half> ge(const Vectorized<Half>& other) const;
  Vectorized<Half> lt(const Vectorized<Half>& other) const;
  Vectorized<Half> le(const Vectorized<Half>& other) const;
};
|
| 1386 |
+
|
| 1387 |
+
// Half addition: performed in fp32 after widening, then narrowed back.
Vectorized<Half> inline operator+(const Vectorized<Half>& a, const Vectorized<Half>& b) {
  auto add_ps = [](const __m512& x, const __m512& y) { return _mm512_add_ps(x, y); };
  return binary_op_as_fp32(a, b, add_ps);
}
|
| 1390 |
+
// Half subtraction: performed in fp32 after widening, then narrowed back.
Vectorized<Half> inline operator-(const Vectorized<Half>& a, const Vectorized<Half>& b) {
  auto sub_ps = [](const __m512& x, const __m512& y) { return _mm512_sub_ps(x, y); };
  return binary_op_as_fp32(a, b, sub_ps);
}
|
| 1393 |
+
// Half multiplication: performed in fp32 after widening, then narrowed back.
Vectorized<Half> inline operator*(const Vectorized<Half>& a, const Vectorized<Half>& b) {
  auto mul_ps = [](const __m512& x, const __m512& y) { return _mm512_mul_ps(x, y); };
  return binary_op_as_fp32(a, b, mul_ps);
}
|
| 1396 |
+
// Half division: performed in fp32 after widening, then narrowed back.
Vectorized<Half> inline operator/(const Vectorized<Half>& a, const Vectorized<Half>& b) {
  auto div_ps = [](const __m512& x, const __m512& y) { return _mm512_div_ps(x, y); };
  return binary_op_as_fp32(a, b, div_ps);
}
|
| 1399 |
+
|
| 1400 |
+
// Bitwise AND of the raw fp16 bit patterns (no fp32 round trip needed).
Vectorized<Half> inline operator&(const Vectorized<Half>& a, const Vectorized<Half>& b) {
  auto bits = _mm512_and_si512(__m512i(a), __m512i(b));
  return bits;
}
|
| 1403 |
+
// Bitwise OR of the raw fp16 bit patterns (no fp32 round trip needed).
Vectorized<Half> inline operator|(const Vectorized<Half>& a, const Vectorized<Half>& b) {
  auto bits = _mm512_or_si512(__m512i(a), __m512i(b));
  return bits;
}
|
| 1406 |
+
// Bitwise XOR of the raw fp16 bit patterns (no fp32 round trip needed).
Vectorized<Half> inline operator^(const Vectorized<Half>& a, const Vectorized<Half>& b) {
  auto bits = _mm512_xor_si512(__m512i(a), __m512i(b));
  return bits;
}
|
| 1409 |
+
|
| 1410 |
+
// 1.0 where *this == other, 0.0 elsewhere (AND masks out non-matching lanes).
inline Vectorized<Half> Vectorized<Half>::eq(const Vectorized<Half>& other) const {
  const Vectorized<Half> one(1.0f);
  return (*this == other) & one;
}
|
| 1413 |
+
|
| 1414 |
+
// 1.0 where *this != other, 0.0 elsewhere.
inline Vectorized<Half> Vectorized<Half>::ne(const Vectorized<Half>& other) const {
  const Vectorized<Half> one(1.0f);
  return (*this != other) & one;
}
|
| 1417 |
+
|
| 1418 |
+
// 1.0 where *this > other, 0.0 elsewhere.
inline Vectorized<Half> Vectorized<Half>::gt(const Vectorized<Half>& other) const {
  const Vectorized<Half> one(1.0f);
  return (*this > other) & one;
}
|
| 1421 |
+
|
| 1422 |
+
// 1.0 where *this >= other, 0.0 elsewhere.
inline Vectorized<Half> Vectorized<Half>::ge(const Vectorized<Half>& other) const {
  const Vectorized<Half> one(1.0f);
  return (*this >= other) & one;
}
|
| 1425 |
+
|
| 1426 |
+
// 1.0 where *this < other, 0.0 elsewhere.
inline Vectorized<Half> Vectorized<Half>::lt(const Vectorized<Half>& other) const {
  const Vectorized<Half> one(1.0f);
  return (*this < other) & one;
}
|
| 1429 |
+
|
| 1430 |
+
// 1.0 where *this <= other, 0.0 elsewhere.
inline Vectorized<Half> Vectorized<Half>::le(const Vectorized<Half>& other) const {
  const Vectorized<Half> one(1.0f);
  return (*this <= other) & one;
}
|
| 1433 |
+
|
| 1434 |
+
// frac. Implement this here so we can use subtraction
|
| 1435 |
+
inline Vectorized<Half> Vectorized<Half>::frac() const {
|
| 1436 |
+
return *this - this->trunc();
|
| 1437 |
+
}
|
| 1438 |
+
|
| 1439 |
+
// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
|
| 1440 |
+
// either input is a NaN.
|
| 1441 |
+
template <>
|
| 1442 |
+
Vectorized<Half> inline maximum(const Vectorized<Half>& a, const Vectorized<Half>& b) {
|
| 1443 |
+
__m512 a_lo, a_hi;
|
| 1444 |
+
__m512 b_lo, b_hi;
|
| 1445 |
+
cvtfp16_fp32(__m512i(a), a_lo, a_hi);
|
| 1446 |
+
cvtfp16_fp32(__m512i(b), b_lo, b_hi);
|
| 1447 |
+
auto max_lo = _mm512_max_ps(a_lo, b_lo);
|
| 1448 |
+
auto max_hi = _mm512_max_ps(a_hi, b_hi);
|
| 1449 |
+
auto nan_lo_mask = _mm512_cmp_ps_mask(a_lo, b_lo, _CMP_UNORD_Q);
|
| 1450 |
+
auto nan_hi_mask = _mm512_cmp_ps_mask(a_hi, b_hi, _CMP_UNORD_Q);
|
| 1451 |
+
auto nan_lo = _mm512_castsi512_ps(_mm512_set1_epi32(nan_lo_mask));
|
| 1452 |
+
auto nan_hi = _mm512_castsi512_ps(_mm512_set1_epi32(nan_hi_mask));
|
| 1453 |
+
// Exploit the fact that all-ones is a NaN.
|
| 1454 |
+
auto o1 = _mm512_or_ps(max_lo, nan_lo);
|
| 1455 |
+
auto o2 = _mm512_or_ps(max_hi, nan_hi);
|
| 1456 |
+
return cvtfp32_fp16(o1, o2);
|
| 1457 |
+
}
|
| 1458 |
+
|
| 1459 |
+
// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
// either input is a NaN.
template <>
Vectorized<Half> inline minimum(const Vectorized<Half>& a, const Vectorized<Half>& b) {
  __m512 a_lo, a_hi;
  __m512 b_lo, b_hi;
  __m512i zero_vec = _mm512_set1_epi32(0);
  // Widen each fp16 vector into two fp32 halves.
  cvtfp16_fp32(__m512i(a), a_lo, a_hi);
  cvtfp16_fp32(__m512i(b), b_lo, b_hi);
  auto min_lo = _mm512_min_ps(a_lo, b_lo);
  auto min_hi = _mm512_min_ps(a_hi, b_hi);
  // _CMP_UNORD_Q is true for a lane where either operand is NaN.
  auto nan_lo_mask = _mm512_cmp_ps_mask(a_lo, b_lo, _CMP_UNORD_Q);
  auto nan_hi_mask = _mm512_cmp_ps_mask(a_hi, b_hi, _CMP_UNORD_Q);
  // Expand each NaN mask bit into an all-ones 32-bit lane (zero elsewhere).
  auto nan_lo = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, nan_lo_mask,
                                                           0xFFFFFFFF));
  auto nan_hi = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, nan_hi_mask,
                                                           0xFFFFFFFF));
  // Exploit the fact that all-ones is a NaN.
  auto o1 = _mm512_or_ps(min_lo, nan_lo);
  auto o2 = _mm512_or_ps(min_hi, nan_hi);
  // Narrow the two fp32 halves back to one fp16 vector.
  return cvtfp32_fp16(o1, o2);
}
|
| 1481 |
+
|
| 1482 |
+
template <>
|
| 1483 |
+
Vectorized<Half> inline clamp(const Vectorized<Half>& a,
|
| 1484 |
+
const Vectorized<Half>& min, const Vectorized<Half>& max) {
|
| 1485 |
+
__m512 a_lo, a_hi;
|
| 1486 |
+
__m512 min_lo, min_hi;
|
| 1487 |
+
__m512 max_lo, max_hi;
|
| 1488 |
+
cvtfp16_fp32(__m512i(a), a_lo, a_hi);
|
| 1489 |
+
cvtfp16_fp32(__m512i(min), min_lo, min_hi);
|
| 1490 |
+
cvtfp16_fp32(__m512i(max), max_lo, max_hi);
|
| 1491 |
+
auto o1 = _mm512_min_ps(max_lo, _mm512_max_ps(min_lo, a_lo));
|
| 1492 |
+
auto o2 = _mm512_min_ps(max_hi, _mm512_max_ps(min_hi, a_hi));
|
| 1493 |
+
return cvtfp32_fp16(o1, o2);
|
| 1494 |
+
}
|
| 1495 |
+
|
| 1496 |
+
template <>
|
| 1497 |
+
Vectorized<Half> inline clamp_max(const Vectorized<Half>& a, const Vectorized<Half>& max) {
|
| 1498 |
+
__m512 a_lo, a_hi;
|
| 1499 |
+
__m512 max_lo, max_hi;
|
| 1500 |
+
cvtfp16_fp32(__m512i(a), a_lo, a_hi);
|
| 1501 |
+
cvtfp16_fp32(__m512i(max), max_lo, max_hi);
|
| 1502 |
+
auto o1 = _mm512_min_ps(max_lo, a_lo);
|
| 1503 |
+
auto o2 = _mm512_min_ps(max_hi, a_hi);
|
| 1504 |
+
return cvtfp32_fp16(o1, o2);
|
| 1505 |
+
}
|
| 1506 |
+
|
| 1507 |
+
template <>
|
| 1508 |
+
Vectorized<Half> inline clamp_min(const Vectorized<Half>& a, const Vectorized<Half>& min) {
|
| 1509 |
+
__m512 a_lo, a_hi;
|
| 1510 |
+
__m512 min_lo, min_hi;
|
| 1511 |
+
cvtfp16_fp32(__m512i(a), a_lo, a_hi);
|
| 1512 |
+
cvtfp16_fp32(__m512i(min), min_lo, min_hi);
|
| 1513 |
+
auto o1 = _mm512_max_ps(min_lo, a_lo);
|
| 1514 |
+
auto o2 = _mm512_max_ps(min_hi, a_hi);
|
| 1515 |
+
return cvtfp32_fp16(o1, o2);
|
| 1516 |
+
}
|
| 1517 |
+
|
| 1518 |
+
template <>
|
| 1519 |
+
inline void convert(const Half* src, Half* dst, int64_t n) {
|
| 1520 |
+
int64_t i;
|
| 1521 |
+
#ifndef __msvc_cl__
|
| 1522 |
+
#pragma unroll
|
| 1523 |
+
#endif
|
| 1524 |
+
for (i = 0; i <= (n - Vectorized<Half>::size()); i += Vectorized<Half>::size()) {
|
| 1525 |
+
auto vsrc = _mm512_loadu_si512(reinterpret_cast<__m512i*>((void*)(src + i)));
|
| 1526 |
+
_mm512_storeu_si512(reinterpret_cast<__m512i*>((void*)(dst + i)), vsrc);
|
| 1527 |
+
}
|
| 1528 |
+
#ifndef __msvc_cl__
|
| 1529 |
+
#pragma unroll
|
| 1530 |
+
#endif
|
| 1531 |
+
for (; i < n; i++) {
|
| 1532 |
+
dst[i] = src[i];
|
| 1533 |
+
}
|
| 1534 |
+
}
|
| 1535 |
+
|
| 1536 |
+
template <>
|
| 1537 |
+
inline void convert(const float* src, Half* dst, int64_t n) {
|
| 1538 |
+
int64_t i;
|
| 1539 |
+
for (i = 0; i + Vectorized<Half>::size() <= n; i += Vectorized<Half>::size()) {
|
| 1540 |
+
__m512 a = _mm512_loadu_ps(&src[i]);
|
| 1541 |
+
__m512 b = _mm512_loadu_ps(&src[i + 16]);
|
| 1542 |
+
|
| 1543 |
+
__m512i bf = cvtfp32_fp16(a, b);
|
| 1544 |
+
_mm512_storeu_si512(reinterpret_cast<__m512i*>(&dst[i]), bf);
|
| 1545 |
+
}
|
| 1546 |
+
for (; i < n; i++) {
|
| 1547 |
+
dst[i] = c10::convert<Half>(src[i]);
|
| 1548 |
+
}
|
| 1549 |
+
}
|
| 1550 |
+
|
| 1551 |
+
template <>
|
| 1552 |
+
inline void convert(const double* src, Half* dst, int64_t n) {
|
| 1553 |
+
auto load_float = [](const double *src) -> __m512 {
|
| 1554 |
+
// Load one float vector from an array of doubles
|
| 1555 |
+
__m256 a = _mm512_cvtpd_ps(_mm512_loadu_pd(src));
|
| 1556 |
+
__m256 b = _mm512_cvtpd_ps(_mm512_loadu_pd(src + 8));
|
| 1557 |
+
return _mm512_insertf32x8(_mm512_castps256_ps512(a), b, 1);
|
| 1558 |
+
};
|
| 1559 |
+
|
| 1560 |
+
int64_t i;
|
| 1561 |
+
for (i = 0; i + Vectorized<Half>::size() <= n; i += Vectorized<Half>::size()) {
|
| 1562 |
+
__m512 a = load_float(&src[i]);
|
| 1563 |
+
__m512 b = load_float(&src[i + 16]);
|
| 1564 |
+
|
| 1565 |
+
__m512i bf = cvtfp32_fp16(a, b);
|
| 1566 |
+
_mm512_storeu_si512(reinterpret_cast<__m512i*>(&dst[i]), bf);
|
| 1567 |
+
}
|
| 1568 |
+
for (; i < n; i++) {
|
| 1569 |
+
dst[i] = c10::convert<Half>(src[i]);
|
| 1570 |
+
}
|
| 1571 |
+
}
|
| 1572 |
+
|
| 1573 |
+
template <>
|
| 1574 |
+
Vectorized<Half> inline fmadd(const Vectorized<Half>& a,
|
| 1575 |
+
const Vectorized<Half>& b, const Vectorized<Half>& c) {
|
| 1576 |
+
__m512 a_lo, a_hi;
|
| 1577 |
+
__m512 b_lo, b_hi;
|
| 1578 |
+
__m512 c_lo, c_hi;
|
| 1579 |
+
cvtfp16_fp32(__m512i(a), a_lo, a_hi);
|
| 1580 |
+
cvtfp16_fp32(__m512i(b), b_lo, b_hi);
|
| 1581 |
+
cvtfp16_fp32(__m512i(c), c_lo, c_hi);
|
| 1582 |
+
auto o1 = _mm512_fmadd_ps(a_lo, b_lo, c_lo);
|
| 1583 |
+
auto o2 = _mm512_fmadd_ps(a_hi, b_hi, c_hi);
|
| 1584 |
+
return cvtfp32_fp16(o1, o2);
|
| 1585 |
+
}
|
| 1586 |
+
|
| 1587 |
+
// Generates the AVX512 conversion pair for a reduced-precision `type`:
//   convert_<name>_float : one Vectorized<type> -> two Vectorized<float>
//   convert_float_<name> : two Vectorized<float> -> one Vectorized<type>
// Both delegate to the vectorized cvt_to_fp32 / cvt_from_fp32 helpers.
// (Comments are kept outside the macro: a `//` inside a backslash-continued
// macro would swallow the continuation.)
#define CONVERT_VECTORIZED_INIT(type, name) \
inline std::tuple<Vectorized<float>, Vectorized<float>> convert_##name##_float(const Vectorized<type>& a) { \
  __m512 o1, o2; \
  cvt_to_fp32<type>(__m512i(a), o1, o2); \
  return std::make_tuple(o1, o2); \
} \
\
inline Vectorized<type> convert_float_##name(const Vectorized<float>& a, const Vectorized<float>& b) { \
  return cvt_from_fp32<type>(__m512(a), __m512(b)); \
}
CONVERT_VECTORIZED_INIT(BFloat16, bfloat16)
CONVERT_VECTORIZED_INIT(Half, half)
|
| 1599 |
+
|
| 1600 |
+
#else //defined(CPU_CAPABILITY_AVX512)

// Scalar fallback for the same conversion pair when AVX512 is unavailable:
// round-trips through aligned stack arrays and converts element-by-element
// with c10::convert. K is the element count of the reduced-precision vector
// (twice a float vector's size).
#define CONVERT_NON_VECTORIZED_INIT(type, name) \
inline std::tuple<Vectorized<float>, Vectorized<float>> convert_##name##_float(const Vectorized<type>& a) { \
  constexpr int64_t K = Vectorized<type>::size(); \
  __at_align__ float arr[K]; \
  __at_align__ type arr2[K]; \
  a.store(arr2); \
  for (const auto k : c10::irange(K)) { \
    arr[k] = c10::convert<float>(arr2[k]); \
  } \
  return std::make_tuple( \
      Vectorized<float>::loadu(arr), \
      Vectorized<float>::loadu(arr + Vectorized<float>::size())); \
} \
\
inline Vectorized<type> convert_float_##name(const Vectorized<float>& a, const Vectorized<float>& b) { \
  constexpr int64_t K = Vectorized<type>::size(); \
  __at_align__ float arr[K]; \
  __at_align__ type arr2[K]; \
  a.store(arr); \
  b.store(arr + Vectorized<float>::size()); \
  for (const auto k : c10::irange(K)) { \
    arr2[k] = c10::convert<type>(arr[k]); \
  } \
  return Vectorized<type>::loadu(arr2); \
}
CONVERT_NON_VECTORIZED_INIT(BFloat16, bfloat16)
CONVERT_NON_VECTORIZED_INIT(Half, half)

#endif // defined(CPU_CAPABILITY_AVX512)
|
| 1631 |
+
|
| 1632 |
+
#if defined(CPU_CAPABILITY_AVX512)
// Generates load_fp32_from_<name> overloads that read reduced-precision
// elements from memory and widen them to fp32 vectors:
//   (data, out)        — loads a half vector (__m256i) and widens to one
//                        Vectorized<float>.
//   (data, out1, out2) — loads a full vector and widens to two.
#define LOAD_FP32_VECTORIZED_INIT(type, name) \
inline void load_fp32_from_##name(const type *data, Vectorized<float>& out) { \
  auto values = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(data)); \
  __m512 out_values; \
  cvt_to_fp32<type>(values, out_values); \
  out = out_values; \
} \
\
inline void load_fp32_from_##name(const type *data, Vectorized<float>& out1, Vectorized<float>& out2) { \
  auto vec = Vectorized<type>::loadu(data); \
  __m512 out1_values, out2_values; \
  cvt_to_fp32<type>(vec, out1_values, out2_values); \
  out1 = out1_values; \
  out2 = out2_values; \
}
LOAD_FP32_VECTORIZED_INIT(BFloat16, bf16)
LOAD_FP32_VECTORIZED_INIT(Half, fp16)

#else // defined(CPU_CAPABILITY_AVX512)
// Scalar fallback: per-element widening through an aligned stack buffer.
// The two-output overload reads 2 * Vectorized<float>::size() elements.
#define LOAD_FP32_NON_VECTORIZED_INIT(type, name) \
inline void load_fp32_from_##name(const type *data, Vectorized<float>& out) { \
  __at_align__ float values[Vectorized<float>::size()]; \
  for (const auto k : c10::irange(Vectorized<float>::size())) { \
    values[k] = data[k]; \
  } \
  out = Vectorized<float>::loadu(values); \
} \
\
inline void load_fp32_from_##name(const type *data, Vectorized<float>& out1, Vectorized<float>& out2) { \
  load_fp32_from_##name(data, out1); \
  data += Vectorized<float>::size(); \
  load_fp32_from_##name(data, out2); \
}
LOAD_FP32_NON_VECTORIZED_INIT(BFloat16, bf16)
LOAD_FP32_NON_VECTORIZED_INIT(Half, fp16)

#endif
|
| 1670 |
+
}}}
|
lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_float.h
ADDED
|
@@ -0,0 +1,711 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
| 4 |
+
// See Note [Do not compile initializers with AVX]
|
| 5 |
+
|
| 6 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 7 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 8 |
+
#include <c10/util/irange.h>
|
| 9 |
+
#if defined(CPU_CAPABILITY_AVX512)
|
| 10 |
+
#define SLEEF_STATIC_LIBS
|
| 11 |
+
#include <sleef.h>
|
| 12 |
+
#endif
|
| 13 |
+
|
| 14 |
+
namespace at {
|
| 15 |
+
namespace vec {
|
| 16 |
+
// See Note [CPU_CAPABILITY namespace]
|
| 17 |
+
inline namespace CPU_CAPABILITY {
|
| 18 |
+
|
| 19 |
+
#if defined(CPU_CAPABILITY_AVX512)
|
| 20 |
+
|
| 21 |
+
template <> class Vectorized<float> {
|
| 22 |
+
private:
|
| 23 |
+
static constexpr __m512i zero_vec {0, 0, 0, 0, 0, 0, 0, 0};
|
| 24 |
+
public:
|
| 25 |
+
__m512 values;
|
| 26 |
+
using value_type = float;
|
| 27 |
+
using size_type = int;
|
| 28 |
+
static constexpr size_type size() {
|
| 29 |
+
return 16;
|
| 30 |
+
}
|
| 31 |
+
Vectorized() {}
|
| 32 |
+
Vectorized(__m512 v) : values(v) {}
|
| 33 |
+
Vectorized(float val) {
|
| 34 |
+
values = _mm512_set1_ps(val);
|
| 35 |
+
}
|
| 36 |
+
Vectorized(float val1, float val2, float val3, float val4,
|
| 37 |
+
float val5, float val6, float val7, float val8,
|
| 38 |
+
float val9, float val10, float val11, float val12,
|
| 39 |
+
float val13, float val14, float val15, float val16) {
|
| 40 |
+
values = _mm512_setr_ps(val1, val2, val3, val4, val5, val6, val7, val8,
|
| 41 |
+
val9, val10, val11, val12, val13, val14, val15, val16);
|
| 42 |
+
}
|
| 43 |
+
Vectorized(const float (&arr)[16])
|
| 44 |
+
: Vectorized(arr[0], arr[1], arr[2], arr[3], arr[4], arr[5], arr[6], arr[7],
|
| 45 |
+
arr[8], arr[9], arr[10], arr[11], arr[12], arr[13], arr[14], arr[15]) {}
|
| 46 |
+
operator __m512() const {
|
| 47 |
+
return values;
|
| 48 |
+
}
|
| 49 |
+
template <int64_t mask>
|
| 50 |
+
static Vectorized<float> blend(const Vectorized<float>& a, const Vectorized<float>& b) {
|
| 51 |
+
return _mm512_mask_blend_ps(mask, a.values, b.values);
|
| 52 |
+
}
|
| 53 |
+
static Vectorized<float> blendv(const Vectorized<float>& a, const Vectorized<float>& b,
|
| 54 |
+
const Vectorized<float>& mask) {
|
| 55 |
+
auto all_ones = _mm512_set1_epi32(0xFFFFFFFF);
|
| 56 |
+
auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask.values), all_ones, _MM_CMPINT_EQ);
|
| 57 |
+
return _mm512_mask_blend_ps(mmask, a.values, b.values);
|
| 58 |
+
}
|
| 59 |
+
template<typename step_t>
|
| 60 |
+
static Vectorized<float> arange(float base = 0.f, step_t step = static_cast<step_t>(1)) {
|
| 61 |
+
return Vectorized<float>(
|
| 62 |
+
base, base + step, base + 2 * step, base + 3 * step,
|
| 63 |
+
base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step,
|
| 64 |
+
base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step,
|
| 65 |
+
base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step);
|
| 66 |
+
}
|
| 67 |
+
static Vectorized<float> set(const Vectorized<float>& a, const Vectorized<float>& b,
|
| 68 |
+
int64_t count = size()) {
|
| 69 |
+
switch (count) {
|
| 70 |
+
case 0:
|
| 71 |
+
return a;
|
| 72 |
+
case 1:
|
| 73 |
+
return blend<1>(a, b);
|
| 74 |
+
case 2:
|
| 75 |
+
return blend<3>(a, b);
|
| 76 |
+
case 3:
|
| 77 |
+
return blend<7>(a, b);
|
| 78 |
+
case 4:
|
| 79 |
+
return blend<15>(a, b);
|
| 80 |
+
case 5:
|
| 81 |
+
return blend<31>(a, b);
|
| 82 |
+
case 6:
|
| 83 |
+
return blend<63>(a, b);
|
| 84 |
+
case 7:
|
| 85 |
+
return blend<127>(a, b);
|
| 86 |
+
case 8:
|
| 87 |
+
return blend<255>(a, b);
|
| 88 |
+
case 9:
|
| 89 |
+
return blend<511>(a, b);
|
| 90 |
+
case 10:
|
| 91 |
+
return blend<1023>(a, b);
|
| 92 |
+
case 11:
|
| 93 |
+
return blend<2047>(a, b);
|
| 94 |
+
case 12:
|
| 95 |
+
return blend<4095>(a, b);
|
| 96 |
+
case 13:
|
| 97 |
+
return blend<8191>(a, b);
|
| 98 |
+
case 14:
|
| 99 |
+
return blend<16383>(a, b);
|
| 100 |
+
case 15:
|
| 101 |
+
return blend<32767>(a, b);
|
| 102 |
+
}
|
| 103 |
+
return b;
|
| 104 |
+
}
|
| 105 |
+
static Vectorized<float> loadu(const void* ptr, int64_t count = size()) {
|
| 106 |
+
if (count == size())
|
| 107 |
+
return _mm512_loadu_ps(reinterpret_cast<const float*>(ptr));
|
| 108 |
+
|
| 109 |
+
__mmask16 mask = (1ULL << count) - 1;
|
| 110 |
+
return _mm512_maskz_loadu_ps(mask, ptr);
|
| 111 |
+
}
|
| 112 |
+
void store(void* ptr, int64_t count = size()) const {
|
| 113 |
+
if (count == size()) {
|
| 114 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(ptr), values);
|
| 115 |
+
} else if (count > 0) {
|
| 116 |
+
__mmask16 mask = (1ULL << count) - 1;
|
| 117 |
+
_mm512_mask_storeu_ps(reinterpret_cast<float*>(ptr), mask, values);
|
| 118 |
+
}
|
| 119 |
+
}
|
| 120 |
+
const float& operator[](int idx) const = delete;
|
| 121 |
+
float& operator[](int idx) = delete;
|
| 122 |
+
int zero_mask() const {
|
| 123 |
+
// returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit
|
| 124 |
+
__mmask16 cmp = _mm512_cmp_ps_mask(values, _mm512_set1_ps(0.0), _CMP_EQ_OQ);
|
| 125 |
+
return static_cast<int32_t>(cmp);
|
| 126 |
+
}
|
| 127 |
+
Vectorized<float> isnan() const {
|
| 128 |
+
auto mask = _mm512_cmp_ps_mask(values, _mm512_set1_ps(0.0), _CMP_UNORD_Q);
|
| 129 |
+
return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
|
| 130 |
+
0xFFFFFFFF));
|
| 131 |
+
}
|
| 132 |
+
bool has_inf_nan() const {
|
| 133 |
+
__m512 self_sub = _mm512_sub_ps(values, values);
|
| 134 |
+
return (_mm512_movepi8_mask(_mm512_castps_si512(self_sub)) & 0x7777777777777777) != 0;
|
| 135 |
+
}
|
| 136 |
+
Vectorized<float> map(float (*const f)(float)) const {
|
| 137 |
+
__at_align__ float tmp[size()];
|
| 138 |
+
store(tmp);
|
| 139 |
+
for (const auto i : c10::irange(size())) {
|
| 140 |
+
tmp[i] = f(tmp[i]);
|
| 141 |
+
}
|
| 142 |
+
return loadu(tmp);
|
| 143 |
+
}
|
| 144 |
+
Vectorized<float> abs() const {
|
| 145 |
+
auto mask = _mm512_set1_ps(-0.f);
|
| 146 |
+
return _mm512_andnot_ps(mask, values);
|
| 147 |
+
}
|
| 148 |
+
Vectorized<float> angle() const {
|
| 149 |
+
__m512 zero_vec = _mm512_set1_ps(0.f);
|
| 150 |
+
const auto nan_vec = _mm512_set1_ps(NAN);
|
| 151 |
+
const auto not_nan_mask = _mm512_cmp_ps_mask(values, values, _CMP_EQ_OQ);
|
| 152 |
+
const auto not_nan_vec = _mm512_mask_set1_epi32(_mm512_castps_si512(zero_vec),
|
| 153 |
+
not_nan_mask, 0xFFFFFFFF);
|
| 154 |
+
const auto nan_mask = _mm512_cmp_ps_mask(_mm512_castsi512_ps(not_nan_vec),
|
| 155 |
+
zero_vec, _CMP_EQ_OQ);
|
| 156 |
+
const auto pi = _mm512_set1_ps(c10::pi<double>);
|
| 157 |
+
|
| 158 |
+
const auto neg_mask = _mm512_cmp_ps_mask(values, zero_vec, _CMP_LT_OQ);
|
| 159 |
+
auto angle = _mm512_mask_blend_ps(neg_mask, zero_vec, pi);
|
| 160 |
+
angle = _mm512_mask_blend_ps(nan_mask, angle, nan_vec);
|
| 161 |
+
return angle;
|
| 162 |
+
}
|
| 163 |
+
Vectorized<float> real() const {
|
| 164 |
+
return *this;
|
| 165 |
+
}
|
| 166 |
+
Vectorized<float> imag() const {
|
| 167 |
+
return _mm512_set1_ps(0);
|
| 168 |
+
}
|
| 169 |
+
Vectorized<float> conj() const {
|
| 170 |
+
return *this;
|
| 171 |
+
}
|
| 172 |
+
Vectorized<float> acos() const {
|
| 173 |
+
return Vectorized<float>(Sleef_acosf16_u10(values));
|
| 174 |
+
}
|
| 175 |
+
Vectorized<float> acosh() const {
|
| 176 |
+
return Vectorized<float>(Sleef_acoshf16_u10(values));
|
| 177 |
+
}
|
| 178 |
+
Vectorized<float> asin() const {
|
| 179 |
+
return Vectorized<float>(Sleef_asinf16_u10(values));
|
| 180 |
+
}
|
| 181 |
+
Vectorized<float> atan() const {
|
| 182 |
+
return Vectorized<float>(Sleef_atanf16_u10(values));
|
| 183 |
+
}
|
| 184 |
+
Vectorized<float> atanh() const {
|
| 185 |
+
return Vectorized<float>(Sleef_atanhf16_u10(values));
|
| 186 |
+
}
|
| 187 |
+
Vectorized<float> atan2(const Vectorized<float> &b) const {
|
| 188 |
+
return Vectorized<float>(Sleef_atan2f16_u10(values, b));
|
| 189 |
+
}
|
| 190 |
+
Vectorized<float> copysign(const Vectorized<float> &sign) const {
|
| 191 |
+
return Vectorized<float>(Sleef_copysignf16(values, sign));
|
| 192 |
+
}
|
| 193 |
+
Vectorized<float> erf() const {
|
| 194 |
+
// constants
|
| 195 |
+
const auto neg_zero_vec = _mm512_set1_ps(-0.f);
|
| 196 |
+
const auto one_vec = _mm512_set1_ps(1.0f);
|
| 197 |
+
const auto p = _mm512_set1_ps(0.3275911f);
|
| 198 |
+
const auto p1 = _mm512_set1_ps(0.254829592f);
|
| 199 |
+
const auto p2 = _mm512_set1_ps(-0.284496736f);
|
| 200 |
+
const auto p3 = _mm512_set1_ps(1.421413741f);
|
| 201 |
+
const auto p4 = _mm512_set1_ps(-1.453152027f);
|
| 202 |
+
const auto p5 = _mm512_set1_ps(1.061405429f);
|
| 203 |
+
// sign(x)
|
| 204 |
+
auto sign_mask = _mm512_and_ps(neg_zero_vec, values);
|
| 205 |
+
auto abs_vec = _mm512_abs_ps(values);
|
| 206 |
+
// t = 1 / (p * abs(x) + 1)
|
| 207 |
+
auto tmp0 = _mm512_fmadd_ps(p, abs_vec, one_vec);
|
| 208 |
+
auto t = _mm512_div_ps(one_vec, tmp0);
|
| 209 |
+
// r = p5 * t ^ 4 + p4 * t ^ 3 + p3 * t ^ 2 + p2 * t + p1
|
| 210 |
+
auto tmp1 = _mm512_fmadd_ps(p5, t, p4);
|
| 211 |
+
auto tmp2 = _mm512_fmadd_ps(tmp1, t, p3);
|
| 212 |
+
auto tmp3 = _mm512_fmadd_ps(tmp2, t, p2);
|
| 213 |
+
auto r = _mm512_fmadd_ps(tmp3, t, p1);
|
| 214 |
+
// - exp(- x * x)
|
| 215 |
+
auto pow_2 = _mm512_mul_ps(values, values);
|
| 216 |
+
auto neg_pow_2 = _mm512_xor_ps(neg_zero_vec, pow_2);
|
| 217 |
+
// auto tmp4 = exp(neg_pow_2);
|
| 218 |
+
auto tmp4 = Vectorized<float>(Sleef_expf16_u10(neg_pow_2));
|
| 219 |
+
auto tmp5 = _mm512_xor_ps(neg_zero_vec, tmp4);
|
| 220 |
+
// erf(x) = sign(x) * (1 - r * t * exp(- x * x))
|
| 221 |
+
auto tmp6 = _mm512_mul_ps(tmp5, t);
|
| 222 |
+
auto tmp7 = _mm512_fmadd_ps(tmp6, r, one_vec);
|
| 223 |
+
return _mm512_xor_ps(sign_mask, tmp7);
|
| 224 |
+
}
|
| 225 |
+
Vectorized<float> erfc() const {
|
| 226 |
+
return Vectorized<float>(Sleef_erfcf16_u15(values));
|
| 227 |
+
}
|
| 228 |
+
Vectorized<float> erfinv() const {
|
| 229 |
+
return map(calc_erfinv);
|
| 230 |
+
}
|
| 231 |
+
Vectorized<float> exp() const {
|
| 232 |
+
return Vectorized<float>(Sleef_expf16_u10(values));
|
| 233 |
+
}
|
| 234 |
+
Vectorized<float> exp2() const {
|
| 235 |
+
return Vectorized<float>(Sleef_exp2f16_u10(values));
|
| 236 |
+
}
|
| 237 |
+
Vectorized<float> expm1() const {
|
| 238 |
+
return Vectorized<float>(Sleef_expm1f16_u10(values));
|
| 239 |
+
}
|
| 240 |
+
Vectorized<float> exp_u20() const {
|
| 241 |
+
// A faster version of exp with ULP=20
|
| 242 |
+
const __m512 vec_factorial_1 =
|
| 243 |
+
_mm512_set1_ps(0.999999701f); // 1/factorial(1)
|
| 244 |
+
const __m512 vec_factorial_2 =
|
| 245 |
+
_mm512_set1_ps(0.499991506f); // 1/factorial(2)
|
| 246 |
+
const __m512 vec_factorial_3 =
|
| 247 |
+
_mm512_set1_ps(0.166676521f); // 1/factorial(3)
|
| 248 |
+
const __m512 vec_factorial_4 =
|
| 249 |
+
_mm512_set1_ps(0.0418978221f); // 1/factorial(4)
|
| 250 |
+
const __m512 vec_factorial_5 =
|
| 251 |
+
_mm512_set1_ps(0.00828929059f); // 1/factorial(5)
|
| 252 |
+
const __m512 vec_exp_log2ef =
|
| 253 |
+
_mm512_castsi512_ps(_mm512_set1_epi32(0x3fb8aa3b)); // log2(e)
|
| 254 |
+
const __m512 vec_half = _mm512_set1_ps(0.5f);
|
| 255 |
+
const __m512 vec_one = _mm512_set1_ps(1.f);
|
| 256 |
+
const __m512 vec_zero = _mm512_set1_ps(0.f);
|
| 257 |
+
const __m512 vec_two = _mm512_set1_ps(2.f);
|
| 258 |
+
const __m512 vec_ln2f = _mm512_castsi512_ps(_mm512_set1_epi32(0x3f317218)); // ln(2)
|
| 259 |
+
const __m512 vec_ln_flt_min = _mm512_castsi512_ps(_mm512_set1_epi32(0xc2aeac50));
|
| 260 |
+
const __m512 vec_ln_flt_max = _mm512_castsi512_ps(_mm512_set1_epi32(0x42b17218));
|
| 261 |
+
const __m512i vec_127 = _mm512_set1_epi32(0x0000007f);
|
| 262 |
+
const int n_mantissa_bits = 23;
|
| 263 |
+
|
| 264 |
+
// exp(x) =
|
| 265 |
+
// = exp(n * ln(2) + r) // divide x by ln(2) and get quot and rem
|
| 266 |
+
// = 2^n * exp(r) // simplify the exp(n*ln(2)) expression
|
| 267 |
+
|
| 268 |
+
auto less_ln_flt_min_mask =
|
| 269 |
+
_mm512_cmp_ps_mask(values, vec_ln_flt_min, 1 /*_CMP_LT_OS*/);
|
| 270 |
+
auto vec_src = _mm512_min_ps(values, vec_ln_flt_max);
|
| 271 |
+
vec_src = _mm512_max_ps(vec_src, vec_ln_flt_min);
|
| 272 |
+
|
| 273 |
+
// fx = floorf(x * log2ef + 0.5)
|
| 274 |
+
auto vec_fx = _mm512_fmadd_ps(vec_src, vec_exp_log2ef, vec_half);
|
| 275 |
+
auto vec_fx_i = _mm512_cvt_roundps_epi32(
|
| 276 |
+
vec_fx, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
|
| 277 |
+
vec_fx = _mm512_cvtepi32_ps(vec_fx_i);
|
| 278 |
+
|
| 279 |
+
// x = x - fx * ln2
|
| 280 |
+
auto vec_exp_poly = _mm512_fnmadd_ps(vec_fx, vec_ln2f, vec_src);
|
| 281 |
+
|
| 282 |
+
// compute polynomial
|
| 283 |
+
auto vec_res =
|
| 284 |
+
_mm512_fmadd_ps(vec_exp_poly, vec_factorial_5, vec_factorial_4);
|
| 285 |
+
vec_res = _mm512_fmadd_ps(vec_exp_poly, vec_res, vec_factorial_3);
|
| 286 |
+
vec_res = _mm512_fmadd_ps(vec_exp_poly, vec_res, vec_factorial_2);
|
| 287 |
+
vec_res = _mm512_fmadd_ps(vec_exp_poly, vec_res, vec_factorial_1);
|
| 288 |
+
vec_res = _mm512_fmadd_ps(vec_exp_poly, vec_res, vec_one);
|
| 289 |
+
|
| 290 |
+
// compute 2^(n-1)
|
| 291 |
+
auto vec_exp_number = _mm512_sub_ps(vec_fx, vec_one);
|
| 292 |
+
auto vec_exp_number_i = _mm512_cvtps_epi32(vec_exp_number);
|
| 293 |
+
auto vec_two_pow_n_i = _mm512_add_epi32(vec_exp_number_i, vec_127);
|
| 294 |
+
vec_two_pow_n_i = _mm512_slli_epi32(vec_two_pow_n_i, n_mantissa_bits);
|
| 295 |
+
auto vec_two_pow_n = _mm512_castsi512_ps(vec_two_pow_n_i);
|
| 296 |
+
vec_two_pow_n =
|
| 297 |
+
_mm512_mask_blend_ps(less_ln_flt_min_mask, vec_two_pow_n, vec_zero);
|
| 298 |
+
|
| 299 |
+
// y = y * 2^n
|
| 300 |
+
vec_res = _mm512_mul_ps(vec_res, vec_two_pow_n);
|
| 301 |
+
vec_res = _mm512_mul_ps(vec_res, vec_two);
|
| 302 |
+
return vec_res;
|
| 303 |
+
}
|
| 304 |
+
Vectorized<float> fmod(const Vectorized<float>& q) const {
|
| 305 |
+
return Vectorized<float>(Sleef_fmodf16(values, q));
|
| 306 |
+
}
|
| 307 |
+
Vectorized<float> log() const {
|
| 308 |
+
return Vectorized<float>(Sleef_logf16_u10(values));
|
| 309 |
+
}
|
| 310 |
+
Vectorized<float> log2() const {
|
| 311 |
+
return Vectorized<float>(Sleef_log2f16_u10(values));
|
| 312 |
+
}
|
| 313 |
+
Vectorized<float> log10() const {
|
| 314 |
+
return Vectorized<float>(Sleef_log10f16_u10(values));
|
| 315 |
+
}
|
| 316 |
+
Vectorized<float> log1p() const {
|
| 317 |
+
return Vectorized<float>(Sleef_log1pf16_u10(values));
|
| 318 |
+
}
|
| 319 |
+
Vectorized<float> frac() const;
|
| 320 |
+
Vectorized<float> sin() const {
|
| 321 |
+
return Vectorized<float>(Sleef_sinf16_u35(values));
|
| 322 |
+
}
|
| 323 |
+
Vectorized<float> sinh() const {
|
| 324 |
+
return Vectorized<float>(Sleef_sinhf16_u10(values));
|
| 325 |
+
}
|
| 326 |
+
Vectorized<float> cos() const {
|
| 327 |
+
return Vectorized<float>(Sleef_cosf16_u35(values));
|
| 328 |
+
}
|
| 329 |
+
Vectorized<float> cosh() const {
|
| 330 |
+
return Vectorized<float>(Sleef_coshf16_u10(values));
|
| 331 |
+
}
|
| 332 |
+
Vectorized<float> ceil() const {
|
| 333 |
+
return _mm512_ceil_ps(values);
|
| 334 |
+
}
|
| 335 |
+
Vectorized<float> floor() const {
|
| 336 |
+
return _mm512_floor_ps(values);
|
| 337 |
+
}
|
| 338 |
+
Vectorized<float> hypot(const Vectorized<float> &b) const {
|
| 339 |
+
return Vectorized<float>(Sleef_hypotf16_u05(values, b));
|
| 340 |
+
}
|
| 341 |
+
Vectorized<float> i0() const {
|
| 342 |
+
return map(calc_i0);
|
| 343 |
+
}
|
| 344 |
+
Vectorized<float> i0e() const {
|
| 345 |
+
return map(calc_i0e);
|
| 346 |
+
}
|
| 347 |
+
Vectorized<float> digamma() const {
|
| 348 |
+
return map(calc_digamma);
|
| 349 |
+
}
|
| 350 |
+
Vectorized<float> igamma(const Vectorized<float> &x) const {
|
| 351 |
+
__at_align__ float tmp[size()];
|
| 352 |
+
__at_align__ float tmp_x[size()];
|
| 353 |
+
store(tmp);
|
| 354 |
+
x.store(tmp_x);
|
| 355 |
+
for (const auto i : c10::irange(size())) {
|
| 356 |
+
tmp[i] = calc_igamma(tmp[i], tmp_x[i]);
|
| 357 |
+
}
|
| 358 |
+
return loadu(tmp);
|
| 359 |
+
}
|
| 360 |
+
Vectorized<float> igammac(const Vectorized<float> &x) const {
|
| 361 |
+
__at_align__ float tmp[size()];
|
| 362 |
+
__at_align__ float tmp_x[size()];
|
| 363 |
+
store(tmp);
|
| 364 |
+
x.store(tmp_x);
|
| 365 |
+
for (const auto i : c10::irange(size())) {
|
| 366 |
+
tmp[i] = calc_igammac(tmp[i], tmp_x[i]);
|
| 367 |
+
}
|
| 368 |
+
return loadu(tmp);
|
| 369 |
+
}
|
| 370 |
+
Vectorized<float> neg() const {
|
| 371 |
+
return _mm512_xor_ps(_mm512_set1_ps(-0.f), values);
|
| 372 |
+
}
|
| 373 |
+
Vectorized<float> nextafter(const Vectorized<float> &b) const {
|
| 374 |
+
return Vectorized<float>(Sleef_nextafterf16(values, b));
|
| 375 |
+
}
|
| 376 |
+
Vectorized<float> round() const {
|
| 377 |
+
return _mm512_roundscale_ps(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
|
| 378 |
+
}
|
| 379 |
+
Vectorized<float> tan() const {
|
| 380 |
+
return Vectorized<float>(Sleef_tanf16_u10(values));
|
| 381 |
+
}
|
| 382 |
+
Vectorized<float> tanh() const {
|
| 383 |
+
return Vectorized<float>(Sleef_tanhf16_u10(values));
|
| 384 |
+
}
|
| 385 |
+
Vectorized<float> trunc() const {
|
| 386 |
+
return _mm512_roundscale_ps(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
|
| 387 |
+
}
|
| 388 |
+
Vectorized<float> lgamma() const {
|
| 389 |
+
return Vectorized<float>(Sleef_lgammaf16_u10(values));
|
| 390 |
+
}
|
| 391 |
+
Vectorized<float> sqrt() const {
|
| 392 |
+
return _mm512_sqrt_ps(values);
|
| 393 |
+
}
|
| 394 |
+
Vectorized<float> reciprocal() const {
|
| 395 |
+
return _mm512_div_ps(_mm512_set1_ps(1), values);
|
| 396 |
+
}
|
| 397 |
+
Vectorized<float> rsqrt() const {
|
| 398 |
+
return _mm512_div_ps(_mm512_set1_ps(1), _mm512_sqrt_ps(values));
|
| 399 |
+
}
|
| 400 |
+
Vectorized<float> pow(const Vectorized<float> &b) const {
|
| 401 |
+
return Vectorized<float>(Sleef_powf16_u10(values, b));
|
| 402 |
+
}
|
| 403 |
+
// Comparison using the _CMP_**_OQ predicate.
|
| 404 |
+
// `O`: get false if an operand is NaN
|
| 405 |
+
// `Q`: do not raise if an operand is NaN
|
| 406 |
+
Vectorized<float> operator==(const Vectorized<float>& other) const {
|
| 407 |
+
auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_EQ_OQ);
|
| 408 |
+
return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
|
| 409 |
+
0xFFFFFFFF));
|
| 410 |
+
}
|
| 411 |
+
|
| 412 |
+
Vectorized<float> operator!=(const Vectorized<float>& other) const {
|
| 413 |
+
auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_NEQ_UQ);
|
| 414 |
+
return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
|
| 415 |
+
0xFFFFFFFF));
|
| 416 |
+
}
|
| 417 |
+
|
| 418 |
+
Vectorized<float> operator<(const Vectorized<float>& other) const {
|
| 419 |
+
auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_LT_OQ);
|
| 420 |
+
return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
|
| 421 |
+
0xFFFFFFFF));
|
| 422 |
+
}
|
| 423 |
+
|
| 424 |
+
Vectorized<float> operator<=(const Vectorized<float>& other) const {
|
| 425 |
+
auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_LE_OQ);
|
| 426 |
+
return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
|
| 427 |
+
0xFFFFFFFF));
|
| 428 |
+
}
|
| 429 |
+
|
| 430 |
+
Vectorized<float> operator>(const Vectorized<float>& other) const {
|
| 431 |
+
auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_GT_OQ);
|
| 432 |
+
return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
|
| 433 |
+
0xFFFFFFFF));
|
| 434 |
+
}
|
| 435 |
+
|
| 436 |
+
Vectorized<float> operator>=(const Vectorized<float>& other) const {
|
| 437 |
+
auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_GE_OQ);
|
| 438 |
+
return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
|
| 439 |
+
0xFFFFFFFF));
|
| 440 |
+
}
|
| 441 |
+
|
| 442 |
+
Vectorized<float> eq(const Vectorized<float>& other) const;
|
| 443 |
+
Vectorized<float> ne(const Vectorized<float>& other) const;
|
| 444 |
+
Vectorized<float> gt(const Vectorized<float>& other) const;
|
| 445 |
+
Vectorized<float> ge(const Vectorized<float>& other) const;
|
| 446 |
+
Vectorized<float> lt(const Vectorized<float>& other) const;
|
| 447 |
+
Vectorized<float> le(const Vectorized<float>& other) const;
|
| 448 |
+
};
|
| 449 |
+
|
| 450 |
+
template <>
|
| 451 |
+
Vectorized<float> inline operator+(const Vectorized<float>& a, const Vectorized<float>& b) {
|
| 452 |
+
return _mm512_add_ps(a, b);
|
| 453 |
+
}
|
| 454 |
+
|
| 455 |
+
template <>
|
| 456 |
+
Vectorized<float> inline operator-(const Vectorized<float>& a, const Vectorized<float>& b) {
|
| 457 |
+
return _mm512_sub_ps(a, b);
|
| 458 |
+
}
|
| 459 |
+
|
| 460 |
+
template <>
|
| 461 |
+
Vectorized<float> inline operator*(const Vectorized<float>& a, const Vectorized<float>& b) {
|
| 462 |
+
return _mm512_mul_ps(a, b);
|
| 463 |
+
}
|
| 464 |
+
|
| 465 |
+
template <>
|
| 466 |
+
Vectorized<float> inline operator/(const Vectorized<float>& a, const Vectorized<float>& b) {
|
| 467 |
+
return _mm512_div_ps(a, b);
|
| 468 |
+
}
|
| 469 |
+
|
| 470 |
+
// frac. Implement this here so we can use subtraction
|
| 471 |
+
inline Vectorized<float> Vectorized<float>::frac() const {
|
| 472 |
+
return *this - this->trunc();
|
| 473 |
+
}
|
| 474 |
+
|
| 475 |
+
// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
|
| 476 |
+
// either input is a NaN.
|
| 477 |
+
template <>
|
| 478 |
+
Vectorized<float> inline maximum(const Vectorized<float>& a, const Vectorized<float>& b) {
|
| 479 |
+
auto zero_vec = _mm512_set1_epi32(0);
|
| 480 |
+
auto max = _mm512_max_ps(a, b);
|
| 481 |
+
auto isnan_mask = _mm512_cmp_ps_mask(a, b, _CMP_UNORD_Q);
|
| 482 |
+
auto isnan = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, isnan_mask,
|
| 483 |
+
0xFFFFFFFF));
|
| 484 |
+
// Exploit the fact that all-ones is a NaN.
|
| 485 |
+
return _mm512_or_ps(max, isnan);
|
| 486 |
+
}
|
| 487 |
+
|
| 488 |
+
// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
|
| 489 |
+
// either input is a NaN.
|
| 490 |
+
template <>
|
| 491 |
+
Vectorized<float> inline minimum(const Vectorized<float>& a, const Vectorized<float>& b) {
|
| 492 |
+
auto zero_vec = _mm512_set1_epi32(0);
|
| 493 |
+
auto min = _mm512_min_ps(a, b);
|
| 494 |
+
auto isnan_mask = _mm512_cmp_ps_mask(a, b, _CMP_UNORD_Q);
|
| 495 |
+
auto isnan = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, isnan_mask,
|
| 496 |
+
0xFFFFFFFF));
|
| 497 |
+
// Exploit the fact that all-ones is a NaN.
|
| 498 |
+
return _mm512_or_ps(min, isnan);
|
| 499 |
+
}
|
| 500 |
+
|
| 501 |
+
template <>
|
| 502 |
+
Vectorized<float> inline clamp(const Vectorized<float>& a, const Vectorized<float>& min, const Vectorized<float>& max) {
|
| 503 |
+
return _mm512_min_ps(max, _mm512_max_ps(min, a));
|
| 504 |
+
}
|
| 505 |
+
|
| 506 |
+
template <>
|
| 507 |
+
Vectorized<float> inline clamp_max(const Vectorized<float>& a, const Vectorized<float>& max) {
|
| 508 |
+
return _mm512_min_ps(max, a);
|
| 509 |
+
}
|
| 510 |
+
|
| 511 |
+
template <>
|
| 512 |
+
Vectorized<float> inline clamp_min(const Vectorized<float>& a, const Vectorized<float>& min) {
|
| 513 |
+
return _mm512_max_ps(min, a);
|
| 514 |
+
}
|
| 515 |
+
|
| 516 |
+
template <>
|
| 517 |
+
Vectorized<float> inline operator&(const Vectorized<float>& a, const Vectorized<float>& b) {
|
| 518 |
+
return _mm512_and_ps(a, b);
|
| 519 |
+
}
|
| 520 |
+
|
| 521 |
+
template <>
|
| 522 |
+
Vectorized<float> inline operator|(const Vectorized<float>& a, const Vectorized<float>& b) {
|
| 523 |
+
return _mm512_or_ps(a, b);
|
| 524 |
+
}
|
| 525 |
+
|
| 526 |
+
template <>
|
| 527 |
+
Vectorized<float> inline operator^(const Vectorized<float>& a, const Vectorized<float>& b) {
|
| 528 |
+
return _mm512_xor_ps(a, b);
|
| 529 |
+
}
|
| 530 |
+
|
| 531 |
+
inline Vectorized<float> Vectorized<float>::eq(const Vectorized<float>& other) const {
|
| 532 |
+
return (*this == other) & Vectorized<float>(1.0f);
|
| 533 |
+
}
|
| 534 |
+
|
| 535 |
+
inline Vectorized<float> Vectorized<float>::ne(const Vectorized<float>& other) const {
|
| 536 |
+
return (*this != other) & Vectorized<float>(1.0f);
|
| 537 |
+
}
|
| 538 |
+
|
| 539 |
+
inline Vectorized<float> Vectorized<float>::gt(const Vectorized<float>& other) const {
|
| 540 |
+
return (*this > other) & Vectorized<float>(1.0f);
|
| 541 |
+
}
|
| 542 |
+
|
| 543 |
+
inline Vectorized<float> Vectorized<float>::ge(const Vectorized<float>& other) const {
|
| 544 |
+
return (*this >= other) & Vectorized<float>(1.0f);
|
| 545 |
+
}
|
| 546 |
+
|
| 547 |
+
inline Vectorized<float> Vectorized<float>::lt(const Vectorized<float>& other) const {
|
| 548 |
+
return (*this < other) & Vectorized<float>(1.0f);
|
| 549 |
+
}
|
| 550 |
+
|
| 551 |
+
inline Vectorized<float> Vectorized<float>::le(const Vectorized<float>& other) const {
|
| 552 |
+
return (*this <= other) & Vectorized<float>(1.0f);
|
| 553 |
+
}
|
| 554 |
+
|
| 555 |
+
template <>
|
| 556 |
+
inline void convert(const float* src, float* dst, int64_t n) {
|
| 557 |
+
int64_t i;
|
| 558 |
+
#ifndef __msvc_cl__
|
| 559 |
+
#pragma unroll
|
| 560 |
+
#endif
|
| 561 |
+
for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) {
|
| 562 |
+
_mm512_storeu_ps(dst + i, _mm512_loadu_ps(src + i));
|
| 563 |
+
}
|
| 564 |
+
#ifndef __msvc_cl__
|
| 565 |
+
#pragma unroll
|
| 566 |
+
#endif
|
| 567 |
+
for (; i < n; i++) {
|
| 568 |
+
dst[i] = src[i];
|
| 569 |
+
}
|
| 570 |
+
}
|
| 571 |
+
|
| 572 |
+
template <>
|
| 573 |
+
Vectorized<float> inline fmadd(const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& c) {
|
| 574 |
+
return _mm512_fmadd_ps(a, b, c);
|
| 575 |
+
}
|
| 576 |
+
|
| 577 |
+
template <>
|
| 578 |
+
Vectorized<float> inline fmsub(const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& c) {
|
| 579 |
+
return _mm512_fmsub_ps(a, b, c);
|
| 580 |
+
}
|
| 581 |
+
|
| 582 |
+
// TODO(jgong5): rewrite with ATEN vectorized (need to add unpack and shuffle)
|
| 583 |
+
// Used by Inductor CPP codegen
|
| 584 |
+
// Code referred to FBGEMM:
|
| 585 |
+
// https://github.com/pytorch/FBGEMM/blob/39a423e4ad1a04b77fea81c7d09c3e6f8984fae9/src/UtilsAvx512.cc#L230-L304
|
| 586 |
+
// kernel for transposing mxn where m, n <= 16
|
| 587 |
+
// M + (M + 1) / 2 * 2 + (M + 3) / 4 * 4 + (M + 7) / 8 * 8 + 2 * N instructions
|
| 588 |
+
inline void transpose_mxn_16x16(const float* src, int64_t ld_src, float* dst, int64_t ld_dst, int M, int N) {
|
| 589 |
+
TORCH_CHECK(M <= 16 && N <= 16, "transpose_mxn<float> expects M, N <= 16.");
|
| 590 |
+
// load from src to registers
|
| 591 |
+
__m512 input[16];
|
| 592 |
+
int i;
|
| 593 |
+
if (N == 16) {
|
| 594 |
+
for (i = 0; i < M; ++i) {
|
| 595 |
+
input[i] = _mm512_loadu_ps(&src[i * ld_src]);
|
| 596 |
+
}
|
| 597 |
+
} else {
|
| 598 |
+
__mmask16 src_mask = (1 << N) - 1;
|
| 599 |
+
for (i = 0; i < M; ++i) {
|
| 600 |
+
input[i] = _mm512_maskz_loadu_ps(src_mask, &src[i * ld_src]);
|
| 601 |
+
}
|
| 602 |
+
}
|
| 603 |
+
for (; i < 16; ++i) {
|
| 604 |
+
// Not really needed but to avoid uninitialized variable warning.
|
| 605 |
+
// Shouldn't be much overhead because xor can be executed in parallel with
|
| 606 |
+
// other instructions.
|
| 607 |
+
input[i] = _mm512_setzero_ps();
|
| 608 |
+
}
|
| 609 |
+
|
| 610 |
+
// unpacking and interleaving 32-bit elements
|
| 611 |
+
__m512 temp[16];
|
| 612 |
+
for (i = 0; i < (M + 1) / 2; ++i) {
|
| 613 |
+
temp[2 * i] = _mm512_unpacklo_ps(input[2 * i], input[2 * i + 1]);
|
| 614 |
+
temp[2 * i + 1] = _mm512_unpackhi_ps(input[2 * i], input[2 * i + 1]);
|
| 615 |
+
}
|
| 616 |
+
for (i = i * 2; i < 16; ++i) {
|
| 617 |
+
temp[i] = _mm512_setzero_ps();
|
| 618 |
+
}
|
| 619 |
+
|
| 620 |
+
// unpacking and interleaving 64-bit elements
|
| 621 |
+
for (i = 0; i < (M + 3) / 4; ++i) {
|
| 622 |
+
input[4 * i] = _mm512_castpd_ps(_mm512_unpacklo_pd(
|
| 623 |
+
_mm512_castps_pd(temp[4 * i]), _mm512_castps_pd(temp[4 * i + 2])));
|
| 624 |
+
input[4 * i + 1] = _mm512_castpd_ps(_mm512_unpackhi_pd(
|
| 625 |
+
_mm512_castps_pd(temp[4 * i]), _mm512_castps_pd(temp[4 * i + 2])));
|
| 626 |
+
input[4 * i + 2] = _mm512_castpd_ps(_mm512_unpacklo_pd(
|
| 627 |
+
_mm512_castps_pd(temp[4 * i + 1]), _mm512_castps_pd(temp[4 * i + 3])));
|
| 628 |
+
input[4 * i + 3] = _mm512_castpd_ps(_mm512_unpackhi_pd(
|
| 629 |
+
_mm512_castps_pd(temp[4 * i + 1]), _mm512_castps_pd(temp[4 * i + 3])));
|
| 630 |
+
}
|
| 631 |
+
|
| 632 |
+
// shuffle 128-bits (composed of 4 32-bit elements)
|
| 633 |
+
for (i = 0; i < (M + 7) / 8; ++i) {
|
| 634 |
+
temp[8 * i] = _mm512_shuffle_f32x4(input[8 * i], input[8 * i + 4], 0x88);
|
| 635 |
+
temp[8 * i + 1] =
|
| 636 |
+
_mm512_shuffle_f32x4(input[8 * i + 1], input[8 * i + 5], 0x88);
|
| 637 |
+
temp[8 * i + 2] =
|
| 638 |
+
_mm512_shuffle_f32x4(input[8 * i + 2], input[8 * i + 6], 0x88);
|
| 639 |
+
temp[8 * i + 3] =
|
| 640 |
+
_mm512_shuffle_f32x4(input[8 * i + 3], input[8 * i + 7], 0x88);
|
| 641 |
+
temp[8 * i + 4] =
|
| 642 |
+
_mm512_shuffle_f32x4(input[8 * i], input[8 * i + 4], 0xdd);
|
| 643 |
+
temp[8 * i + 5] =
|
| 644 |
+
_mm512_shuffle_f32x4(input[8 * i + 1], input[8 * i + 5], 0xdd);
|
| 645 |
+
temp[8 * i + 6] =
|
| 646 |
+
_mm512_shuffle_f32x4(input[8 * i + 2], input[8 * i + 6], 0xdd);
|
| 647 |
+
temp[8 * i + 7] =
|
| 648 |
+
_mm512_shuffle_f32x4(input[8 * i + 3], input[8 * i + 7], 0xdd);
|
| 649 |
+
}
|
| 650 |
+
|
| 651 |
+
for (i = 0; i < N; ++i) {
|
| 652 |
+
if (i < 8) {
|
| 653 |
+
input[i] = _mm512_shuffle_f32x4(temp[i], temp[8 + i], 0x88);
|
| 654 |
+
} else {
|
| 655 |
+
input[i] = _mm512_shuffle_f32x4(temp[i - 8], temp[i], 0xdd);
|
| 656 |
+
}
|
| 657 |
+
}
|
| 658 |
+
|
| 659 |
+
// store from registers to dst
|
| 660 |
+
if (M == 16) {
|
| 661 |
+
for (i = 0; i < N; ++i) {
|
| 662 |
+
_mm512_storeu_ps(&dst[i * ld_dst], input[i]);
|
| 663 |
+
}
|
| 664 |
+
} else {
|
| 665 |
+
__mmask16 dst_mask = (1 << M) - 1;
|
| 666 |
+
for (i = 0; i < N; ++i) {
|
| 667 |
+
_mm512_mask_storeu_ps(&dst[i * ld_dst], dst_mask, input[i]);
|
| 668 |
+
}
|
| 669 |
+
}
|
| 670 |
+
}
|
| 671 |
+
|
| 672 |
+
template<>
|
| 673 |
+
inline void transpose_mxn<float>(const float* src, int64_t ld_src, float* dst, int64_t ld_dst, int M, int N) {
|
| 674 |
+
int64_t i = 0;
|
| 675 |
+
for (; i < M / 16 * 16; i += 16) {
|
| 676 |
+
int64_t j = 0;
|
| 677 |
+
for (; j < N / 16 * 16; j += 16) {
|
| 678 |
+
transpose_mxn_16x16(
|
| 679 |
+
src + i * ld_src + j, ld_src, dst + j * ld_dst + i, ld_dst, 16, 16);
|
| 680 |
+
}
|
| 681 |
+
// handle remainder j
|
| 682 |
+
int nrem = N - j;
|
| 683 |
+
if (nrem > 0) {
|
| 684 |
+
transpose_mxn_16x16(
|
| 685 |
+
src + i * ld_src + j, ld_src, dst + j * ld_dst + i, ld_dst, 16, nrem);
|
| 686 |
+
}
|
| 687 |
+
}
|
| 688 |
+
// handle remainder i
|
| 689 |
+
int mrem = M - i;
|
| 690 |
+
if (mrem > 0) {
|
| 691 |
+
int j = 0;
|
| 692 |
+
for (; j < N / 16 * 16; j += 16) {
|
| 693 |
+
transpose_mxn_16x16(
|
| 694 |
+
src + i * ld_src + j, ld_src, dst + j * ld_dst + i, ld_dst, mrem, 16);
|
| 695 |
+
}
|
| 696 |
+
// handle remainder j
|
| 697 |
+
int nrem = N - j;
|
| 698 |
+
transpose_mxn_16x16(
|
| 699 |
+
src + i * ld_src + j, ld_src, dst + j * ld_dst + i, ld_dst, mrem, nrem);
|
| 700 |
+
}
|
| 701 |
+
}
|
| 702 |
+
|
| 703 |
+
template <typename T, int M, int N,
|
| 704 |
+
typename std::enable_if_t<std::is_same_v<T, float>, int> = 0>
|
| 705 |
+
inline void transpose_mxn(const float* src, int64_t ld_src, float* dst, int64_t ld_dst) {
|
| 706 |
+
transpose_mxn<float>(src, ld_src, dst, ld_dst, M, N);
|
| 707 |
+
}
|
| 708 |
+
|
| 709 |
+
#endif
|
| 710 |
+
|
| 711 |
+
}}}
|
lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_mask.h
ADDED
|
@@ -0,0 +1,295 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 4 |
+
#include <ATen/cpu/vec/vec_n.h>
|
| 5 |
+
namespace at::vec {
|
| 6 |
+
inline namespace CPU_CAPABILITY {
|
| 7 |
+
|
| 8 |
+
/**
|
| 9 |
+
* The `VecMask` class provides a convenient interface for working with
|
| 10 |
+
* vectorized masks in SIMD operations. It encapsulates a `Vectorized<T, N>`
|
| 11 |
+
* mask that can be directly usable in masked vectorized operations. It provides
|
| 12 |
+
* various methods for manipulating and accessing the mask elements:
|
| 13 |
+
* 1. `from` and `to`: Conversion between a vector of boolean values and a
|
| 14 |
+
* vectorized mask.
|
| 15 |
+
* 2. `cast`: Casts the mask to a different base type.
|
| 16 |
+
* 3. `all_zero`: Checks if all mask elements are zero.
|
| 17 |
+
* 4. `is_masked`: Checks if a specific element is masked.
|
| 18 |
+
* 5. `loadu`: Loads data from memory using the mask.
|
| 19 |
+
* 6. `all_masked`: Checks if all mask elements are masked.
|
| 20 |
+
*
|
| 21 |
+
* Some helper template classes are provided to simplify the specialization of
|
| 22 |
+
* the `VecMask` for the specific CPU arch:
|
| 23 |
+
* 1. `VecMaskLoad`: Loads data from memory using the mask.
|
| 24 |
+
* 2. `VecMaskTo`: Converts the mask to boolean.
|
| 25 |
+
* 3. `VecMaskCast`: Casts the mask to a different base type.
|
| 26 |
+
*
|
| 27 |
+
*/
|
| 28 |
+
template <typename T, int N>
|
| 29 |
+
class VecMask;
|
| 30 |
+
|
| 31 |
+
template <
|
| 32 |
+
typename data_t,
|
| 33 |
+
int data_n,
|
| 34 |
+
typename mask_t,
|
| 35 |
+
int mask_n,
|
| 36 |
+
typename Enabled = void>
|
| 37 |
+
struct VecMaskLoad {
|
| 38 |
+
static inline VectorizedN<data_t, data_n> apply(
|
| 39 |
+
const data_t* ptr,
|
| 40 |
+
const VecMask<mask_t, mask_n>& vec_mask) {
|
| 41 |
+
constexpr typename VecMask<mask_t, mask_n>::size_type size =
|
| 42 |
+
VecMask<mask_t, mask_n>::size();
|
| 43 |
+
static_assert(VectorizedN<data_t, data_n>::size() >= size);
|
| 44 |
+
__at_align__ data_t data[size];
|
| 45 |
+
__at_align__ mask_t mask[size];
|
| 46 |
+
auto mask_ = VectorizedN<mask_t, mask_n>(vec_mask);
|
| 47 |
+
mask_.store(mask);
|
| 48 |
+
for (int i = 0; i < size; i++) {
|
| 49 |
+
data[i] = mask[i] ? ptr[i] : static_cast<data_t>(0);
|
| 50 |
+
}
|
| 51 |
+
return VectorizedN<data_t, data_n>::loadu(data, size);
|
| 52 |
+
}
|
| 53 |
+
};
|
| 54 |
+
|
| 55 |
+
template <
|
| 56 |
+
typename dst_t,
|
| 57 |
+
int dst_n,
|
| 58 |
+
typename src_t,
|
| 59 |
+
int src_n,
|
| 60 |
+
typename Enabled = void>
|
| 61 |
+
struct VecMaskTo {
|
| 62 |
+
static inline VecMask<dst_t, dst_n> apply(
|
| 63 |
+
const VecMask<src_t, src_n>& vec_mask) {
|
| 64 |
+
auto zeros = VectorizedN<dst_t, dst_n>(static_cast<dst_t>(0));
|
| 65 |
+
auto ones = VectorizedN<dst_t, dst_n>(static_cast<dst_t>(1));
|
| 66 |
+
return VectorizedN<dst_t, dst_n>::blendv(
|
| 67 |
+
zeros, ones, vec_mask.template cast<dst_t, dst_n>());
|
| 68 |
+
}
|
| 69 |
+
};
|
| 70 |
+
|
| 71 |
+
// Generic fallback for casting a mask to a different base type and/or
// width: it re-materializes the mask from the raw lane values via
// VecMask::from. Arch-specific specializations can replace this with a
// cheap bitwise reinterpretation.
template <typename dst_t, int dst_n, typename src_t, int src_n, typename Enabled = void>
struct VecMaskCast {
  static inline VecMask<dst_t, dst_n> apply(
      const VecMask<src_t, src_n>& vec_mask) {
    return VecMask<dst_t, dst_n>::from(VectorizedN<src_t, src_n>(vec_mask));
  }
};
|
| 78 |
+
|
| 79 |
+
// Identity specialization: casting a mask to its own type/width is a no-op.
template <typename T, int N>
struct VecMaskCast<T, N, T, N> {
  static inline VecMask<T, N> apply(const VecMask<T, N>& vec_mask) {
    return vec_mask;
  }
};
|
| 85 |
+
|
| 86 |
+
template <typename T, int N>
|
| 87 |
+
struct VecMaskCheck {
|
| 88 |
+
static inline bool all_zero(const VectorizedN<T, N>& vec_mask) {
|
| 89 |
+
__at_align__ T mask[VectorizedN<T, N>::size()];
|
| 90 |
+
vec_mask.store(mask);
|
| 91 |
+
return std::all_of(
|
| 92 |
+
mask, mask + VectorizedN<T, N>::size(), [](T m) { return m == static_cast<T>(0); });
|
| 93 |
+
}
|
| 94 |
+
|
| 95 |
+
static inline bool all_masked(const VectorizedN<T, N>& vec_mask) {
|
| 96 |
+
__at_align__ T mask[VectorizedN<T, N>::size()];
|
| 97 |
+
vec_mask.store(mask);
|
| 98 |
+
return std::all_of(
|
| 99 |
+
mask, mask + VectorizedN<T, N>::size(), [](T m) { return m != static_cast<T>(0); });
|
| 100 |
+
}
|
| 101 |
+
|
| 102 |
+
static inline bool is_masked(const VectorizedN<T, N>& vec_mask, int i) {
|
| 103 |
+
__at_align__ T mask[VectorizedN<T, N>::size()];
|
| 104 |
+
vec_mask.store(mask);
|
| 105 |
+
return mask[i] != static_cast<T>(0);
|
| 106 |
+
}
|
| 107 |
+
};
|
| 108 |
+
|
| 109 |
+
template <typename T, int N>
class VecMask {
 public:
  using size_type = int;
  // Number of lanes in the mask (one per element of the wrapped vector).
  static constexpr size_type size() {
    return VectorizedN<T, N>::size();
  }

 private:
  // The raw mask payload: a lane is "selected" iff its value is non-zero
  // (fully-set lanes are produced by the `from` factories below).
  VectorizedN<T, N> mask_;

 public:
  // Default mask: all lanes unselected.
  VecMask() : mask_(static_cast<T>(0)) {}
  // Wrap an existing vector as a mask (lanes interpreted as above).
  VecMask(const VectorizedN<T, N>& mask) : mask_(mask) {}

  // Convenience wrapper for the single-vector case (N == 1 only).
  template <int L = N, typename std::enable_if_t<L == 1, int> = 0>
  VecMask(const Vectorized<T>& mask) : mask_(mask) {}

  // Build a mask from another vector of (boolean-like) values; lanes beyond
  // the source vector's width are treated as false. Goes through memory.
  template <typename U, int L>
  static VecMask<T, N> from(const VectorizedN<U, L>& b_vec) {
    __at_align__ U b_buf[size()];
    if constexpr (size() >= VectorizedN<U, L>::size()) {
      b_vec.store(b_buf);
      // Zero-fill the tail lanes the narrower source does not cover.
      for (int i = VectorizedN<U, L>::size(); i < size(); i++) {
        b_buf[i] = static_cast<U>(0);
      }
    } else {
      b_vec.store(b_buf, size());
    }
    return from(b_buf);
  }

  // Broadcast a single boolean: all lanes become all-ones bits (via an
  // integer of the same size bit-cast into T) or all-zero bits.
  template <typename U>
  static VecMask<T, N> from(U b) {
    using int_t = int_same_size_t<T>;
    T mask = b ? c10::bit_cast<T>((int_t)(~(int_t)0)) : (T)0;
    return VectorizedN<T, N>(mask);
  }

  // Build a mask from an array of size() boolean-like values: each truthy
  // element becomes an all-ones lane, each falsy element an all-zero lane.
  template <typename U>
  static VecMask<T, N> from(U* b) {
    using int_t = int_same_size_t<T>;
    __at_align__ T mask[size()];
#ifndef __msvc_cl__
#pragma unroll
#endif
    for (int i = 0; i < size(); i++) {
      // Write the bit pattern through an integer alias to get all-ones
      // lanes regardless of T's value semantics (e.g. floating point).
      *(int_t*)(mask + i) = b[i] ? ~(int_t)0 : (int_t)0;
    }
    return VectorizedN<T, N>(VectorizedN<T, N>::loadu(mask));
  }

  // Lane-wise select between two masks, controlled by mask `a`
  // (same argument order as VectorizedN::blendv).
  static VecMask<T, N> blendv(
      const VecMask<T, N>& c,
      const VecMask<T, N>& b,
      const VecMask<T, N>& a) {
    VectorizedN<T, N> result = VectorizedN<T, N>::blendv(
        VectorizedN<T, N>(c),
        VectorizedN<T, N>(b),
        VectorizedN<T, N>(a));
    return result;
  }

  // First `count` lanes come from `b`, the remainder from `a`.
  static VecMask<T, N> set(
      const VecMask<T, N>& a,
      const VecMask<T, N>& b,
      int64_t count = size()) {
    VectorizedN<T, N> result = VectorizedN<T, N>::set(
        VectorizedN<T, N>(a),
        VectorizedN<T, N>(b),
        count);
    return result;
  }

  // Store the first `count` lanes as bools. L is the number of
  // Vectorized<bool> needed to cover all lanes (rounded up).
  void store(bool* b, int count = size()) {
    constexpr int L = (VectorizedN<T, N>::size() + Vectorized<bool>::size() - 1)/ Vectorized<bool>::size();
    auto res = this->to<bool, L>();
    res.store(b, count);
    return;
  }

  // Convert to a 0/1-valued vector of U (multi-vector result, L >= 2).
  template <typename U, int L, std::enable_if_t<L >= 2, int> = 0>
  inline VectorizedN<U, L> to() const {
    return VecMaskTo<U, L, T, N>::apply(*this);
  }

  // Convert to a 0/1-valued vector of U (single-vector result, L == 1).
  template <typename U, int L, std::enable_if_t<L == 1, int> = 0>
  inline Vectorized<U> to() const {
    return VecMaskTo<U, L, T, N>::apply(*this);
  }

  // Reinterpret the mask over a different base type/width.
  template <typename U, int L>
  inline VecMask<U, L> cast() const {
    return VecMaskCast<U, L, T, N>::apply(*this);
  }

  // True iff no lane is selected.
  inline bool all_zero() const {
    return VecMaskCheck<T, N>::all_zero(mask_);
  }

  // True iff every lane is selected.
  inline bool all_masked() const {
    return VecMaskCheck<T, N>::all_masked(mask_);
  }

  // True iff lane `i` is selected.
  inline bool is_masked(int i) const {
    return VecMaskCheck<T, N>::is_masked(mask_, i);
  }

  // Expose the raw payload so the mask is directly usable in vector ops.
  inline operator VectorizedN<T, N>() const {
    return mask_;
  }

  // Single-vector payload accessor (N == 1 only).
  template <int L = N, typename std::enable_if_t<L == 1, int> = 0>
  inline operator Vectorized<T>() const {
    return mask_[0];
  }

  // Access the i-th underlying Vectorized<T> of the payload.
  inline Vectorized<T> operator[](int i) const {
    return mask_[i];
  }

  // Masked load of size() elements of U into a multi-vector (L >= 2);
  // unselected lanes read as zero.
  template <
      typename U,
      int L,
      std::enable_if_t<L >= 2 && VectorizedN<U, L>::size() >= size(), int> = 0>
  VectorizedN<U, L> loadu(const U* ptr) const {
    return VecMaskLoad<U, L, T, N>::apply(ptr, *this);
  }

  // Masked load into a single vector (L == 1); unselected lanes read as zero.
  template <
      typename U,
      int L,
      std::enable_if_t<L == 1 && Vectorized<U>::size() >= size(), int> = 0>
  Vectorized<U> loadu(const U* ptr) const {
    return VecMaskLoad<U, L, T, N>::apply(ptr, *this);
  }
};
|
| 246 |
+
|
| 247 |
+
// Lifts a unary operator already defined on VectorizedN<T, N> to VecMask.
// (Comments are kept outside the macro bodies: a `//` comment ending in `\`
// would splice the next line into the comment.)
#define VEC_MASK_DEFINE_UNARY_OP_GLOBAL(op) \
  template <typename T, int N> \
  inline VecMask<T, N> op(const VecMask<T, N>& a) { \
    return op(VectorizedN<T, N>(a)); \
  }

// Lifts a binary operator to VecMask; the right-hand mask may have a
// different base type/width as long as the lane counts match — it is cast
// to the left-hand side's type first.
#define VEC_MASK_DEFINE_BINARY_OP_GLOBAL(op) \
  template < \
      typename T, \
      int N, \
      typename V, \
      int M, \
      std::enable_if_t<VecMask<T, N>::size() == VecMask<V, M>::size(), int> = \
          0> \
  inline VecMask<T, N> op(const VecMask<T, N>& a, const VecMask<V, M>& b) { \
    return op( \
        VectorizedN<T, N>(a), VectorizedN<T, N>(b.template cast<T, N>())); \
  }

// Defines a binary operator directly from a boolean expression over the
// operand masks (used for comparisons built out of &, |, ^, ~ below).
#define VEC_MASK_DEFINE_BINARY_OP_WITH_EXPR_GLOBAL(op, EXPR) \
  template < \
      typename T, \
      int N, \
      typename V, \
      int M, \
      std::enable_if_t<VecMask<T, N>::size() == VecMask<V, M>::size(), int> = \
          0> \
  inline VecMask<T, N> op(const VecMask<T, N>& a, const VecMask<V, M>& b) { \
    return EXPR; \
  }

// Bitwise mask algebra delegated to the underlying vector ops.
VEC_MASK_DEFINE_UNARY_OP_GLOBAL(operator~)
VEC_MASK_DEFINE_BINARY_OP_GLOBAL(operator&)
VEC_MASK_DEFINE_BINARY_OP_GLOBAL(operator|)
VEC_MASK_DEFINE_BINARY_OP_GLOBAL(operator^)
VEC_MASK_DEFINE_BINARY_OP_GLOBAL(operator*)
// Comparisons on masks treat each lane as a boolean, so they reduce to
// bitwise formulas: a > b is "a and not b", equality is "not (a xor b)", etc.
VEC_MASK_DEFINE_BINARY_OP_WITH_EXPR_GLOBAL(operator>, a & ~b)
VEC_MASK_DEFINE_BINARY_OP_WITH_EXPR_GLOBAL(operator<, ~a& b)
VEC_MASK_DEFINE_BINARY_OP_WITH_EXPR_GLOBAL(operator==, ~(a ^ b))
VEC_MASK_DEFINE_BINARY_OP_WITH_EXPR_GLOBAL(operator>=, (a == b) | (a > b))
VEC_MASK_DEFINE_BINARY_OP_WITH_EXPR_GLOBAL(operator<=, (a == b) | (a < b))
VEC_MASK_DEFINE_BINARY_OP_WITH_EXPR_GLOBAL(operator!=, (a ^ b))

// Keep the helper macros header-local.
#undef VEC_MASK_DEFINE_UNARY_OP_GLOBAL
#undef VEC_MASK_DEFINE_BINARY_OP_GLOBAL
#undef VEC_MASK_DEFINE_BINARY_OP_WITH_EXPR_GLOBAL
|
| 293 |
+
|
| 294 |
+
} // namespace CPU_CAPABILITY
|
| 295 |
+
} // namespace at::vec
|
lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_n.h
ADDED
|
@@ -0,0 +1,404 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 4 |
+
#include <array>
|
| 5 |
+
|
| 6 |
+
namespace at::vec {
|
| 7 |
+
inline namespace CPU_CAPABILITY {
|
| 8 |
+
|
| 9 |
+
/**
|
| 10 |
+
* @brief A class template representing a vectorized type with
|
| 11 |
+
* `N * Vectorized<T>::size()` elements, aiming to support vectors of
|
| 12 |
+
* arbitrary size. A specific use case of it is to represent vectors
|
| 13 |
+
* converted from data types with different sizes but with the same
|
| 14 |
+
* number of vector elements, e.g., `VectorizedN<float, 2>` can be
|
| 15 |
+
* a vector converted from two `Vectorized<bfloat16>`, `VectorizedN<int64_t, 2>`
|
| 16 |
+
* can be a vector converted from two `Vectorized<int32_t>` etc.
|
| 17 |
+
*
|
| 18 |
+
* It supports most of the operations of `Vectorized<T>`
|
| 19 |
+
* and the implementation delegates to `Vectorized<T>` with loops over `N`.
|
| 20 |
+
*
|
| 21 |
+
* @tparam T The underlying type of the vectorized elements.
|
| 22 |
+
* @tparam N The number of underlying `Vectorized<T>`.
|
| 23 |
+
*/
|
| 24 |
+
template <typename T, int N>
class VectorizedN {
 public:
  using value_type = T;
  using size_type = int;

  // Byte size of one element of T.
  static constexpr size_type size_T = sizeof(T);
  // Total number of scalar elements across all N sub-vectors.
  static constexpr size_type size() {
    return Vectorized<T>::size() * N;
  }

 private:
  // The N underlying hardware vectors, stored contiguously.
  std::array<Vectorized<T>, N> values;

 public:
  // methods not implemented yet:
  // variadic constructor, operator T*, as_bytes, zero_mask

  // Defines a member op that forwards element-wise to Vectorized<T>::op
  // on each of the N sub-vectors. (Comments stay outside the macro body:
  // a `//` comment ending in `\` would splice the next line into it.)
#define VECTORIZEDN_DEFINE_UNARY_OP(op) \
  VectorizedN<T, N> op() const { \
    return unary_op([](const Vectorized<T>& a) { return a.op(); }); \
  }

  // Same as above for binary member ops.
#define VECTORIZEDN_DEFINE_BINARY_OP(op) \
  VectorizedN<T, N> op(const VectorizedN<T, N>& other) const { \
    return binary_op( \
        other, [](const Vectorized<T>& a, const Vectorized<T>& b) { \
          return a.op(b); \
        }); \
  }

  // Apply `op` to each sub-vector independently.
  template <typename Op>
  inline VectorizedN<T, N> unary_op(Op op) const {
    VectorizedN<T, N> result;
#ifndef _MSC_VER
#pragma unroll
#endif
    for (int i = 0; i < N; ++i) {
      result.values[i] = op(values[i]);
    }
    return result;
  }

  // Apply `op` pairwise to corresponding sub-vectors of *this and `other`.
  template <typename Op>
  inline VectorizedN<T, N> binary_op(const VectorizedN<T, N>& other, Op op)
      const {
    VectorizedN<T, N> result;
#ifndef _MSC_VER
#pragma unroll
#endif
    for (int i = 0; i < N; ++i) {
      result.values[i] = op(values[i], other.values[i]);
    }
    return result;
  }

  // Apply `op` to corresponding sub-vectors of three operands.
  template <typename Op>
  inline VectorizedN<T, N> ternary_op(
      const VectorizedN<T, N>& other,
      const VectorizedN<T, N>& other2,
      Op op) const {
    VectorizedN<T, N> result;
#ifndef _MSC_VER
#pragma unroll
#endif
    for (int i = 0; i < N; ++i) {
      result.values[i] = op(values[i], other.values[i], other2.values[i]);
    }
    return result;
  }

  VectorizedN() = default;

  // Broadcast a scalar into every lane of every sub-vector.
  explicit VectorizedN(T val) {
    for (int i = 0; i < N; ++i) {
      values[i] = Vectorized<T>(val);
    }
  }

  // Implicit wrap of a single Vectorized<T> (N == 1 only).
  template <int L = N, typename std::enable_if_t<L == 1, int> = 0>
  VectorizedN(const Vectorized<T>& val) : values({val}) {}

  // Build from two sub-vectors (N == 2 only).
  template <int L = N, typename std::enable_if_t<L == 2, int> = 0>
  VectorizedN(const Vectorized<T>& val_0, const Vectorized<T>& val_1)
      : values({val_0, val_1}) {}

  // Implicit unwrap to the single sub-vector (N == 1 only).
  template <int L = N, typename std::enable_if_t<L == 1, int> = 0>
  inline operator Vectorized<T>() const {
    return values[0];
  }

  // Access the i-th sub-vector (read-only).
  inline const Vectorized<T>& operator[](int i) const {
    return values[i];
  }

  // Access the i-th sub-vector (mutable).
  inline Vectorized<T>& operator[](int i) {
    return values[i];
  }

  // Compile-time blend; the same `mask` bit pattern is applied to every
  // sub-vector (mirrors Vectorized<T>::blend).
  template <int64_t mask>
  static VectorizedN<T, N> blend(
      const VectorizedN<T, N>& a,
      const VectorizedN<T, N>& b) {
    VectorizedN<T, N> result;
    for (int i = 0; i < N; ++i) {
      result.values[i] =
          Vectorized<T>::template blend<mask>(a.values[i], b.values[i]);
    }
    return result;
  }

  // Runtime lane-wise select: lanes where `mask` is set come from `b`.
  static VectorizedN<T, N> blendv(
      const VectorizedN<T, N>& a,
      const VectorizedN<T, N>& b,
      const VectorizedN<T, N>& mask) {
    VectorizedN<T, N> result;
    for (int i = 0; i < N; ++i) {
      result.values[i] =
          Vectorized<T>::blendv(a.values[i], b.values[i], mask.values[i]);
    }
    return result;
  }

  // Arithmetic sequence base, base+step, ... spanning all N sub-vectors.
  template <typename step_t>
  static VectorizedN<T, N> arange(
      T base = static_cast<T>(0),
      step_t step = static_cast<step_t>(1)) {
    VectorizedN<T, N> result;
    for (int i = 0; i < N; ++i) {
      result.values[i] = Vectorized<T>::arange(base, step);
      // Advance the base past the lanes the previous sub-vector covered.
      base += step * Vectorized<T>::size();
    }
    return result;
  }

  // First `count` lanes come from `b`, the remainder from `a`.
  static VectorizedN<T, N> set(
      const VectorizedN<T, N>& a,
      const VectorizedN<T, N>& b,
      int64_t count = size()) {
    VectorizedN<T, N> result;
    for (int i = 0; i < N; ++i) {
      if (count > 0) {
        result.values[i] = Vectorized<T>::set(
            a.values[i],
            b.values[i],
            std::min(count, (int64_t)Vectorized<T>::size()));
        count -= Vectorized<T>::size();
      } else {
        result.values[i] = a.values[i];
      }
    }
    return result;
  }

  // Unaligned load of size() contiguous elements.
  static VectorizedN<T, N> loadu(const void* ptr) {
    VectorizedN<T, N> result;
    for (int i = 0; i < N; ++i) {
      result.values[i] = Vectorized<T>::loadu(ptr);
      ptr = static_cast<const T*>(ptr) + Vectorized<T>::size();
    }
    return result;
  }

  // Unaligned load of the first `count` elements; sub-vectors past `count`
  // are left default-initialized.
  static VectorizedN<T, N> loadu(const void* ptr, int64_t count) {
    VectorizedN<T, N> result;
    for (int i = 0; i < N; ++i) {
      result.values[i] = Vectorized<T>::loadu(
          ptr, std::min(count, (int64_t)Vectorized<T>::size()));
      ptr = static_cast<const T*>(ptr) + Vectorized<T>::size();
      count -= Vectorized<T>::size();
      if (count <= 0) {
        break;
      }
    }
    return result;
  }

  // Unaligned store of all size() elements.
  void store(void* ptr) const {
    for (int i = 0; i < N; ++i) {
      values[i].store(ptr);
      ptr = static_cast<T*>(ptr) + Vectorized<T>::size();
    }
  }

  // Unaligned store of the first `count` elements only.
  void store(void* ptr, int count) const {
    for (int i = 0; i < N; ++i) {
      values[i].store(ptr, std::min(count, (int)Vectorized<T>::size()));
      ptr = static_cast<T*>(ptr) + Vectorized<T>::size();
      count -= Vectorized<T>::size();
      if (count <= 0) {
        break;
      }
    }
  }

  // True if any sub-vector reports an infinity or NaN lane.
  bool has_inf_nan() const {
    for (int i = 0; i < N; ++i) {
      if (values[i].has_inf_nan()) {
        return true;
      }
    }
    return false;
  }

  // Element-wise application of a scalar function (by-value signature).
  VectorizedN<T, N> map(T (*const f)(T)) const {
    VectorizedN<T, N> result;
    for (int i = 0; i < N; ++i) {
      result.values[i] = values[i].map(f);
    }
    return result;
  }

  // Element-wise application of a scalar function (by-const-ref signature).
  VectorizedN<T, N> map(T (*const f)(const T&)) const {
    VectorizedN<T, N> result;
    for (int i = 0; i < N; ++i) {
      result.values[i] = values[i].map(f);
    }
    return result;
  }

  // Element-wise math / comparison members forwarded to Vectorized<T>.
  VECTORIZEDN_DEFINE_UNARY_OP(isnan)
  VECTORIZEDN_DEFINE_UNARY_OP(abs)
  VECTORIZEDN_DEFINE_UNARY_OP(sgn)
  VECTORIZEDN_DEFINE_UNARY_OP(angle)
  VECTORIZEDN_DEFINE_UNARY_OP(real)
  VECTORIZEDN_DEFINE_UNARY_OP(imag)
  VECTORIZEDN_DEFINE_UNARY_OP(conj)
  VECTORIZEDN_DEFINE_UNARY_OP(acos)
  VECTORIZEDN_DEFINE_UNARY_OP(acosh)
  VECTORIZEDN_DEFINE_UNARY_OP(asin)
  VECTORIZEDN_DEFINE_UNARY_OP(atan)
  VECTORIZEDN_DEFINE_UNARY_OP(atanh)
  VECTORIZEDN_DEFINE_BINARY_OP(atan2)
  VECTORIZEDN_DEFINE_BINARY_OP(copysign)
  VECTORIZEDN_DEFINE_UNARY_OP(erf)
  VECTORIZEDN_DEFINE_UNARY_OP(erfc)
  VECTORIZEDN_DEFINE_UNARY_OP(erfinv)
  VECTORIZEDN_DEFINE_UNARY_OP(exp)
  VECTORIZEDN_DEFINE_UNARY_OP(exp2)
  VECTORIZEDN_DEFINE_UNARY_OP(expm1)
  VECTORIZEDN_DEFINE_UNARY_OP(exp_u20)
  VECTORIZEDN_DEFINE_UNARY_OP(frac)
  VECTORIZEDN_DEFINE_BINARY_OP(fmod)
  VECTORIZEDN_DEFINE_UNARY_OP(log)
  VECTORIZEDN_DEFINE_UNARY_OP(log10)
  VECTORIZEDN_DEFINE_UNARY_OP(log1p)
  VECTORIZEDN_DEFINE_UNARY_OP(log2)
  VECTORIZEDN_DEFINE_UNARY_OP(ceil)
  VECTORIZEDN_DEFINE_UNARY_OP(cos)
  VECTORIZEDN_DEFINE_UNARY_OP(cosh)
  VECTORIZEDN_DEFINE_UNARY_OP(floor)
  VECTORIZEDN_DEFINE_BINARY_OP(hypot)
  VECTORIZEDN_DEFINE_UNARY_OP(i0)
  VECTORIZEDN_DEFINE_UNARY_OP(i0e)
  VECTORIZEDN_DEFINE_UNARY_OP(digamma)
  VECTORIZEDN_DEFINE_BINARY_OP(igamma)
  VECTORIZEDN_DEFINE_BINARY_OP(igammac)
  VECTORIZEDN_DEFINE_UNARY_OP(neg)
  VECTORIZEDN_DEFINE_BINARY_OP(nextafter)
  VECTORIZEDN_DEFINE_UNARY_OP(round)
  VECTORIZEDN_DEFINE_UNARY_OP(sin)
  VECTORIZEDN_DEFINE_UNARY_OP(sinh)
  VECTORIZEDN_DEFINE_UNARY_OP(tan)
  VECTORIZEDN_DEFINE_UNARY_OP(tanh)
  VECTORIZEDN_DEFINE_UNARY_OP(trunc)
  VECTORIZEDN_DEFINE_UNARY_OP(lgamma)
  VECTORIZEDN_DEFINE_UNARY_OP(sqrt)
  VECTORIZEDN_DEFINE_UNARY_OP(reciprocal)
  VECTORIZEDN_DEFINE_UNARY_OP(rsqrt)
  VECTORIZEDN_DEFINE_BINARY_OP(pow)
  VECTORIZEDN_DEFINE_BINARY_OP(operator==)
  VECTORIZEDN_DEFINE_BINARY_OP(operator!=)
  VECTORIZEDN_DEFINE_BINARY_OP(operator>=)
  VECTORIZEDN_DEFINE_BINARY_OP(operator<=)
  VECTORIZEDN_DEFINE_BINARY_OP(operator>)
  VECTORIZEDN_DEFINE_BINARY_OP(operator<)
  VECTORIZEDN_DEFINE_BINARY_OP(eq)
  VECTORIZEDN_DEFINE_BINARY_OP(ne)
  VECTORIZEDN_DEFINE_BINARY_OP(gt)
  VECTORIZEDN_DEFINE_BINARY_OP(ge)
  VECTORIZEDN_DEFINE_BINARY_OP(lt)
  VECTORIZEDN_DEFINE_BINARY_OP(le)

#undef VECTORIZEDN_DEFINE_UNARY_OP
#undef VECTORIZEDN_DEFINE_BINARY_OP
};
|
| 310 |
+
|
| 311 |
+
#define VECTORIZEDN_DEFINE_UNARY_OP_GLOBAL(op) \
|
| 312 |
+
template <typename T, int N> \
|
| 313 |
+
inline VectorizedN<T, N> op(const VectorizedN<T, N>& a) { \
|
| 314 |
+
return a.unary_op([](const Vectorized<T>& a) { return op(a); }); \
|
| 315 |
+
}
|
| 316 |
+
|
| 317 |
+
#define VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(op) \
|
| 318 |
+
template <typename T, int N> \
|
| 319 |
+
inline VectorizedN<T, N> op( \
|
| 320 |
+
const VectorizedN<T, N>& a, const VectorizedN<T, N>& b) { \
|
| 321 |
+
return a.binary_op(b, [](const Vectorized<T>& a, const Vectorized<T>& b) { \
|
| 322 |
+
return op(a, b); \
|
| 323 |
+
}); \
|
| 324 |
+
}
|
| 325 |
+
|
| 326 |
+
#define VECTORIZEDN_DEFINE_TERNARY_OP_GLOBAL(op) \
|
| 327 |
+
template <typename T, int N> \
|
| 328 |
+
inline VectorizedN<T, N> op( \
|
| 329 |
+
const VectorizedN<T, N>& a, \
|
| 330 |
+
const VectorizedN<T, N>& b, \
|
| 331 |
+
const VectorizedN<T, N>& c) { \
|
| 332 |
+
return a.ternary_op( \
|
| 333 |
+
b, \
|
| 334 |
+
c, \
|
| 335 |
+
[](const Vectorized<T>& a, \
|
| 336 |
+
const Vectorized<T>& b, \
|
| 337 |
+
const Vectorized<T>& c) { return op(a, b, c); }); \
|
| 338 |
+
}
|
| 339 |
+
|
| 340 |
+
#define VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(op) \
|
| 341 |
+
template <typename T, int N> \
|
| 342 |
+
inline VectorizedN<T, N>& op( \
|
| 343 |
+
VectorizedN<T, N>& a, const VectorizedN<T, N>& b) { \
|
| 344 |
+
a = a.binary_op(b, [](const Vectorized<T>& a, const Vectorized<T>& b) { \
|
| 345 |
+
return op(a, b); \
|
| 346 |
+
}); \
|
| 347 |
+
return a; \
|
| 348 |
+
}
|
| 349 |
+
|
| 350 |
+
VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator+)
|
| 351 |
+
VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator-)
|
| 352 |
+
VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator*)
|
| 353 |
+
VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator/)
|
| 354 |
+
VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator%)
|
| 355 |
+
VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator||)
|
| 356 |
+
VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator<<)
|
| 357 |
+
VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator>>)
|
| 358 |
+
VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(maximum)
|
| 359 |
+
VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(minimum)
|
| 360 |
+
VECTORIZEDN_DEFINE_TERNARY_OP_GLOBAL(fmadd)
|
| 361 |
+
VECTORIZEDN_DEFINE_TERNARY_OP_GLOBAL(fmsub)
|
| 362 |
+
VECTORIZEDN_DEFINE_TERNARY_OP_GLOBAL(clamp)
|
| 363 |
+
VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(clamp_max)
|
| 364 |
+
VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(clamp_min)
|
| 365 |
+
VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator&)
|
| 366 |
+
VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator|)
|
| 367 |
+
VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator^)
|
| 368 |
+
VECTORIZEDN_DEFINE_UNARY_OP_GLOBAL(operator~)
|
| 369 |
+
|
| 370 |
+
VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator+=)
|
| 371 |
+
VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator-=)
|
| 372 |
+
VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator*=)
|
| 373 |
+
VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator/=)
|
| 374 |
+
VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator%=)
|
| 375 |
+
VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator<<=)
|
| 376 |
+
VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator>>=)
|
| 377 |
+
|
| 378 |
+
#undef VECTORIZEDN_DEFINE_UNARY_OP_GLOBAL
|
| 379 |
+
#undef VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL
|
| 380 |
+
#undef VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL
|
| 381 |
+
|
| 382 |
+
template <typename T, int N, typename OpVec>
|
| 383 |
+
inline T vec_reduce_all(const OpVec& vec_fun, VectorizedN<T, N> acc_vec) {
|
| 384 |
+
Vectorized<T> vec_result = acc_vec[0];
|
| 385 |
+
for (int i = 1; i < N; i++) {
|
| 386 |
+
vec_result = vec_fun(vec_result, acc_vec[i]);
|
| 387 |
+
}
|
| 388 |
+
return vec_reduce_all(vec_fun, vec_result);
|
| 389 |
+
}
|
| 390 |
+
|
| 391 |
+
template <typename T, int N>
|
| 392 |
+
std::ostream& operator<<(std::ostream& stream, const VectorizedN<T, N>& vec_n) {
|
| 393 |
+
stream << "vec_n[";
|
| 394 |
+
for (int i = 0; i < N; ++i) {
|
| 395 |
+
if (i != 0) {
|
| 396 |
+
stream << ", ";
|
| 397 |
+
}
|
| 398 |
+
stream << vec_n[i];
|
| 399 |
+
}
|
| 400 |
+
stream << ']';
|
| 401 |
+
return stream;
|
| 402 |
+
}
|
| 403 |
+
} // namespace CPU_CAPABILITY
|
| 404 |
+
} // namespace at::vec
|
lib/python3.10/site-packages/torch/include/ATen/cudnn/Descriptors.h
ADDED
|
@@ -0,0 +1,409 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <string>
|
| 4 |
+
|
| 5 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 6 |
+
#include <ATen/cuda/Exceptions.h>
|
| 7 |
+
|
| 8 |
+
#include <ATen/cudnn/cudnn-wrapper.h>
|
| 9 |
+
#include <ATen/cudnn/Utils.h>
|
| 10 |
+
#include <ATen/core/Tensor.h>
|
| 11 |
+
#include <ATen/TensorUtils.h>
|
| 12 |
+
#include <ATen/cuda/ATenCUDAGeneral.h>
|
| 13 |
+
#include <cuda.h>
|
| 14 |
+
|
| 15 |
+
#ifndef AT_PER_OPERATOR_HEADERS
|
| 16 |
+
#include <ATen/Functions.h>
|
| 17 |
+
#else
|
| 18 |
+
#include <ATen/ops/empty.h>
|
| 19 |
+
#endif
|
| 20 |
+
|
| 21 |
+
#if defined(CUDNN_VERSION) && CUDNN_VERSION >= 8907
|
| 22 |
+
#define USE_CUDNN_RNN_V8_API
|
| 23 |
+
#endif
|
| 24 |
+
|
| 25 |
+
namespace at::native {
|
| 26 |
+
|
| 27 |
+
std::string cudnnTypeToString(cudnnDataType_t dtype);
|
| 28 |
+
|
| 29 |
+
// TODO: Add constructors for all of the descriptors
|
| 30 |
+
|
| 31 |
+
inline int dataSize(cudnnDataType_t dataType)
|
| 32 |
+
{
|
| 33 |
+
switch (dataType) {
|
| 34 |
+
case CUDNN_DATA_BFLOAT16:
|
| 35 |
+
case CUDNN_DATA_HALF: return 2;
|
| 36 |
+
case CUDNN_DATA_FLOAT: return 4;
|
| 37 |
+
default: return 8;
|
| 38 |
+
}
|
| 39 |
+
}
|
| 40 |
+
|
| 41 |
+
// The stride for a size-1 dimensions is not uniquely determined; in
|
| 42 |
+
// fact, it can be anything you want, because the fact that the
|
| 43 |
+
// tensor is size 1 at this dimension means that you will never actually
|
| 44 |
+
// try advancing your pointer by this stride.
|
| 45 |
+
//
|
| 46 |
+
// However, CuDNN has a much more stringent requirement on strides:
|
| 47 |
+
// if you are passing a contiguous input, it better be the case
|
| 48 |
+
// that the stride for dim i is the product of the sizes of dims
|
| 49 |
+
// i+1 to the end. This stride is indeed uniquely determined. This
|
| 50 |
+
// function modifies 'stride' in place so this invariant holds.
|
| 51 |
+
template <typename T>
|
| 52 |
+
static inline void fixSizeOneDimStride(int dim, const T *size, T *stride, bool nhwc) {
|
| 53 |
+
int64_t z = 1;
|
| 54 |
+
int index = 0;
|
| 55 |
+
std::vector<int> permutation(dim);
|
| 56 |
+
|
| 57 |
+
if (nhwc) {
|
| 58 |
+
permutation[index++] = 1;
|
| 59 |
+
}
|
| 60 |
+
for (int d = dim-1; d > 1; d--) {
|
| 61 |
+
permutation[index++] = d;
|
| 62 |
+
}
|
| 63 |
+
if (!nhwc) {
|
| 64 |
+
permutation[index++] = 1;
|
| 65 |
+
}
|
| 66 |
+
permutation[index++] = 0;
|
| 67 |
+
for (int d : permutation) {
|
| 68 |
+
if (size[d] == 1) {
|
| 69 |
+
stride[d] = z;
|
| 70 |
+
} else {
|
| 71 |
+
z *= size[d];
|
| 72 |
+
}
|
| 73 |
+
}
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
template <typename T, cudnnStatus_t (*dtor)(T*)>
|
| 77 |
+
struct DescriptorDeleter {
|
| 78 |
+
void operator()(T* x) {
|
| 79 |
+
if (x != nullptr) {
|
| 80 |
+
AT_CUDNN_CHECK(dtor(x));
|
| 81 |
+
}
|
| 82 |
+
}
|
| 83 |
+
};
|
| 84 |
+
|
| 85 |
+
// A generic class for wrapping cuDNN descriptor types. All you need
|
| 86 |
+
// is to give the underlying type the Descriptor_t points to (usually,
|
| 87 |
+
// if it's cudnnTensorDescriptor_t it points to cudnnTensorStruct),
|
| 88 |
+
// the constructor and the destructor. Subclasses are responsible
|
| 89 |
+
// for defining a set() function to actually set the descriptor.
|
| 90 |
+
//
|
| 91 |
+
// Descriptors default construct to a nullptr, and have a descriptor
|
| 92 |
+
// initialized the first time you call set() or any other initializing
|
| 93 |
+
// function.
|
| 94 |
+
template <typename T, cudnnStatus_t (*ctor)(T**), cudnnStatus_t (*dtor)(T*)>
|
| 95 |
+
// NOLINTNEXTLINE(bugprone-exception-escape)
|
| 96 |
+
class TORCH_CUDA_CPP_API Descriptor {
|
| 97 |
+
public:
|
| 98 |
+
// TODO: Figure out why const-correctness doesn't work here
|
| 99 |
+
|
| 100 |
+
// Use desc() to access the underlying descriptor pointer in
|
| 101 |
+
// a read-only fashion. Most client code should use this.
|
| 102 |
+
// If the descriptor was never initialized, this will return
|
| 103 |
+
// nullptr.
|
| 104 |
+
T* desc() const { return desc_.get(); }
|
| 105 |
+
T* desc() { return desc_.get(); }
|
| 106 |
+
|
| 107 |
+
// Use mut_desc() to access the underlying descriptor pointer
|
| 108 |
+
// if you intend to modify what it points to (e.g., using
|
| 109 |
+
// cudnnSetFooDescriptor). This will ensure that the descriptor
|
| 110 |
+
// is initialized. Code in this file will use this function.
|
| 111 |
+
T* mut_desc() { init(); return desc_.get(); }
|
| 112 |
+
protected:
|
| 113 |
+
void init() {
|
| 114 |
+
if (desc_ == nullptr) {
|
| 115 |
+
T* raw_desc = nullptr;
|
| 116 |
+
AT_CUDNN_CHECK(ctor(&raw_desc));
|
| 117 |
+
desc_.reset(raw_desc);
|
| 118 |
+
}
|
| 119 |
+
}
|
| 120 |
+
private:
|
| 121 |
+
std::unique_ptr<T, DescriptorDeleter<T, dtor>> desc_;
|
| 122 |
+
};
|
| 123 |
+
|
| 124 |
+
class TORCH_CUDA_CPP_API RNNDataDescriptor : public Descriptor<
|
| 125 |
+
cudnnRNNDataStruct,
|
| 126 |
+
&cudnnCreateRNNDataDescriptor,
|
| 127 |
+
&cudnnDestroyRNNDataDescriptor> {
|
| 128 |
+
public:
|
| 129 |
+
void set(const at::Tensor &t, cudnnRNNDataLayout_t layout, int maxSeqLength, int batchSize, int vectorSize, const int* seqLengthArray);
|
| 130 |
+
private:
|
| 131 |
+
void set(cudnnDataType_t dataType, cudnnRNNDataLayout_t layout, int maxSeqLength, int batchSize, int vectorSize, const int* seqLengthArray) {
|
| 132 |
+
AT_CUDNN_CHECK(cudnnSetRNNDataDescriptor(mut_desc(), dataType, layout, maxSeqLength, batchSize, vectorSize, seqLengthArray, nullptr));
|
| 133 |
+
}
|
| 134 |
+
};
|
| 135 |
+
|
| 136 |
+
class TORCH_CUDA_CPP_API TensorDescriptor : public Descriptor<
|
| 137 |
+
cudnnTensorStruct,
|
| 138 |
+
&cudnnCreateTensorDescriptor,
|
| 139 |
+
&cudnnDestroyTensorDescriptor> {
|
| 140 |
+
public:
|
| 141 |
+
TensorDescriptor() = default;
|
| 142 |
+
explicit TensorDescriptor(const at::Tensor &t, size_t pad = 0) {
|
| 143 |
+
set(t, pad);
|
| 144 |
+
}
|
| 145 |
+
|
| 146 |
+
// Note [CuDNN broadcast padding]
|
| 147 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 148 |
+
// pad specifies the minimum dimensionality of the tensor descriptor
|
| 149 |
+
// we produce (it doesn't have anything to do with, e.g., convolution
|
| 150 |
+
// padding). If 't' is lower-dimensional than 'pad', the remaining
|
| 151 |
+
// dimensions (on the right) are padded with ones. This doesn't
|
| 152 |
+
// affect the underlying data layout. This is particularly useful for
|
| 153 |
+
// dealing with a peculiarity of the CuDNN API, which is that broadcasting in CuDNN is
|
| 154 |
+
// done in two steps: first, the client code is expected to pad out
|
| 155 |
+
// (the dimensions) input tensors to be the same dimension as the
|
| 156 |
+
// target broadcast, and then second, CuDNN takes of actually
|
| 157 |
+
// broadcasting size 1 dimensions.
|
| 158 |
+
|
| 159 |
+
void set(const at::Tensor &t, size_t pad = 0);
|
| 160 |
+
void set(const at::Tensor &t, at::MemoryFormat memory_format, size_t pad = 0);
|
| 161 |
+
void set(cudnnDataType_t dataType, IntArrayRef sizes, IntArrayRef strides, size_t pad = 0);
|
| 162 |
+
|
| 163 |
+
void print();
|
| 164 |
+
|
| 165 |
+
private:
|
| 166 |
+
void set(cudnnDataType_t dataType, IntArrayRef sizes, IntArrayRef strides, size_t pad, bool nhwc);
|
| 167 |
+
|
| 168 |
+
void set(cudnnDataType_t dataType, int dim, int* size, int* stride, bool nhwc) {
|
| 169 |
+
std::vector<int> strides_copy(stride, stride + dim);
|
| 170 |
+
fixSizeOneDimStride<int>(dim, size, strides_copy.data(), nhwc);
|
| 171 |
+
AT_CUDNN_CHECK(cudnnSetTensorNdDescriptor(mut_desc(), dataType, dim, size, strides_copy.data()));
|
| 172 |
+
}
|
| 173 |
+
};
|
| 174 |
+
|
| 175 |
+
std::ostream& operator<<(std::ostream & out, const TensorDescriptor& d);
|
| 176 |
+
|
| 177 |
+
class TORCH_CUDA_CPP_API FilterDescriptor : public Descriptor<
|
| 178 |
+
cudnnFilterStruct,
|
| 179 |
+
&cudnnCreateFilterDescriptor,
|
| 180 |
+
&cudnnDestroyFilterDescriptor> {
|
| 181 |
+
public:
|
| 182 |
+
void set(const at::Tensor &t, int64_t pad = 0) {
|
| 183 |
+
set(t, at::MemoryFormat::Contiguous, pad);
|
| 184 |
+
}
|
| 185 |
+
|
| 186 |
+
void set(const at::Tensor &t, const at::MemoryFormat memory_format, int64_t pad = 0);
|
| 187 |
+
|
| 188 |
+
void print();
|
| 189 |
+
private:
|
| 190 |
+
void set(cudnnDataType_t dataType, int dim, int* size, cudnnTensorFormat_t filter_format) {
|
| 191 |
+
AT_CUDNN_CHECK(cudnnSetFilterNdDescriptor(mut_desc(), dataType, filter_format, dim, size));
|
| 192 |
+
}
|
| 193 |
+
};
|
| 194 |
+
|
| 195 |
+
std::ostream& operator<<(std::ostream & out, const FilterDescriptor& d);
|
| 196 |
+
|
| 197 |
+
struct TORCH_CUDA_CPP_API ConvolutionDescriptor
|
| 198 |
+
: public Descriptor<
|
| 199 |
+
cudnnConvolutionStruct,
|
| 200 |
+
&cudnnCreateConvolutionDescriptor,
|
| 201 |
+
&cudnnDestroyConvolutionDescriptor> {
|
| 202 |
+
void set(cudnnDataType_t dataType, int dim, int* pad, int* stride, int * upscale /* aka dilation */, int groups, bool allow_tf32) {
|
| 203 |
+
cudnnDataType_t mathType = dataType;
|
| 204 |
+
if (dataType == CUDNN_DATA_HALF) mathType = CUDNN_DATA_FLOAT;
|
| 205 |
+
AT_CUDNN_CHECK(cudnnSetConvolutionNdDescriptor(mut_desc(), dim, pad, stride, upscale,
|
| 206 |
+
CUDNN_CROSS_CORRELATION, mathType));
|
| 207 |
+
AT_CUDNN_CHECK(cudnnSetConvolutionGroupCount(mut_desc(), groups));
|
| 208 |
+
// See Note [behavior of cudnnFind and cudnnGet]
|
| 209 |
+
AT_CUDNN_CHECK(cudnnSetConvolutionMathType(mut_desc(), CUDNN_DEFAULT_MATH));
|
| 210 |
+
if(dataType == CUDNN_DATA_HALF) {
|
| 211 |
+
AT_CUDNN_CHECK(cudnnSetConvolutionMathType(mut_desc(), CUDNN_TENSOR_OP_MATH));
|
| 212 |
+
} else if (dataType == CUDNN_DATA_FLOAT && !allow_tf32) {
|
| 213 |
+
AT_CUDNN_CHECK(cudnnSetConvolutionMathType(mut_desc(), CUDNN_FMA_MATH));
|
| 214 |
+
}
|
| 215 |
+
}
|
| 216 |
+
};
|
| 217 |
+
|
| 218 |
+
struct TORCH_CUDA_CPP_API SpatialTransformerDescriptor
|
| 219 |
+
: public Descriptor<
|
| 220 |
+
cudnnSpatialTransformerStruct,
|
| 221 |
+
&cudnnCreateSpatialTransformerDescriptor,
|
| 222 |
+
&cudnnDestroySpatialTransformerDescriptor> {
|
| 223 |
+
void set(cudnnDataType_t dataType, int dim, int* size) {
|
| 224 |
+
AT_CUDNN_CHECK(cudnnSetSpatialTransformerNdDescriptor(mut_desc(), CUDNN_SAMPLER_BILINEAR, dataType, dim, size));
|
| 225 |
+
}
|
| 226 |
+
};
|
| 227 |
+
|
| 228 |
+
// NOLINTNEXTLINE(bugprone-exception-escape)
|
| 229 |
+
struct TORCH_CUDA_CPP_API DropoutDescriptor
|
| 230 |
+
: public Descriptor<
|
| 231 |
+
cudnnDropoutStruct,
|
| 232 |
+
&cudnnCreateDropoutDescriptor,
|
| 233 |
+
&cudnnDestroyDropoutDescriptor> {
|
| 234 |
+
at::Tensor state;
|
| 235 |
+
|
| 236 |
+
// Initialize a dropout descriptor's RNG state.
|
| 237 |
+
// WARNING: This function is very expensive, avoid calling this function!
|
| 238 |
+
void initialize_rng(cudnnHandle_t handle, float dropout, long long int seed, const TensorOptions& options) {
|
| 239 |
+
TORCH_INTERNAL_ASSERT(dropout > 0, "dropout must be nonzero; otherwise call set_no_dropout");
|
| 240 |
+
size_t state_size = 0;
|
| 241 |
+
AT_CUDNN_CHECK(cudnnDropoutGetStatesSize(handle, &state_size));
|
| 242 |
+
AT_ASSERT(options.device().type() == kCUDA);
|
| 243 |
+
AT_ASSERT(options.dtype() == kByte);
|
| 244 |
+
state = at::empty({static_cast<int64_t>(state_size)}, options);
|
| 245 |
+
AT_CUDNN_CHECK(cudnnSetDropoutDescriptor(mut_desc(), handle, dropout, state.data_ptr(), state_size, seed));
|
| 246 |
+
}
|
| 247 |
+
|
| 248 |
+
// Restore a dropout descriptor given a dropout probability and existing RNG state.
|
| 249 |
+
void set(cudnnHandle_t handle, float dropout, const at::Tensor& state) {
|
| 250 |
+
TORCH_INTERNAL_ASSERT(dropout > 0, "dropout must be nonzero; otherwise call set_no_dropout");
|
| 251 |
+
void *state_ptr = state.data_ptr();
|
| 252 |
+
size_t state_size = state.size(0);
|
| 253 |
+
// NB: The seed doesn't actually matter, so we give a dummy value
|
| 254 |
+
AT_CUDNN_CHECK(cudnnRestoreDropoutDescriptor(mut_desc(), handle, dropout, state_ptr, state_size, 0 /* seed */));
|
| 255 |
+
}
|
| 256 |
+
|
| 257 |
+
// Restore a dropout descriptor corresponding to no dropout
|
| 258 |
+
void set_no_dropout(cudnnHandle_t handle) {
|
| 259 |
+
// NB: seed doesn't matter when dropout = 0, because no random number
|
| 260 |
+
// initialization actually takes place when there is no dropout.
|
| 261 |
+
// NB: Empirically, cudnnSetDropoutDescriptor is cheap when
|
| 262 |
+
// dropout == 0
|
| 263 |
+
AT_CUDNN_CHECK(cudnnSetDropoutDescriptor(mut_desc(), handle, 0 /* dropout */, nullptr, 0 /* state_size */, 0 /* seed */));
|
| 264 |
+
}
|
| 265 |
+
};
|
| 266 |
+
|
| 267 |
+
struct TORCH_CUDA_CPP_API RNNDescriptor : public Descriptor<
|
| 268 |
+
cudnnRNNStruct,
|
| 269 |
+
&cudnnCreateRNNDescriptor,
|
| 270 |
+
&cudnnDestroyRNNDescriptor> {
|
| 271 |
+
DropoutDescriptor dropout_desc_;
|
| 272 |
+
void set(cudnnHandle_t handle,
|
| 273 |
+
#ifdef USE_CUDNN_RNN_V8_API
|
| 274 |
+
int input_size,
|
| 275 |
+
bool packed,
|
| 276 |
+
#endif
|
| 277 |
+
int hidden_size, int proj_size, int num_layers, DropoutDescriptor&& dropout_desc,
|
| 278 |
+
cudnnRNNInputMode_t input_mode, cudnnDirectionMode_t bidirectional,
|
| 279 |
+
cudnnRNNMode_t mode, cudnnDataType_t datatype, cudnnDataType_t input_type, cudnnRNNAlgo_t algo, bool allow_tf32) {
|
| 280 |
+
dropout_desc_ = std::move(dropout_desc);
|
| 281 |
+
#ifndef USE_CUDNN_RNN_V8_API
|
| 282 |
+
AT_CUDNN_CHECK(cudnnSetRNNDescriptor_v6(
|
| 283 |
+
handle,
|
| 284 |
+
mut_desc(),
|
| 285 |
+
hidden_size,
|
| 286 |
+
num_layers,
|
| 287 |
+
dropout_desc_.desc(),
|
| 288 |
+
input_mode,
|
| 289 |
+
bidirectional,
|
| 290 |
+
mode,
|
| 291 |
+
algo,
|
| 292 |
+
datatype));
|
| 293 |
+
if (proj_size != 0) {
|
| 294 |
+
AT_CUDNN_CHECK(cudnnSetRNNProjectionLayers(
|
| 295 |
+
handle,
|
| 296 |
+
/*rnnDesc=*/mut_desc(),
|
| 297 |
+
/*recProjSize=*/proj_size,
|
| 298 |
+
/*outProjSize=*/0));
|
| 299 |
+
}
|
| 300 |
+
cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
|
| 301 |
+
if (prop->major >= 7) {
|
| 302 |
+
if (input_type == CUDNN_DATA_HALF) {
|
| 303 |
+
cudnnSetRNNMatrixMathType(mut_desc(), CUDNN_TENSOR_OP_MATH);
|
| 304 |
+
}
|
| 305 |
+
else if (input_type == CUDNN_DATA_FLOAT && !allow_tf32) {
|
| 306 |
+
cudnnSetRNNMatrixMathType(mut_desc(), CUDNN_FMA_MATH);
|
| 307 |
+
}
|
| 308 |
+
else {
|
| 309 |
+
// Technically, as the default it's not necessary to explicitly
|
| 310 |
+
// set this.
|
| 311 |
+
cudnnSetRNNMatrixMathType(mut_desc(), CUDNN_DEFAULT_MATH);
|
| 312 |
+
}
|
| 313 |
+
}
|
| 314 |
+
#else
|
| 315 |
+
cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
|
| 316 |
+
auto math_type = CUDNN_DEFAULT_MATH;
|
| 317 |
+
if (prop->major >= 7) {
|
| 318 |
+
if (input_type == CUDNN_DATA_HALF) {
|
| 319 |
+
math_type = CUDNN_TENSOR_OP_MATH;
|
| 320 |
+
} else if (!allow_tf32) {
|
| 321 |
+
math_type = CUDNN_FMA_MATH;
|
| 322 |
+
}
|
| 323 |
+
}
|
| 324 |
+
AT_CUDNN_CHECK(cudnnSetRNNDescriptor_v8(
|
| 325 |
+
mut_desc(),
|
| 326 |
+
algo,
|
| 327 |
+
mode,
|
| 328 |
+
CUDNN_RNN_DOUBLE_BIAS,
|
| 329 |
+
bidirectional,
|
| 330 |
+
input_mode,
|
| 331 |
+
input_type,
|
| 332 |
+
datatype,
|
| 333 |
+
math_type,
|
| 334 |
+
input_size,
|
| 335 |
+
hidden_size,
|
| 336 |
+
proj_size ? proj_size : hidden_size,
|
| 337 |
+
num_layers,
|
| 338 |
+
dropout_desc_.desc(),
|
| 339 |
+
packed ? CUDNN_RNN_PADDED_IO_DISABLED : CUDNN_RNN_PADDED_IO_ENABLED));
|
| 340 |
+
#endif
|
| 341 |
+
}
|
| 342 |
+
};
|
| 343 |
+
|
| 344 |
+
struct TORCH_CUDA_CPP_API CTCLossDescriptor
|
| 345 |
+
: public Descriptor<
|
| 346 |
+
cudnnCTCLossStruct,
|
| 347 |
+
&cudnnCreateCTCLossDescriptor,
|
| 348 |
+
&cudnnDestroyCTCLossDescriptor> {
|
| 349 |
+
void set(cudnnDataType_t datatype) {
|
| 350 |
+
AT_CUDNN_CHECK(cudnnSetCTCLossDescriptor(mut_desc(), datatype));
|
| 351 |
+
}
|
| 352 |
+
void setEx(
|
| 353 |
+
cudnnDataType_t datatype,
|
| 354 |
+
cudnnLossNormalizationMode_t normMode,
|
| 355 |
+
cudnnNanPropagation_t gradMode) {
|
| 356 |
+
AT_CUDNN_CHECK(
|
| 357 |
+
cudnnSetCTCLossDescriptorEx(mut_desc(), datatype, normMode, gradMode));
|
| 358 |
+
}
|
| 359 |
+
void set_v8_v9(
|
| 360 |
+
cudnnDataType_t datatype,
|
| 361 |
+
cudnnLossNormalizationMode_t normMode,
|
| 362 |
+
cudnnNanPropagation_t gradMode,
|
| 363 |
+
int maxLabelLength) {
|
| 364 |
+
#if defined(CUDNN_VERSION) && CUDNN_VERSION >= 90000
|
| 365 |
+
auto gradModev9 = CUDNN_CTC_ZERO_OOB_GRADIENTS;
|
| 366 |
+
if (gradMode == cudnnNanPropagation_t::CUDNN_PROPAGATE_NAN) {
|
| 367 |
+
gradModev9 = CUDNN_CTC_SKIP_OOB_GRADIENTS;
|
| 368 |
+
}
|
| 369 |
+
AT_CUDNN_CHECK(
|
| 370 |
+
cudnnSetCTCLossDescriptor_v9(mut_desc(), datatype, normMode, gradModev9, maxLabelLength));
|
| 371 |
+
#else
|
| 372 |
+
AT_CUDNN_CHECK(
|
| 373 |
+
cudnnSetCTCLossDescriptor_v8(mut_desc(), datatype, normMode, gradMode, maxLabelLength));
|
| 374 |
+
#endif
|
| 375 |
+
}
|
| 376 |
+
|
| 377 |
+
};
|
| 378 |
+
|
| 379 |
+
struct TORCH_CUDA_CPP_API ActivationDescriptor
|
| 380 |
+
: public Descriptor<
|
| 381 |
+
cudnnActivationStruct,
|
| 382 |
+
&cudnnCreateActivationDescriptor,
|
| 383 |
+
&cudnnDestroyActivationDescriptor> {
|
| 384 |
+
void set(cudnnActivationMode_t mode) {
|
| 385 |
+
AT_ASSERT(
|
| 386 |
+
mode == CUDNN_ACTIVATION_RELU,
|
| 387 |
+
"TODO: support more cuDNN activation modes");
|
| 388 |
+
AT_CUDNN_CHECK(cudnnSetActivationDescriptor(
|
| 389 |
+
mut_desc(),
|
| 390 |
+
mode,
|
| 391 |
+
cudnnNanPropagation_t::CUDNN_NOT_PROPAGATE_NAN,
|
| 392 |
+
std::numeric_limits<double>::max()));
|
| 393 |
+
}
|
| 394 |
+
};
|
| 395 |
+
|
| 396 |
+
union Constant
|
| 397 |
+
{
|
| 398 |
+
float f;
|
| 399 |
+
double d;
|
| 400 |
+
Constant(cudnnDataType_t dataType, double value) {
|
| 401 |
+
if (dataType == CUDNN_DATA_HALF || dataType == CUDNN_DATA_FLOAT) {
|
| 402 |
+
f = static_cast<float>(value);
|
| 403 |
+
} else {
|
| 404 |
+
d = value;
|
| 405 |
+
}
|
| 406 |
+
}
|
| 407 |
+
};
|
| 408 |
+
|
| 409 |
+
} // namespace
|
lib/python3.10/site-packages/torch/include/ATen/cudnn/Handle.h
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/cuda/ATenCUDAGeneral.h>
|
| 4 |
+
#include <ATen/cudnn/cudnn-wrapper.h>
|
| 5 |
+
|
| 6 |
+
namespace at::native {
|
| 7 |
+
|
| 8 |
+
TORCH_CUDA_CPP_API cudnnHandle_t getCudnnHandle();
|
| 9 |
+
} // namespace at::native
|
lib/python3.10/site-packages/torch/include/ATen/cudnn/Handles.h
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <ATen/cudnn/Handle.h>
|
lib/python3.10/site-packages/torch/include/ATen/cudnn/Types.h
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/Tensor.h>
|
| 4 |
+
#include <ATen/cudnn/cudnn-wrapper.h>
|
| 5 |
+
|
| 6 |
+
namespace at::native {
|
| 7 |
+
|
| 8 |
+
TORCH_CUDA_CPP_API cudnnDataType_t
|
| 9 |
+
getCudnnDataTypeFromScalarType(const at::ScalarType dtype);
|
| 10 |
+
cudnnDataType_t getCudnnDataType(const at::Tensor& tensor);
|
| 11 |
+
|
| 12 |
+
int64_t cudnn_version();
|
| 13 |
+
|
| 14 |
+
} // namespace at::native
|
lib/python3.10/site-packages/torch/include/ATen/cudnn/Utils.h
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/Tensor.h>
|
| 4 |
+
#include <ATen/cuda/Exceptions.h>
|
| 5 |
+
#include <ATen/cudnn/Handle.h>
|
| 6 |
+
#include <ATen/cudnn/cudnn-wrapper.h>
|
| 7 |
+
|
| 8 |
+
namespace at::native {
|
| 9 |
+
|
| 10 |
+
// cuDNN has a buggy check for tensor being contiguous (that is, it does
|
| 11 |
+
// not ignore stride for dimension that is equal to 0). This function
|
| 12 |
+
// makes tensors which have zero stride contiguous, by setting the
|
| 13 |
+
// strides to 1 as cuDNN likes.
|
| 14 |
+
inline Tensor contiguousIfZeroInStrides(const Tensor& t) {
|
| 15 |
+
for (auto s : t.strides()) {
|
| 16 |
+
if (s == 0)
|
| 17 |
+
return t.contiguous();
|
| 18 |
+
}
|
| 19 |
+
return t;
|
| 20 |
+
}
|
| 21 |
+
|
| 22 |
+
} // namespace at::native
|
lib/python3.10/site-packages/torch/include/ATen/cudnn/cudnn-wrapper.h
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <cudnn.h>
|
| 4 |
+
|
| 5 |
+
#define STRINGIFY(x) #x
|
| 6 |
+
#define STRING(x) STRINGIFY(x)
|
| 7 |
+
|
| 8 |
+
#if CUDNN_MAJOR < 8 || (CUDNN_MAJOR == 8 && CUDNN_MINOR < 5)
|
| 9 |
+
#pragma message("CuDNN v" STRING( \
|
| 10 |
+
CUDNN_MAJOR) " found, but need at least CuDNN v8. You can get the latest version of CuDNN from https://developer.nvidia.com/cudnn or disable CuDNN with USE_CUDNN=0")
|
| 11 |
+
#pragma message "We strongly encourage you to move to 8.5 and above."
|
| 12 |
+
#pragma message "This message is intended to annoy you enough to update."
|
| 13 |
+
#endif
|
| 14 |
+
|
| 15 |
+
#undef STRINGIFY
|
| 16 |
+
#undef STRING
|
lib/python3.10/site-packages/torch/include/ATen/functorch/ADInterpreters.h
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <ATen/functorch/Interpreter.h>
|
| 3 |
+
|
| 4 |
+
namespace at::functorch {
|
| 5 |
+
|
| 6 |
+
// These are the interpreters for our AD transforms
|
| 7 |
+
// (grad, vjp and jvp).
|
| 8 |
+
// See NOTE: [functorch interpreter stack] for more details.
|
| 9 |
+
|
| 10 |
+
struct TORCH_API GradInterpreterPtr {
|
| 11 |
+
explicit GradInterpreterPtr(const Interpreter* base): base_(base) { TORCH_INTERNAL_ASSERT(base->key() == TransformType::Grad); }
|
| 12 |
+
TransformType key() const { return base_->key(); }
|
| 13 |
+
int64_t level() const { return base_->level(); }
|
| 14 |
+
void processImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack);
|
| 15 |
+
void sendToNextInterpreterImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool grad_special_case);
|
| 16 |
+
bool prevGradMode() const {
|
| 17 |
+
return std::get<GradInterpreterMeta>(base_->meta()).prevGradMode_;
|
| 18 |
+
}
|
| 19 |
+
Tensor lift(const Tensor& tensor) const;
|
| 20 |
+
private:
|
| 21 |
+
const Interpreter* base_;
|
| 22 |
+
};
|
| 23 |
+
|
| 24 |
+
struct TORCH_API JvpInterpreterPtr {
|
| 25 |
+
explicit JvpInterpreterPtr(const Interpreter* base): base_(base) { TORCH_INTERNAL_ASSERT(base->key() == TransformType::Jvp); }
|
| 26 |
+
TransformType key() const { return base_->key(); }
|
| 27 |
+
int64_t level() const { return base_->level(); }
|
| 28 |
+
void processImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack);
|
| 29 |
+
void sendToNextInterpreterImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool grad_special_case);
|
| 30 |
+
bool prevFwdGradMode() const {
|
| 31 |
+
return std::get<JvpInterpreterMeta>(base_->meta()).prevFwdGradMode_;
|
| 32 |
+
}
|
| 33 |
+
Tensor lift(const Tensor& tensor) const;
|
| 34 |
+
private:
|
| 35 |
+
const Interpreter* base_;
|
| 36 |
+
};
|
| 37 |
+
|
| 38 |
+
} // namespace at::functorch
|
lib/python3.10/site-packages/torch/include/ATen/functorch/BatchRulesHelper.h
ADDED
|
@@ -0,0 +1,480 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
// All rights reserved.
|
| 3 |
+
//
|
| 4 |
+
// This source code is licensed under the BSD-style license found in the
|
| 5 |
+
// LICENSE file in the root directory of this source tree.
|
| 6 |
+
#pragma once
|
| 7 |
+
|
| 8 |
+
#include <c10/util/TypeList.h>
|
| 9 |
+
|
| 10 |
+
#include <ATen/ATen.h>
|
| 11 |
+
#include <ATen/Operators.h>
|
| 12 |
+
|
| 13 |
+
#include <ATen/functorch/DynamicLayer.h>
|
| 14 |
+
#include <ATen/functorch/TensorWrapper.h>
|
| 15 |
+
#include <ATen/functorch/BatchingMetaprogramming.h>
|
| 16 |
+
#include <ATen/functorch/LegacyVmapTransforms.h>
|
| 17 |
+
#include <ATen/functorch/BatchedFallback.h>
|
| 18 |
+
#include <ATen/functorch/PlumbingHelper.h>
|
| 19 |
+
#include <ATen/core/dispatch/Dispatcher.h>
|
| 20 |
+
#include <ATen/VmapGeneratedPlumbing.h>
|
| 21 |
+
|
| 22 |
+
#include <utility>
|
| 23 |
+
|
| 24 |
+
// This file contains helper functions for batching rules.
|
| 25 |
+
|
| 26 |
+
namespace at::functorch {
|
| 27 |
+
|
| 28 |
+
TORCH_API Tensor reshape_dim_into(int64_t src, int64_t dst, const Tensor& x);
|
| 29 |
+
TORCH_API Tensor reshape_dim_outof(int64_t src, int64_t size1, const Tensor& x);
|
| 30 |
+
|
| 31 |
+
TORCH_API Tensor reshape_dim_outof_symint(int64_t src, const c10::SymInt& size1, const Tensor& x);
|
| 32 |
+
|
| 33 |
+
Tensor moveBatchDimToFront(const Tensor& tensor, std::optional<int64_t> maybe_batch_dim);
|
| 34 |
+
int64_t rankWithoutBatchDim(const Tensor& tensor, std::optional<int64_t> maybe_batch_dim);
|
| 35 |
+
int64_t numelWithoutBatchDim(const Tensor& tensor, std::optional<int64_t> maybe_batch_dim);
|
| 36 |
+
std::optional<int64_t> valIfNonempty(std::optional<int64_t> maybe_empty, int64_t new_val);
|
| 37 |
+
int64_t getPhysicalDim(const Tensor& tensor, bool has_batch_dim, int64_t logical_dim);
|
| 38 |
+
VmapDimVector getPhysicalDims(const Tensor& tensor, bool has_batch_dim, IntArrayRef logical_dims);
|
| 39 |
+
|
| 40 |
+
void vmapIncompatibleInplaceError(const char* schema_name);
|
| 41 |
+
|
| 42 |
+
Tensor maybePadToLogicalRank(const Tensor& tensor, std::optional<int64_t> has_bdim, int64_t logical_rank);
|
| 43 |
+
|
| 44 |
+
void check_randomness(RandomnessType randomness);
|
| 45 |
+
void check_randomness(RandomnessType randomness, bool any_tensor_bdim);
|
| 46 |
+
|
| 47 |
+
inline Tensor ensure_has_bdim(const Tensor& tensor, bool has_bdim, c10::SymInt batch_size) {
|
| 48 |
+
if (has_bdim) {
|
| 49 |
+
return tensor;
|
| 50 |
+
}
|
| 51 |
+
const auto sizes = tensor.sym_sizes();
|
| 52 |
+
SymDimVector expanded_shape;
|
| 53 |
+
expanded_shape.reserve(sizes.size());
|
| 54 |
+
expanded_shape.emplace_back(std::move(batch_size));
|
| 55 |
+
expanded_shape.insert(expanded_shape.end(), sizes.begin(), sizes.end());
|
| 56 |
+
return tensor.expand_symint(expanded_shape);
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
#define VMAP_SUPPORT(op, batch_rule) \
|
| 60 |
+
m.impl(#op, op ## _generated_plumbing<decltype(&batch_rule), &batch_rule>);
|
| 61 |
+
|
| 62 |
+
#define VMAP_SUPPORT2(op, overload, batch_rule) \
|
| 63 |
+
m.impl(#op "." #overload, op ## _ ## overload ## _generated_plumbing<decltype(&batch_rule), &batch_rule>);
|
| 64 |
+
|
| 65 |
+
#define OP_DECOMPOSE(op) m.impl(#op, static_cast<decltype(&ATEN_FN(op))>(native::op));
|
| 66 |
+
#define OP_DECOMPOSE2(op, overload) m.impl(#op"."#overload, static_cast<decltype(&ATEN_FN2(op, overload))>(native::op));
|
| 67 |
+
|
| 68 |
+
// DO NOT USE ME DIRECTLY! Use BASIC_UNARY_BATCH_RULE to save yourself some pain
|
| 69 |
+
template <typename A, A a, typename C>
|
| 70 |
+
struct BasicUnaryBatchRuleHelper;
|
| 71 |
+
|
| 72 |
+
template <typename F, F Func, typename A, typename... T>
|
| 73 |
+
struct BasicUnaryBatchRuleHelper<F, Func, c10::guts::typelist::typelist<A, T...>> {
|
| 74 |
+
static std::tuple<Tensor, std::optional<int64_t>> apply(
|
| 75 |
+
const Tensor& tensor,
|
| 76 |
+
std::optional<int64_t> batch_dim,
|
| 77 |
+
T... extra_args) {
|
| 78 |
+
return std::make_tuple(Func(tensor, std::forward<T>(extra_args)...), batch_dim);
|
| 79 |
+
}
|
| 80 |
+
};
|
| 81 |
+
|
| 82 |
+
// USAGE: BASIC_UNARY_BATCH_RULE(at::sin)
|
| 83 |
+
// INCORRECT USAGE: BASIC_UNARY_BATCH_RULE(&at::sin)
|
| 84 |
+
// It is important that this macro is not passed a function pointer!!
|
| 85 |
+
#define BASIC_UNARY_BATCH_RULE(fn) SINGLE_ARG(\
|
| 86 |
+
BasicUnaryBatchRuleHelper<\
|
| 87 |
+
decltype(&fn),\
|
| 88 |
+
&fn,\
|
| 89 |
+
c10::guts::function_traits<decltype(fn)>::parameter_types>::apply)
|
| 90 |
+
|
| 91 |
+
#define UNARY_POINTWISE(op) \
|
| 92 |
+
VMAP_SUPPORT(op, BASIC_UNARY_BATCH_RULE(ATEN_FN(op)));
|
| 93 |
+
|
| 94 |
+
template <typename A, A a, typename C>
|
| 95 |
+
struct VariadicBdimsBatchRuleHelper;
|
| 96 |
+
|
| 97 |
+
template <typename F, F Func, typename A, typename... T>
|
| 98 |
+
struct VariadicBdimsBatchRuleHelper<F, Func, c10::guts::typelist::typelist<A, T...>> {
|
| 99 |
+
static std::tuple<Tensor, std::optional<int64_t>> apply(
|
| 100 |
+
const Tensor& tensor,
|
| 101 |
+
std::optional<int64_t> batch_dim,
|
| 102 |
+
T... extra_args) {
|
| 103 |
+
auto tensor_ = moveBatchDimToFront(tensor, batch_dim);
|
| 104 |
+
return std::make_tuple(Func(tensor_, std::forward<T>(extra_args)...), 0);
|
| 105 |
+
}
|
| 106 |
+
};
|
| 107 |
+
|
| 108 |
+
// USAGE: VARIADIC_BDIMS_BATCH_RULE(at::cholesky_inverse)
|
| 109 |
+
// INCORRECT USAGE: VARIADIC_BDIMS_BATCH_RULE(&at::cholesky_inverse)
|
| 110 |
+
// It is important that this macro is not passed a function pointer!!
|
| 111 |
+
#define VARIADIC_BDIMS_BATCH_RULE(fn) SINGLE_ARG(\
|
| 112 |
+
VariadicBdimsBatchRuleHelper<\
|
| 113 |
+
decltype(&fn),\
|
| 114 |
+
&fn,\
|
| 115 |
+
c10::guts::function_traits<decltype(fn)>::parameter_types>::apply)
|
| 116 |
+
|
| 117 |
+
#define VARIADIC_BDIMS(op) \
|
| 118 |
+
VMAP_SUPPORT(op, VARIADIC_BDIMS_BATCH_RULE(ATEN_FN(op)));
|
| 119 |
+
|
| 120 |
+
#define VARIADIC_BDIMS2(op, overload) \
|
| 121 |
+
VMAP_SUPPORT2(op, overload, VARIADIC_BDIMS_BATCH_RULE(ATEN_FN2(op, overload)));
|
| 122 |
+
|
| 123 |
+
template<class F, F Func>
|
| 124 |
+
void boxed_tensor_inputs_batch_rule(const c10::OperatorHandle& op, torch::jit::Stack* stack) {
|
| 125 |
+
const auto& schema = op.schema();
|
| 126 |
+
const auto num_returns = schema.returns().size();
|
| 127 |
+
const auto num_arguments = schema.arguments().size();
|
| 128 |
+
|
| 129 |
+
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
| 130 |
+
auto maybe_layer = maybeCurrentDynamicLayer();
|
| 131 |
+
vmap_check_escaped(maybe_layer, "boxed_tensor_inputs_batch_rule");
|
| 132 |
+
|
| 133 |
+
int64_t cur_level = maybe_layer->layerId();
|
| 134 |
+
|
| 135 |
+
auto orig_arguments = torch::jit::last(*stack, num_arguments);
|
| 136 |
+
if (std::none_of(orig_arguments.begin(), orig_arguments.end(), ivalueParticipatesInCurrentLevel)) {
|
| 137 |
+
op.callBoxed(stack);
|
| 138 |
+
return;
|
| 139 |
+
}
|
| 140 |
+
|
| 141 |
+
auto arguments = torch::jit::pop(*stack, num_arguments);
|
| 142 |
+
std::vector<std::pair<Tensor, std::optional<int64_t>>> tensor_inputs;
|
| 143 |
+
std::vector<int64_t> tensor_pos;
|
| 144 |
+
for (const auto idx : c10::irange(0, num_arguments)) {
|
| 145 |
+
const auto& ivalue = arguments[idx];
|
| 146 |
+
if (ivalue.isTensor()) {
|
| 147 |
+
auto [tensor_value, tensor_bdim] = unwrapTensorAtLevel(ivalue.toTensor(), cur_level);
|
| 148 |
+
tensor_inputs.emplace_back(std::move(tensor_value), tensor_bdim);
|
| 149 |
+
tensor_pos.push_back(static_cast<int64_t>(idx));
|
| 150 |
+
}
|
| 151 |
+
}
|
| 152 |
+
Func(tensor_inputs);
|
| 153 |
+
|
| 154 |
+
size_t tensor_idx = 0;
|
| 155 |
+
TORCH_INTERNAL_ASSERT(!tensor_pos.empty());
|
| 156 |
+
for (const auto arg_idx : c10::irange(0, num_arguments)) {
|
| 157 |
+
if (tensor_idx >= tensor_pos.size() || (int64_t)arg_idx != tensor_pos[tensor_idx]) {
|
| 158 |
+
torch::jit::push(stack, arguments[arg_idx]);
|
| 159 |
+
} else {
|
| 160 |
+
TORCH_INTERNAL_ASSERT(tensor_idx < tensor_inputs.size());
|
| 161 |
+
torch::jit::push(stack, tensor_inputs[tensor_idx].first);
|
| 162 |
+
tensor_idx++;
|
| 163 |
+
}
|
| 164 |
+
}
|
| 165 |
+
|
| 166 |
+
op.callBoxed(stack);
|
| 167 |
+
const auto returns = torch::jit::pop(*stack, num_returns);
|
| 168 |
+
for (const auto& ret : returns) {
|
| 169 |
+
if (ret.isTensor()) {
|
| 170 |
+
torch::jit::push(stack, makeBatched(ret.toTensor(), 0, cur_level));
|
| 171 |
+
} else {
|
| 172 |
+
TORCH_INTERNAL_ASSERT(false, "This boxed batching rule does not currently support ops that return non-tensor values");
|
| 173 |
+
}
|
| 174 |
+
}
|
| 175 |
+
}
|
| 176 |
+
|
| 177 |
+
inline void handle_pointwise_ops(std::vector<std::pair<Tensor, std::optional<int64_t>>> &tensor_inputs) {
|
| 178 |
+
int64_t out_logical_rank = 0;
|
| 179 |
+
for (auto& tensor_input : tensor_inputs) {
|
| 180 |
+
int64_t cur_logical_rank = rankWithoutBatchDim(tensor_input.first, tensor_input.second);
|
| 181 |
+
out_logical_rank = std::max(out_logical_rank, cur_logical_rank);
|
| 182 |
+
}
|
| 183 |
+
for (auto& tensor_input: tensor_inputs) {
|
| 184 |
+
tensor_input.first = moveBatchDimToFront(tensor_input.first, tensor_input.second);
|
| 185 |
+
tensor_input.first = maybePadToLogicalRank(tensor_input.first, tensor_input.second, out_logical_rank);
|
| 186 |
+
}
|
| 187 |
+
}
|
| 188 |
+
|
| 189 |
+
#define POINTWISE_BOXED(op) \
|
| 190 |
+
m.impl(#op, torch::CppFunction::makeFromBoxedFunction<boxed_tensor_inputs_batch_rule<decltype(&handle_pointwise_ops), &handle_pointwise_ops>>());
|
| 191 |
+
|
| 192 |
+
#define POINTWISE_BOXED2(op, overload) \
|
| 193 |
+
m.impl(#op "." #overload, torch::CppFunction::makeFromBoxedFunction<boxed_tensor_inputs_batch_rule<decltype(&handle_pointwise_ops), &handle_pointwise_ops>>());
|
| 194 |
+
|
| 195 |
+
inline void handle_variadic_bdims(std::vector<std::pair<Tensor, std::optional<int64_t>>> &tensor_inputs) {
|
| 196 |
+
for (auto & tensor_input : tensor_inputs) {
|
| 197 |
+
tensor_input.first = moveBatchDimToFront(tensor_input.first, tensor_input.second);
|
| 198 |
+
}
|
| 199 |
+
}
|
| 200 |
+
|
| 201 |
+
#define VARIADIC_BDIMS_BOXED(op) \
|
| 202 |
+
m.impl(#op, torch::CppFunction::makeFromBoxedFunction<boxed_tensor_inputs_batch_rule<decltype(&handle_variadic_bdims), &handle_variadic_bdims>>());
|
| 203 |
+
|
| 204 |
+
using UnpackedBatchedTensor = std::tuple<Tensor, std::optional<int64_t>>;
|
| 205 |
+
|
| 206 |
+
inline void find_and_unpack_tensors(
|
| 207 |
+
const torch::jit::Stack* stack,
|
| 208 |
+
int64_t num_args,
|
| 209 |
+
int64_t cur_level,
|
| 210 |
+
SmallVector<UnpackedBatchedTensor, 5>* tensors,
|
| 211 |
+
SmallVector<int64_t, 5>* tensors_pos,
|
| 212 |
+
int64_t* batch_size) {
|
| 213 |
+
|
| 214 |
+
int64_t computed_batch_size = -1;
|
| 215 |
+
int64_t args_begin = static_cast<int64_t>(stack->size()) - num_args;
|
| 216 |
+
|
| 217 |
+
for (const auto idx : c10::irange(0, num_args)) {
|
| 218 |
+
const auto& ivalue = (*stack)[args_begin + idx];
|
| 219 |
+
if (!ivalue.isTensor()) {
|
| 220 |
+
continue;
|
| 221 |
+
}
|
| 222 |
+
auto unpacked = unwrapTensorAtLevel(ivalue.toTensor(), cur_level);
|
| 223 |
+
const auto& [tensor_value, tensor_bdim] = unpacked;
|
| 224 |
+
if (tensor_bdim.has_value()) {
|
| 225 |
+
auto candidate_batch_size = tensor_value.size(*tensor_bdim);
|
| 226 |
+
if (computed_batch_size == -1) {
|
| 227 |
+
computed_batch_size = candidate_batch_size;
|
| 228 |
+
}
|
| 229 |
+
TORCH_INTERNAL_ASSERT(candidate_batch_size == computed_batch_size);
|
| 230 |
+
}
|
| 231 |
+
|
| 232 |
+
tensors->push_back(std::move(unpacked));
|
| 233 |
+
tensors_pos->push_back(idx);
|
| 234 |
+
}
|
| 235 |
+
TORCH_INTERNAL_ASSERT(computed_batch_size > -1);
|
| 236 |
+
*batch_size = computed_batch_size;
|
| 237 |
+
}
|
| 238 |
+
|
| 239 |
+
inline void boxed_existing_bdim_all_batch_rule(
|
| 240 |
+
const c10::OperatorHandle& op, torch::jit::Stack* stack) {
|
| 241 |
+
const auto& schema = op.schema();
|
| 242 |
+
const auto num_returns = schema.returns().size();
|
| 243 |
+
const auto num_arguments = static_cast<int64_t>(schema.arguments().size());
|
| 244 |
+
|
| 245 |
+
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
| 246 |
+
auto maybe_layer = maybeCurrentDynamicLayer();
|
| 247 |
+
vmap_check_escaped(maybe_layer, "boxed_existing_bdim_all_batch_rule");
|
| 248 |
+
int64_t cur_level = maybe_layer->layerId();
|
| 249 |
+
|
| 250 |
+
const auto arguments = torch::jit::last(stack, num_arguments);
|
| 251 |
+
if (std::none_of(arguments.begin(), arguments.end(), ivalueParticipatesInCurrentLevel)) {
|
| 252 |
+
op.callBoxed(stack);
|
| 253 |
+
return;
|
| 254 |
+
}
|
| 255 |
+
|
| 256 |
+
int64_t args_begin = static_cast<int64_t>(stack->size()) - num_arguments;
|
| 257 |
+
SmallVector<UnpackedBatchedTensor, 5> tensor_inputs;
|
| 258 |
+
SmallVector<int64_t, 5> tensor_pos;
|
| 259 |
+
int64_t batch_size = 0;
|
| 260 |
+
|
| 261 |
+
find_and_unpack_tensors(
|
| 262 |
+
stack, num_arguments, cur_level,
|
| 263 |
+
&tensor_inputs, &tensor_pos, &batch_size);
|
| 264 |
+
|
| 265 |
+
// for each tensor, ensure it has a bdim and reshape it.
|
| 266 |
+
for (const auto tensor_idx : c10::irange(0, tensor_inputs.size())) {
|
| 267 |
+
const auto& [value, bdim] = tensor_inputs[tensor_idx];
|
| 268 |
+
auto value_ = ensure_has_bdim(value, bdim.has_value(), batch_size);
|
| 269 |
+
(*stack)[args_begin + tensor_pos[tensor_idx]] = reshape_dim_into(bdim.value_or(0), 0, value_);
|
| 270 |
+
}
|
| 271 |
+
|
| 272 |
+
op.callBoxed(stack);
|
| 273 |
+
|
| 274 |
+
for (const auto idx : c10::irange(args_begin, args_begin + num_returns)) {
|
| 275 |
+
const auto& ret = (*stack)[idx];
|
| 276 |
+
TORCH_INTERNAL_ASSERT(ret.isTensor(),
|
| 277 |
+
"This boxed batching rule does not currently support ops that return non-tensor values");
|
| 278 |
+
(*stack)[idx] = makeBatched(reshape_dim_outof(0, batch_size, ret.toTensor()), 0, cur_level);
|
| 279 |
+
}
|
| 280 |
+
}
|
| 281 |
+
|
| 282 |
+
// Use when all tensors arguments accept one (normal) batch dim.
|
| 283 |
+
// This batching rule expands the batch dim on all Tensors, reshapes it into
|
| 284 |
+
// dim 0, calls the op, and then reshapes the batch dim out of dim 0.
|
| 285 |
+
// This is not the most efficient thing; if there are alternatives, plese try
|
| 286 |
+
// to use them. Use this only as a last resort.
|
| 287 |
+
#define EXISTING_BDIM_ALL_BOXED(op) \
|
| 288 |
+
m.impl(#op, torch::CppFunction::makeFromBoxedFunction<boxed_existing_bdim_all_batch_rule>());
|
| 289 |
+
|
| 290 |
+
template <int64_t feature_rank, int64_t contig_tensor_index=-1>
|
| 291 |
+
inline void boxed_all_tensors_have_optional_bdim(
|
| 292 |
+
const c10::OperatorHandle& op, torch::jit::Stack* stack) {
|
| 293 |
+
const auto& schema = op.schema();
|
| 294 |
+
const auto num_returns = schema.returns().size();
|
| 295 |
+
const auto num_arguments = schema.arguments().size();
|
| 296 |
+
|
| 297 |
+
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
| 298 |
+
auto maybe_layer = maybeCurrentDynamicLayer();
|
| 299 |
+
vmap_check_escaped(maybe_layer, "boxed_all_tensors_have_optional_bdim");
|
| 300 |
+
int64_t cur_level = maybe_layer->layerId();
|
| 301 |
+
|
| 302 |
+
const auto arguments = torch::jit::last(stack, num_arguments);
|
| 303 |
+
if (std::none_of(arguments.begin(), arguments.end(), ivalueParticipatesInCurrentLevel)) {
|
| 304 |
+
op.callBoxed(stack);
|
| 305 |
+
return;
|
| 306 |
+
}
|
| 307 |
+
|
| 308 |
+
int64_t args_begin = static_cast<int64_t>(stack->size() - num_arguments);
|
| 309 |
+
SmallVector<UnpackedBatchedTensor, 5> tensor_inputs;
|
| 310 |
+
SmallVector<int64_t, 5> tensor_pos;
|
| 311 |
+
int64_t batch_size = 0;
|
| 312 |
+
|
| 313 |
+
find_and_unpack_tensors(
|
| 314 |
+
stack, static_cast<int64_t>(num_arguments), cur_level,
|
| 315 |
+
&tensor_inputs, &tensor_pos, &batch_size);
|
| 316 |
+
|
| 317 |
+
std::optional<bool> is_no_batch_dim_case;
|
| 318 |
+
|
| 319 |
+
for (const auto tensor_idx : c10::irange(0, tensor_inputs.size())) {
|
| 320 |
+
const auto& value = std::get<0>(tensor_inputs[tensor_idx]);
|
| 321 |
+
auto bdim = std::get<1>(tensor_inputs[tensor_idx]);
|
| 322 |
+
const auto logical_rank = rankWithoutBatchDim(value, bdim);
|
| 323 |
+
|
| 324 |
+
if (!is_no_batch_dim_case.has_value()) {
|
| 325 |
+
is_no_batch_dim_case = (logical_rank == feature_rank);
|
| 326 |
+
}
|
| 327 |
+
auto value_ = ensure_has_bdim(value, bdim.has_value(), batch_size);
|
| 328 |
+
if (!bdim.has_value()) {
|
| 329 |
+
bdim = 0;
|
| 330 |
+
}
|
| 331 |
+
if (*is_no_batch_dim_case) {
|
| 332 |
+
TORCH_INTERNAL_ASSERT(logical_rank == feature_rank);
|
| 333 |
+
value_ = moveBatchDimToFront(value_, bdim);
|
| 334 |
+
if (tensor_idx == contig_tensor_index) {
|
| 335 |
+
value_ = value_.contiguous();
|
| 336 |
+
}
|
| 337 |
+
(*stack)[args_begin + tensor_pos[tensor_idx]] = std::move(value_);
|
| 338 |
+
continue;
|
| 339 |
+
}
|
| 340 |
+
TORCH_INTERNAL_ASSERT(logical_rank == feature_rank + 1);
|
| 341 |
+
value_ = reshape_dim_into(*bdim, 0, value_);
|
| 342 |
+
if (tensor_idx == contig_tensor_index) {
|
| 343 |
+
value_ = value_.contiguous();
|
| 344 |
+
}
|
| 345 |
+
(*stack)[args_begin + tensor_pos[tensor_idx]] = std::move(value_);
|
| 346 |
+
}
|
| 347 |
+
|
| 348 |
+
op.callBoxed(stack);
|
| 349 |
+
|
| 350 |
+
for (const auto idx : c10::irange(args_begin, args_begin + num_returns)) {
|
| 351 |
+
const auto& ret = (*stack)[idx];
|
| 352 |
+
TORCH_INTERNAL_ASSERT(ret.isTensor(),
|
| 353 |
+
"This boxed batching rule does not currently support ops that return non-tensor values");
|
| 354 |
+
if (*is_no_batch_dim_case) {
|
| 355 |
+
(*stack)[idx] = makeBatched(ret.toTensor(), 0, cur_level);
|
| 356 |
+
} else {
|
| 357 |
+
(*stack)[idx] = makeBatched(reshape_dim_outof(0, batch_size, ret.toTensor()), 0, cur_level);
|
| 358 |
+
}
|
| 359 |
+
}
|
| 360 |
+
}
|
| 361 |
+
|
| 362 |
+
// Useful for many NN operators.
|
| 363 |
+
// The operator must satisfy the following:
|
| 364 |
+
// - All arguments must accept an optional batch dim.
|
| 365 |
+
// - All arguments must be the same rank
|
| 366 |
+
#define ALL_TENSORS_HAVE_OPTIONAL_BDIM_BOXED(feature_rank, op) \
|
| 367 |
+
m.impl(#op, torch::CppFunction::makeFromBoxedFunction<boxed_all_tensors_have_optional_bdim<feature_rank>>());
|
| 368 |
+
|
| 369 |
+
#define ALL_TENSORS_HAVE_OPTIONAL_BDIM_BOXED_CONTIG1(feature_rank, op, contig_tensor_index) \
|
| 370 |
+
m.impl(#op, \
|
| 371 |
+
torch::CppFunction::makeFromBoxedFunction<\
|
| 372 |
+
boxed_all_tensors_have_optional_bdim<\
|
| 373 |
+
feature_rank, \
|
| 374 |
+
contig_tensor_index>\
|
| 375 |
+
>());
|
| 376 |
+
|
| 377 |
+
template <typename A, A a, typename C>
|
| 378 |
+
struct ExistingBdimBatchRuleHelper;
|
| 379 |
+
|
| 380 |
+
template <typename F, F Func, typename A, typename... T>
|
| 381 |
+
struct ExistingBdimBatchRuleHelper<F, Func, c10::guts::typelist::typelist<A, T...>> {
|
| 382 |
+
static std::tuple<Tensor, std::optional<int64_t>> apply(
|
| 383 |
+
const Tensor& self,
|
| 384 |
+
std::optional<int64_t> self_bdim,
|
| 385 |
+
T... extra_args) {
|
| 386 |
+
auto self_ = reshape_dim_into(*self_bdim, 0, self);
|
| 387 |
+
auto out = Func(self_, std::forward<T>(extra_args)...);
|
| 388 |
+
return std::make_tuple(reshape_dim_outof_symint(0, self.sym_sizes()[*self_bdim], out), 0);
|
| 389 |
+
}
|
| 390 |
+
};
|
| 391 |
+
|
| 392 |
+
// USAGE: EXISTING_BDIM_BATCH_RULE(at::cholesky_inverse)
|
| 393 |
+
// INCORRECT USAGE: EXISTING_BDIM_BATCH_RULE(&at::cholesky_inverse)
|
| 394 |
+
// It is important that this macro is not passed a function pointer!!
|
| 395 |
+
#define EXISTING_BDIM_BATCH_RULE(fn) SINGLE_ARG(\
|
| 396 |
+
ExistingBdimBatchRuleHelper<\
|
| 397 |
+
decltype(&fn),\
|
| 398 |
+
&fn,\
|
| 399 |
+
c10::guts::function_traits<decltype(fn)>::parameter_types>::apply)
|
| 400 |
+
|
| 401 |
+
|
| 402 |
+
#define EXISTING_BDIM(op) \
|
| 403 |
+
VMAP_SUPPORT(op, EXISTING_BDIM_BATCH_RULE(ATEN_FN(op)));
|
| 404 |
+
|
| 405 |
+
#define EXISTING_BDIM2(op, overload) \
|
| 406 |
+
VMAP_SUPPORT2(op, overload, EXISTING_BDIM_BATCH_RULE(ATEN_FN2(op, overload)));
|
| 407 |
+
|
| 408 |
+
#define INVOKE(object,ptrToMember) ((object).*(ptrToMember))
|
| 409 |
+
|
| 410 |
+
|
| 411 |
+
template <typename F, F Method, typename... ExtraArgs>
|
| 412 |
+
Tensor& unary_inplace_batch_rule(Tensor& self, std::optional<int64_t>, ExtraArgs... extra_args) {
|
| 413 |
+
INVOKE(self, Method)(std::forward<ExtraArgs>(extra_args)...);
|
| 414 |
+
return self;
|
| 415 |
+
}
|
| 416 |
+
|
| 417 |
+
inline int64_t get_bdim_size4(
|
| 418 |
+
const Tensor& a_value, std::optional<int64_t> a_bdim,
|
| 419 |
+
const Tensor& b_value, std::optional<int64_t> b_bdim,
|
| 420 |
+
const Tensor& c_value, std::optional<int64_t> c_bdim,
|
| 421 |
+
const Tensor& d_value, std::optional<int64_t> d_bdim) {
|
| 422 |
+
if (a_bdim)
|
| 423 |
+
return a_value.size(*a_bdim);
|
| 424 |
+
if (b_bdim)
|
| 425 |
+
return b_value.size(*b_bdim);
|
| 426 |
+
if (c_bdim)
|
| 427 |
+
return c_value.size(*c_bdim);
|
| 428 |
+
if (d_bdim)
|
| 429 |
+
return d_value.size(*d_bdim);
|
| 430 |
+
TORCH_INTERNAL_ASSERT(false);
|
| 431 |
+
}
|
| 432 |
+
|
| 433 |
+
inline int64_t get_bdim_size3(
|
| 434 |
+
const Tensor& a_value, std::optional<int64_t> a_bdim,
|
| 435 |
+
const Tensor& b_value, std::optional<int64_t> b_bdim,
|
| 436 |
+
const Tensor& c_value, std::optional<int64_t> c_bdim) {
|
| 437 |
+
if (a_bdim)
|
| 438 |
+
return a_value.size(*a_bdim);
|
| 439 |
+
if (b_bdim)
|
| 440 |
+
return b_value.size(*b_bdim);
|
| 441 |
+
if (c_bdim)
|
| 442 |
+
return c_value.size(*c_bdim);
|
| 443 |
+
TORCH_INTERNAL_ASSERT(false);
|
| 444 |
+
}
|
| 445 |
+
|
| 446 |
+
inline int64_t get_bdim_size2(
|
| 447 |
+
const Tensor& a_value, std::optional<int64_t> a_bdim,
|
| 448 |
+
const Tensor& b_value, std::optional<int64_t> b_bdim) {
|
| 449 |
+
if (a_bdim)
|
| 450 |
+
return a_value.size(*a_bdim);
|
| 451 |
+
if (b_bdim)
|
| 452 |
+
return b_value.size(*b_bdim);
|
| 453 |
+
TORCH_INTERNAL_ASSERT(false);
|
| 454 |
+
}
|
| 455 |
+
|
| 456 |
+
inline c10::SymInt get_bdim_size2_symint(
|
| 457 |
+
const Tensor& a_value, std::optional<int64_t> a_bdim,
|
| 458 |
+
const Tensor& b_value, std::optional<int64_t> b_bdim) {
|
| 459 |
+
if (a_bdim)
|
| 460 |
+
return a_value.sym_size(*a_bdim);
|
| 461 |
+
if (b_bdim)
|
| 462 |
+
return b_value.sym_size(*b_bdim);
|
| 463 |
+
TORCH_INTERNAL_ASSERT(false);
|
| 464 |
+
}
|
| 465 |
+
|
| 466 |
+
// [start, start + 1, ..., stop - 1]
|
| 467 |
+
inline VmapDimVector range(int64_t start, int64_t stop) {
|
| 468 |
+
TORCH_INTERNAL_ASSERT(stop >= start);
|
| 469 |
+
VmapDimVector dims;
|
| 470 |
+
dims.reserve(stop - start);
|
| 471 |
+
for (int64_t i = start; i < stop; i++) {
|
| 472 |
+
dims.emplace_back(i);
|
| 473 |
+
}
|
| 474 |
+
return dims;
|
| 475 |
+
}
|
| 476 |
+
std::tuple<Tensor, Tensor> _binary_pointwise_helper(
|
| 477 |
+
const Tensor& tensor, std::optional<int64_t> tensor_batch_dim, const Tensor& other, std::optional<int64_t> other_batch_dim,
|
| 478 |
+
bool do_type_promotion=true);
|
| 479 |
+
|
| 480 |
+
} // namespace at::functorch
|
lib/python3.10/site-packages/torch/include/ATen/functorch/BatchedFallback.h
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
// All rights reserved.
|
| 3 |
+
//
|
| 4 |
+
// This source code is licensed under the BSD-style license found in the
|
| 5 |
+
// LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
#pragma once
|
| 8 |
+
#include <ATen/ATen.h>
|
| 9 |
+
#include <ATen/core/op_registration/op_registration.h>
|
| 10 |
+
#include <torch/library.h>
|
| 11 |
+
|
| 12 |
+
namespace at::functorch {
|
| 13 |
+
|
| 14 |
+
// This file contains code for the vmap fallback (also known as the
|
| 15 |
+
// BatchedTensor fallback or the Batched fallback). This code runs
|
| 16 |
+
// when an operation doesn't have a batching rule implemented.
|
| 17 |
+
|
| 18 |
+
// If an operator doesn't have a batching rule implemented then we fallback
|
| 19 |
+
// to this implementation. The fallback doesn't work on out= variants or
|
| 20 |
+
// view operations; that is, it works for out-of-place operations and
|
| 21 |
+
// in-place non-view operations.
|
| 22 |
+
//
|
| 23 |
+
// For out-of-place operations, the fallback effectively takes all of the
|
| 24 |
+
// BatchedTensors in `stack`, slices them, and runs `op` on all of the
|
| 25 |
+
// corresponding slices to produce slices of the outputs. The output slices
|
| 26 |
+
// then get `torch.stack`ed to create the
|
| 27 |
+
// final returns.
|
| 28 |
+
//
|
| 29 |
+
// The performance of the fallback is not very good because it introduces an
|
| 30 |
+
// extra copy from stacking the sliced outputs. Because of this, we prefer to
|
| 31 |
+
// write batching rules for operators whenever possible.
|
| 32 |
+
void batchedTensorForLoopFallback(const c10::OperatorHandle& op, torch::jit::Stack* stack);
|
| 33 |
+
void batchedNestedTensorForLoopFallback(const c10::OperatorHandle& op, torch::jit::Stack* stack);
|
| 34 |
+
|
| 35 |
+
void vmapErrorFallback(const c10::OperatorHandle& op, torch::jit::Stack* stack);
|
| 36 |
+
|
| 37 |
+
// The vmap fallback emits a warning by default, but it may be disabled if
|
| 38 |
+
// the user finds it to be too annoying.
|
| 39 |
+
TORCH_API bool isVmapFallbackWarningEnabled();
|
| 40 |
+
TORCH_API void setVmapFallbackWarningEnabled(bool enabled);
|
| 41 |
+
|
| 42 |
+
// Used for testing. The vmap fallback is enabled by default. When it is disabled,
|
| 43 |
+
// it raises an error.
|
| 44 |
+
TORCH_API bool isVmapFallbackEnabled();
|
| 45 |
+
TORCH_API void setVmapFallbackEnabled(bool enabled);
|
| 46 |
+
|
| 47 |
+
template <typename A> A vector_to_result(const std::vector<IValue>& buffer) {
|
| 48 |
+
return buffer[0].to<A>();
|
| 49 |
+
}
|
| 50 |
+
template <typename A, typename B> std::tuple<A, B> vector_to_result(const std::vector<IValue>& buffer) {
|
| 51 |
+
return std::make_tuple(buffer[0].to<A>(), buffer[1].to<B>());
|
| 52 |
+
}
|
| 53 |
+
template <typename A, typename B, typename C> std::tuple<A, B, C> vector_to_result(const std::vector<IValue>& buffer) {
|
| 54 |
+
return std::make_tuple(buffer[0].to<A>(), buffer[1].to<B>(), buffer[2].to<B>());
|
| 55 |
+
}
|
| 56 |
+
|
| 57 |
+
// slow_fallback is a way to call the vmap fallback inside some boxed kernel.
|
| 58 |
+
// There is probably some better way to metaprogram this.
|
| 59 |
+
template <typename Ret>
|
| 60 |
+
Ret slow_fallback(const c10::OperatorHandle& op, ArrayRef<IValue> args) {
|
| 61 |
+
std::vector<IValue> stack(args.begin(), args.end());
|
| 62 |
+
batchedTensorForLoopFallback(op, &stack);
|
| 63 |
+
return vector_to_result<Ret>(stack);
|
| 64 |
+
}
|
| 65 |
+
|
| 66 |
+
template <typename A, typename B>
|
| 67 |
+
std::tuple<A, B> slow_fallback(const c10::OperatorHandle& op, ArrayRef<IValue> args) {
|
| 68 |
+
std::vector<IValue> stack(args.begin(), args.end());
|
| 69 |
+
batchedTensorForLoopFallback(op, &stack);
|
| 70 |
+
return vector_to_result<A, B>(stack);
|
| 71 |
+
}
|
| 72 |
+
|
| 73 |
+
template <typename A, typename B, typename C>
|
| 74 |
+
std::tuple<A, B, C> slow_fallback(const c10::OperatorHandle& op, ArrayRef<IValue> args) {
|
| 75 |
+
std::vector<IValue> stack(args.begin(), args.end());
|
| 76 |
+
batchedTensorForLoopFallback(op, &stack);
|
| 77 |
+
return vector_to_result<A, B, C>(stack);
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
} // namespace at::functorch
|
lib/python3.10/site-packages/torch/include/ATen/functorch/BatchedTensorImpl.h
ADDED
|
@@ -0,0 +1,169 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
// All rights reserved.
|
| 3 |
+
//
|
| 4 |
+
// This source code is licensed under the BSD-style license found in the
|
| 5 |
+
// LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
#pragma once
|
| 8 |
+
|
| 9 |
+
#include <bitset>
|
| 10 |
+
|
| 11 |
+
#include <ATen/ArrayRef.h>
|
| 12 |
+
#include <ATen/SmallVector.h>
|
| 13 |
+
#include <ATen/Tensor.h>
|
| 14 |
+
|
| 15 |
+
namespace at::functorch {
|
| 16 |
+
|
| 17 |
+
using Tensor = at::Tensor;
|
| 18 |
+
|
| 19 |
+
// We assume this in a few other places in the codebase,
|
| 20 |
+
// but there isn't a centralized definition.
|
| 21 |
+
constexpr int64_t kVmapMaxTensorDims = 64;
|
| 22 |
+
|
| 23 |
+
// The valid vmap levels range from [0, 64). This effectively means that we
|
| 24 |
+
// support a maximum of 64 nested vmaps.
|
| 25 |
+
constexpr int64_t kVmapNumLevels = 64;
|
| 26 |
+
|
| 27 |
+
// Store this number of elements of BatchDims on the stack. Most people will
|
| 28 |
+
// probably use <= 5 nested vmaps, but adjust this number as necessary.
|
| 29 |
+
constexpr int64_t kBatchDimsStackSize = 5;
|
| 30 |
+
|
| 31 |
+
// A BatchedTensorImpl holds an underlying Tensor and a single batch dim
|
| 32 |
+
// NB: We use the term "BatchedTensor" to mean a Tensor that is backed with a
|
| 33 |
+
// BatchedTensorImpl.
|
| 34 |
+
//
|
| 35 |
+
// The batch dimensions are treated as being "private"; they are not user-visible.
|
| 36 |
+
// For example, in the following Tensor,
|
| 37 |
+
// bt = BatchedTensorImpl(ones(2, 3, 5, 7), lvl=1, dim=0)
|
| 38 |
+
// dimension 0 is batch dimension.
|
| 39 |
+
//
|
| 40 |
+
// bt.sizes() returns (5, 7); bt.sum(0) performs a reduction over the (public)
|
| 41 |
+
// dim 0, which is equivalent to dim 3 in the underlying ones(2, 3, 5, 7) tensor.
|
| 42 |
+
struct TORCH_API BatchedTensorImpl : public c10::TensorImpl {
|
| 43 |
+
explicit BatchedTensorImpl(at::DispatchKeySet key_set, Tensor value, int64_t dim, int64_t level);
|
| 44 |
+
|
| 45 |
+
// Returns batch dimension of this tensor
|
| 46 |
+
int64_t bdim() const { return bdim_; }
|
| 47 |
+
|
| 48 |
+
// Returns batch dimension of this tensor
|
| 49 |
+
int64_t level() const { return level_; }
|
| 50 |
+
|
| 51 |
+
// BatchedTensorImpl wraps a Tensor
|
| 52 |
+
const Tensor& value() const { return value_; }
|
| 53 |
+
|
| 54 |
+
// Given a public dimension index, return the dimension index in the underlying
|
| 55 |
+
// value() tensor.
|
| 56 |
+
// For example, if we have
|
| 57 |
+
// bt = BatchedTensorImpl(ones(2, 3, 5, 7), lvl=1, dim=0)
|
| 58 |
+
// bt.actualDim(0) -> 1
|
| 59 |
+
// bt.actualDim(1) -> 2
|
| 60 |
+
// bt.actualDim(2) -> 3
|
| 61 |
+
// bt.actualDim(3) -> Error
|
| 62 |
+
int64_t actualDim(int64_t dim, bool wrap_dim = true) const;
|
| 63 |
+
|
| 64 |
+
IntArrayRef sizes_custom() const override;
|
| 65 |
+
SymIntArrayRef sym_sizes_custom() const override;
|
| 66 |
+
int64_t size_custom(int64_t d) const override;
|
| 67 |
+
c10::SymInt sym_size_custom(int64_t d) const override;
|
| 68 |
+
// We have to override this because we opted into CustomStrides
|
| 69 |
+
IntArrayRef strides_custom() const override;
|
| 70 |
+
SymIntArrayRef sym_strides_custom() const override;
|
| 71 |
+
// Override a bunch of methods inherited from TensorImpl to return error messages.
|
| 72 |
+
bool is_contiguous_custom(at::MemoryFormat memory_format=at::MemoryFormat::Contiguous) const override;
|
| 73 |
+
void set_size(int64_t dim, int64_t new_size) override;
|
| 74 |
+
void set_stride(int64_t dim, int64_t new_stride) override;
|
| 75 |
+
c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
|
| 76 |
+
const c10::VariableVersion& version_counter,
|
| 77 |
+
bool allow_tensor_metadata_change) const override;
|
| 78 |
+
c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
|
| 79 |
+
c10::VariableVersion&& version_counter,
|
| 80 |
+
bool allow_tensor_metadata_change) const override;
|
| 81 |
+
void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override;
|
| 82 |
+
#ifdef DEBUG
|
| 83 |
+
bool has_storage() const override;
|
| 84 |
+
#endif
|
| 85 |
+
|
| 86 |
+
void refreshTensorMetadata();
|
| 87 |
+
|
| 88 |
+
// Used in torchdim. torchdim uses non-lexical BatchedTensor; the way it
|
| 89 |
+
// accomplishes this is a hack where it is able to modify the levels of
|
| 90 |
+
// BatchedTensor to match the level of the current vmap transform.
|
| 91 |
+
void _unsafe_set_level(int64_t level) {
|
| 92 |
+
level_ = level;
|
| 93 |
+
}
|
| 94 |
+
|
| 95 |
+
// Used in batching rule for in-place view operations that can change
|
| 96 |
+
// the index of the bdim (think squeeze_, unsqueeze_)
|
| 97 |
+
void unsafe_set_bdim(int64_t bdim) {
|
| 98 |
+
// NB: you MUST call refreshTensorMetadata after doing this.
|
| 99 |
+
bdim_ = bdim;
|
| 100 |
+
}
|
| 101 |
+
private:
|
| 102 |
+
// see NOTE: [BatchedTensorImpl levels invariant]
|
| 103 |
+
void checkInvariants() const;
|
| 104 |
+
const char* tensorimpl_type_name() const override;
|
| 105 |
+
|
| 106 |
+
Tensor value_;
|
| 107 |
+
|
| 108 |
+
int64_t level_;
|
| 109 |
+
int64_t bdim_;
|
| 110 |
+
};
|
| 111 |
+
|
| 112 |
+
// NB: We use the term "BatchedTensor" to mean a Tensor that is backed with a
|
| 113 |
+
// BatchedTensorImpl.
|
| 114 |
+
inline bool isBatchedTensor(const Tensor& tensor) {
|
| 115 |
+
return tensor.unsafeGetTensorImpl()->key_set().has(DispatchKey::FuncTorchBatched) ||
|
| 116 |
+
tensor.unsafeGetTensorImpl()->key_set().has(DispatchKey::BatchedNestedTensor);
|
| 117 |
+
}
|
| 118 |
+
|
| 119 |
+
// It is unsafe to call this on a Tensor that is not backed by a
|
| 120 |
+
// BatchedTensorImpl. Please use `maybeGetBatchedImpl` whenever possible.
|
| 121 |
+
inline BatchedTensorImpl* unsafeGetBatchedImpl(const Tensor& tensor) {
|
| 122 |
+
return static_cast<BatchedTensorImpl*>(tensor.unsafeGetTensorImpl());
|
| 123 |
+
}
|
| 124 |
+
|
| 125 |
+
inline BatchedTensorImpl* maybeGetBatchedImpl(const Tensor& tensor) {
|
| 126 |
+
if (!isBatchedTensor(tensor)) {
|
| 127 |
+
return nullptr;
|
| 128 |
+
}
|
| 129 |
+
return unsafeGetBatchedImpl(tensor);
|
| 130 |
+
}
|
| 131 |
+
|
| 132 |
+
// Returns a bitset. If bit i is set, then that means dim i is a batchdim.
|
| 133 |
+
inline std::bitset<kVmapMaxTensorDims> createBatchDimBitset(int64_t dim) {
|
| 134 |
+
std::bitset<kVmapMaxTensorDims> is_bdim;
|
| 135 |
+
is_bdim.set(dim);
|
| 136 |
+
return is_bdim;
|
| 137 |
+
}
|
| 138 |
+
|
| 139 |
+
// Creates a bitset for the given level
|
| 140 |
+
inline std::bitset<kVmapNumLevels> createVmapLevelsBitset(int64_t level) {
|
| 141 |
+
std::bitset<kVmapNumLevels> result;
|
| 142 |
+
result.set(level);
|
| 143 |
+
return result;
|
| 144 |
+
}
|
| 145 |
+
|
| 146 |
+
// Use this to construct a BatchedTensor from a regular Tensor
|
| 147 |
+
TORCH_API Tensor makeBatched(Tensor tensor, int64_t dim, int64_t level);
|
| 148 |
+
|
| 149 |
+
// Adds a batch dim to `tensor`, returning a BatchedTensor
|
| 150 |
+
TORCH_API Tensor addBatchDim(Tensor tensor, int64_t dim, int64_t level);
|
| 151 |
+
|
| 152 |
+
// Certain dispatch keys must be propagated to the BatchedTensor (or, in general,
|
| 153 |
+
// any wrapper Tensor subclasses). This is because there are methods on Tensor
|
| 154 |
+
// that skip dispatch and check for the presence of a dispatch key (e.g. is_cpu()).
|
| 155 |
+
// TODO: should probably contain more (or all?) backend keys
|
| 156 |
+
constexpr DispatchKeySet kKeysToPropagateToWrapper({
|
| 157 |
+
DispatchKey::Negative,
|
| 158 |
+
DispatchKey::Conjugate,
|
| 159 |
+
DispatchKey::XLA,
|
| 160 |
+
DispatchKey::CUDA,
|
| 161 |
+
DispatchKey::CPU,
|
| 162 |
+
});
|
| 163 |
+
|
| 164 |
+
inline DispatchKeySet getKeysToPropagateToWrapper(const Tensor& tensor, DispatchKeySet to_propagate=kKeysToPropagateToWrapper) {
|
| 165 |
+
auto key_set = tensor.unsafeGetTensorImpl()->key_set();
|
| 166 |
+
return key_set & kKeysToPropagateToWrapper;
|
| 167 |
+
}
|
| 168 |
+
|
| 169 |
+
} // namespace at::functorch
|
lib/python3.10/site-packages/torch/include/ATen/functorch/BatchingMetaprogramming.h
ADDED
|
@@ -0,0 +1,126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
// All rights reserved.
|
| 3 |
+
//
|
| 4 |
+
// This source code is licensed under the BSD-style license found in the
|
| 5 |
+
// LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
#pragma once
|
| 8 |
+
#include <ATen/Tensor.h>
|
| 9 |
+
#include <ATen/VmapGeneratedPlumbing.h>
|
| 10 |
+
|
| 11 |
+
// This file contains template metaprogramming things that are used for our
|
| 12 |
+
// batching rules.
|
| 13 |
+
//
|
| 14 |
+
// See NOTE: [vmap plumbing] for more details on why this is necessary.
|
| 15 |
+
// The plumbing has a bunch of metaprogramming hacks for determining the signature
|
| 16 |
+
// of a batching rule from the signature of the operator, many of which use the
|
| 17 |
+
// helper functions in this file.
|
| 18 |
+
|
| 19 |
+
namespace at::functorch {
|
| 20 |
+
|
| 21 |
+
// Metaprogramming things
|
| 22 |
+
template <class... Items> using typelist = c10::guts::typelist::typelist<Items...>;
|
| 23 |
+
template <class TypeList> using head_t = c10::guts::typelist::head_t<TypeList>;
|
| 24 |
+
template <class TL1, class TL2> using concat_t = c10::guts::typelist::concat_t<TL1, TL2>;
|
| 25 |
+
template <typename T> class debug_t;
|
| 26 |
+
|
| 27 |
+
// tail operation
|
| 28 |
+
template<class TypeList>
|
| 29 |
+
struct tail final {
|
| 30 |
+
static_assert(c10::guts::false_t<TypeList>::value,
|
| 31 |
+
"In typelist::tail<T>, the T argument must be typelist<...>.");
|
| 32 |
+
};
|
| 33 |
+
template<class Head, class... Tail>
|
| 34 |
+
struct tail<typelist<Head, Tail...>> final {
|
| 35 |
+
using type = typelist<Tail...>;
|
| 36 |
+
};
|
| 37 |
+
template<class TypeList> using tail_t = typename tail<TypeList>::type;
|
| 38 |
+
|
| 39 |
+
template <class First, class Second, class Next, class Tail>
|
| 40 |
+
struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext {
|
| 41 |
+
using type = Next;
|
| 42 |
+
};
|
| 43 |
+
template <class Next, class Tail>
|
| 44 |
+
struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<Tensor, std::optional<int64_t>, Next, Tail> {
|
| 45 |
+
using type = Tail;
|
| 46 |
+
};
|
| 47 |
+
template <class Next, class Tail>
|
| 48 |
+
struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<const Tensor&, std::optional<int64_t>, Next, Tail> {
|
| 49 |
+
using type = Tail;
|
| 50 |
+
};
|
| 51 |
+
template <class Next, class Tail>
|
| 52 |
+
struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<Tensor&, std::optional<int64_t>, Next, Tail> {
|
| 53 |
+
using type = Tail;
|
| 54 |
+
};
|
| 55 |
+
template <class Next, class Tail>
|
| 56 |
+
struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<std::optional<Tensor>, std::optional<int64_t>, Next, Tail> {
|
| 57 |
+
using type = Tail;
|
| 58 |
+
};
|
| 59 |
+
template <class Next, class Tail>
|
| 60 |
+
struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<const std::optional<Tensor>&, std::optional<int64_t>, Next, Tail> {
|
| 61 |
+
using type = Tail;
|
| 62 |
+
};
|
| 63 |
+
template <class Next, class Tail>
|
| 64 |
+
struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<std::optional<Tensor>&, std::optional<int64_t>, Next, Tail> {
|
| 65 |
+
using type = Tail;
|
| 66 |
+
};
|
| 67 |
+
template <class Next, class Tail>
|
| 68 |
+
struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<std::vector<Tensor>, std::optional<int64_t>, Next, Tail> {
|
| 69 |
+
using type = Tail;
|
| 70 |
+
};
|
| 71 |
+
template <class TypeList> struct RemoveBatchDimAfterTensor {
|
| 72 |
+
using first = head_t<TypeList>;
|
| 73 |
+
using next = tail_t<TypeList>;
|
| 74 |
+
using second = head_t<next>;
|
| 75 |
+
using tail = tail_t<next>;
|
| 76 |
+
|
| 77 |
+
using type = concat_t<
|
| 78 |
+
typelist<first>,
|
| 79 |
+
typename RemoveBatchDimAfterTensor<
|
| 80 |
+
typename IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<first, second, next, tail>::type
|
| 81 |
+
>::type
|
| 82 |
+
>;
|
| 83 |
+
};
|
| 84 |
+
template <class Type> struct RemoveBatchDimAfterTensor<typelist<Type>> {
|
| 85 |
+
using type = typelist<Type>;
|
| 86 |
+
};
|
| 87 |
+
template <> struct RemoveBatchDimAfterTensor<typelist<>> {
|
| 88 |
+
using type = typelist<>;
|
| 89 |
+
};
|
| 90 |
+
template<class TypeList> using remove_batch_dim_after_tensor_t = typename RemoveBatchDimAfterTensor<TypeList>::type;
|
| 91 |
+
|
| 92 |
+
template <typename T> struct UnpackSingleItemTuple {
|
| 93 |
+
using type = T;
|
| 94 |
+
};
|
| 95 |
+
template <typename T> struct UnpackSingleItemTuple<std::tuple<T>> {
|
| 96 |
+
using type = T;
|
| 97 |
+
};
|
| 98 |
+
template <typename T> using unpack_single_item_tuple_t = typename UnpackSingleItemTuple<T>::type;
|
| 99 |
+
|
| 100 |
+
template <typename Return, typename TupleArgs> struct BuildFunctionHelper;
|
| 101 |
+
template <typename Return, typename... Args> struct BuildFunctionHelper<Return, std::tuple<Args...>> {
|
| 102 |
+
using type = Return(Args...);
|
| 103 |
+
};
|
| 104 |
+
template <typename Return, typename TL>
|
| 105 |
+
struct BuildFunction {
|
| 106 |
+
using type = typename BuildFunctionHelper<Return, c10::guts::typelist::to_tuple_t<TL>>::type;
|
| 107 |
+
};
|
| 108 |
+
template <typename Return, typename TL> using build_function_t = typename BuildFunction<Return, TL>::type;
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
template <typename batch_rule_t> struct ToOperatorType {
|
| 112 |
+
using batch_rule_return_type = typename c10::guts::function_traits<batch_rule_t>::return_type;
|
| 113 |
+
using batch_rule_parameter_types = typename c10::guts::function_traits<batch_rule_t>::parameter_types;
|
| 114 |
+
|
| 115 |
+
using operator_parameter_types = remove_batch_dim_after_tensor_t<batch_rule_parameter_types>;
|
| 116 |
+
using operator_return_type =
|
| 117 |
+
unpack_single_item_tuple_t<
|
| 118 |
+
c10::guts::typelist::to_tuple_t<
|
| 119 |
+
remove_batch_dim_after_tensor_t<
|
| 120 |
+
c10::guts::typelist::from_tuple_t<batch_rule_return_type>>>>;
|
| 121 |
+
|
| 122 |
+
using type = build_function_t<operator_return_type, operator_parameter_types>;
|
| 123 |
+
};
|
| 124 |
+
template <typename batch_rule_t> using to_operator_t = typename ToOperatorType<batch_rule_t>::type;
|
| 125 |
+
|
| 126 |
+
} // namespace at::functorch
|
lib/python3.10/site-packages/torch/include/ATen/functorch/DynamicLayer.h
ADDED
|
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
// All rights reserved.
|
| 3 |
+
//
|
| 4 |
+
// This source code is licensed under the BSD-style license found in the
|
| 5 |
+
// LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
#pragma once
|
| 8 |
+
#include <ATen/functorch/Macros.h>
|
| 9 |
+
#include <c10/core/DispatchKey.h>
|
| 10 |
+
#include <ATen/core/function_schema.h>
|
| 11 |
+
#include <optional>
|
| 12 |
+
#include <c10/core/impl/LocalDispatchKeySet.h>
|
| 13 |
+
#include <ATen/functorch/Interpreter.h>
|
| 14 |
+
#include <ATen/functorch/VmapInterpreter.h>
|
| 15 |
+
#include <ATen/functorch/ADInterpreters.h>
|
| 16 |
+
#include <ATen/functorch/FunctionalizeInterpreter.h>
|
| 17 |
+
|
| 18 |
+
// Forward declared
|
| 19 |
+
namespace c10 { struct AutogradMetaInterface; }
|
| 20 |
+
|
| 21 |
+
namespace at::functorch {
|
| 22 |
+
|
| 23 |
+
// This file contains the implementation of functorch's interpreter stack.
|
| 24 |
+
// See NOTE: [functorch interpreter stack] first before reading on.
|
| 25 |
+
//
|
| 26 |
+
// NB: the functorch interpreter stack is also referred to as:
|
| 27 |
+
// - the "dynamic layer stack" -- an older name for "interpreter" was
|
| 28 |
+
// "dynamic layer".
|
| 29 |
+
// - the "functorch mode stack". You can think of each functorch transform as a
|
| 30 |
+
// "mode" (in the same sense as torch_dispatch mode or torch_function mode),
|
| 31 |
+
// and functorch being an implementation of a "mode stack" where the modes
|
| 32 |
+
// may be arbitrary composed.
|
| 33 |
+
|
| 34 |
+
// DynamicLayer is basically the same thing as an Interpreter.
|
| 35 |
+
// It represents a functorch transform and it holds an Interpreter,
|
| 36 |
+
// which contains metadata related to the transform and instructions on
|
| 37 |
+
// how to perform the transform.
|
| 38 |
+
//
|
| 39 |
+
// TODO: we can excise DynamicLayer in favor of Interpreter,
|
| 40 |
+
// But I am going to leave it for now as a compatiblity shim to avoid
|
| 41 |
+
// needing to refactor a lot of callsites...
|
| 42 |
+
struct TORCH_API DynamicLayer {
|
| 43 |
+
explicit DynamicLayer(
|
| 44 |
+
TransformType transform_type,
|
| 45 |
+
int64_t layerId,
|
| 46 |
+
std::optional<c10::SymInt> batchSize = std::nullopt,
|
| 47 |
+
std::optional<RandomnessType> randomness = std::nullopt,
|
| 48 |
+
std::optional<bool> prev_grad_mode = std::nullopt,
|
| 49 |
+
std::optional<bool> pre_fwd_grad_mode = std::nullopt,
|
| 50 |
+
std::optional<bool> functionalize_add_back_views = std::nullopt);
|
| 51 |
+
|
| 52 |
+
TransformType key() const;
|
| 53 |
+
int64_t layerId() const;
|
| 54 |
+
|
| 55 |
+
const Interpreter& interpreter() const { return interpreter_; }
|
| 56 |
+
Interpreter& interpreter() { return interpreter_; }
|
| 57 |
+
|
| 58 |
+
// Only valid for vmap
|
| 59 |
+
c10::SymInt batchSize() const;
|
| 60 |
+
RandomnessType randomness() const;
|
| 61 |
+
|
| 62 |
+
private:
|
| 63 |
+
Interpreter interpreter_;
|
| 64 |
+
};
|
| 65 |
+
|
| 66 |
+
TORCH_API int64_t initAndPushDynamicLayer(
|
| 67 |
+
TransformType transform_type,
|
| 68 |
+
std::optional<c10::SymInt> batch_size = std::nullopt,
|
| 69 |
+
std::optional<RandomnessType> randomness = std::nullopt,
|
| 70 |
+
std::optional<bool> prev_grad_mode = std::nullopt,
|
| 71 |
+
std::optional<bool> prev_fwd_grad_mode = std::nullopt,
|
| 72 |
+
std::optional<bool> functionalize_add_back_views = std::nullopt);
|
| 73 |
+
TORCH_API DynamicLayer popDynamicLayerAndDeleteMetadata();
|
| 74 |
+
TORCH_API std::optional<DynamicLayer> maybeCurrentDynamicLayer();
|
| 75 |
+
TORCH_API const std::vector<DynamicLayer>& getDynamicLayerStack();
|
| 76 |
+
TORCH_API void setDynamicLayerStack(const std::vector<DynamicLayer>& stack);
|
| 77 |
+
TORCH_API void setDynamicLayerFrontBackKeysIncluded(bool included);
|
| 78 |
+
|
| 79 |
+
// NOTE: [Life handles and lexically scoped transforms]
|
| 80 |
+
// functorch transforms are lexically scoped.
|
| 81 |
+
// Given a level, we store a "life handle" that is a boolean that tells us if the
|
| 82 |
+
// transform with that level is active or not.
|
| 83 |
+
//
|
| 84 |
+
// functorch's TensorWrapper (for grad transforms) stores a life handle.
|
| 85 |
+
// If a TensorWrapper escapes from the scope of the transform, then somehow
|
| 86 |
+
// it must know it escaped; it can tell by querying the life handle.
|
| 87 |
+
TORCH_API const std::shared_ptr<bool>& getLifeHandleForLevel(int64_t level);
|
| 88 |
+
|
| 89 |
+
// Returns if an operator is in-place. An operator is inplace if:
|
| 90 |
+
// 1. The first argument is a Tensor and it is being written to
|
| 91 |
+
// 2. The first argument is being returned
|
| 92 |
+
// 3. No other arguments are aliased
|
| 93 |
+
// Here is an example of an in-place operator:
|
| 94 |
+
// add_(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
|
| 95 |
+
TORCH_API bool isInplaceOp(const c10::FunctionSchema& schema);
|
| 96 |
+
|
| 97 |
+
// Given the indices of unwrapped inputs and the schema, this returns the indices of any outputs that should remain unwrapped
|
| 98 |
+
TORCH_API std::optional<size_t> findAliasedOutput(const FunctionSchema& schema, const int64_t immutable_input);
|
| 99 |
+
|
| 100 |
+
TORCH_API Tensor unwrapIfDead(const Tensor& tensor);
|
| 101 |
+
TORCH_API bool isDeadTensorWrapper(const Tensor& tensor);
|
| 102 |
+
|
| 103 |
+
// Pretty printers
|
| 104 |
+
TORCH_API std::ostream& operator<<(std::ostream& os, const DynamicLayer& layer);
|
| 105 |
+
TORCH_API std::ostream& operator<<(std::ostream& os, const std::vector<DynamicLayer>& dynamicLayerStack);
|
| 106 |
+
|
| 107 |
+
// While a functorch transform is active, torch.autograd.function._SingleLevelFunction
|
| 108 |
+
// is disabled by default. The following two APIs are APIs for enabling
|
| 109 |
+
// it. These are not user-facing APIs. We can delete this in the future, but
|
| 110 |
+
// it is useful for debugging when something goes wrong with the
|
| 111 |
+
// autograd.Function <> functorch interaction, which uses _SingleLevelFunction,
|
| 112 |
+
// because it leads to loud errors if something is incorrect.
|
| 113 |
+
TORCH_API void setSingleLevelAutogradFunctionAllowed(bool allowed);
|
| 114 |
+
TORCH_API bool getSingleLevelAutogradFunctionAllowed();
|
| 115 |
+
|
| 116 |
+
// While a functorch grad transform is active, Tensor.requires_grad_() gets
|
| 117 |
+
// disabled. These two functions are the mechanism to controlling that.
|
| 118 |
+
TORCH_API void setInplaceRequiresGradAllowed(bool allowed);
|
| 119 |
+
TORCH_API bool getInplaceRequiresGradAllowed();
|
| 120 |
+
|
| 121 |
+
TORCH_API DynamicLayer popDynamicLayer();
|
| 122 |
+
TORCH_API int64_t pushDynamicLayer(DynamicLayer&& layer);
|
| 123 |
+
|
| 124 |
+
} // namespace at::functorch
|
lib/python3.10/site-packages/torch/include/ATen/functorch/FunctionalizeInterpreter.h
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <ATen/functorch/Interpreter.h>
|
| 3 |
+
|
| 4 |
+
namespace at::functorch {
|
| 5 |
+
|
| 6 |
+
// This is the interpreter that handles the functionalize() transform.
|
| 7 |
+
// See NOTE: [functorch interpreter stack] for more details.
|
| 8 |
+
|
| 9 |
+
struct FunctionalizeInterpreterPtr {
|
| 10 |
+
explicit FunctionalizeInterpreterPtr(const Interpreter* base): base_(base) { TORCH_INTERNAL_ASSERT(base->key() == TransformType::Functionalize); }
|
| 11 |
+
TransformType key() const { return base_->key(); }
|
| 12 |
+
int64_t level() const { return base_->level(); }
|
| 13 |
+
void processImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack);
|
| 14 |
+
void sendToNextInterpreterImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool grad_special_case);
|
| 15 |
+
bool functionalizeAddBackViews() const {
|
| 16 |
+
return std::get<FunctionalizeInterpreterMeta>(base_->meta()).functionalizeAddBackViews_;
|
| 17 |
+
}
|
| 18 |
+
private:
|
| 19 |
+
const Interpreter* base_;
|
| 20 |
+
};
|
| 21 |
+
|
| 22 |
+
} // namespace at::functorch
|
lib/python3.10/site-packages/torch/include/ATen/functorch/Interpreter.h
ADDED
|
@@ -0,0 +1,209 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/functorch/Macros.h>
|
| 4 |
+
#include <ATen/core/dispatch/Dispatcher.h>
|
| 5 |
+
#include <c10/core/impl/LocalDispatchKeySet.h>
|
| 6 |
+
#include <optional>
|
| 7 |
+
#include <bitset>
|
| 8 |
+
#include <utility>
|
| 9 |
+
#include <variant>
|
| 10 |
+
|
| 11 |
+
namespace at::functorch {
|
| 12 |
+
|
| 13 |
+
// NOTE: [functorch interpreter stack]
|
| 14 |
+
//
|
| 15 |
+
// functorch's dispatching system uses a stack of interpreters.
|
| 16 |
+
// Historically we've referred to this as the "DynamicLayerStack".
|
| 17 |
+
//
|
| 18 |
+
// An interpreter is something that reads in the code it is passed
|
| 19 |
+
// and then executes it. We have a different interpreter per-transform:
|
| 20 |
+
// the "VmapInterpreter" is responsible for reading in operators (like aten::mv)
|
| 21 |
+
// and executing the batched version of it (the batching rule for aten::mv).
|
| 22 |
+
//
|
| 23 |
+
// Concretely, each interpreter is responsible for two things:
|
| 24 |
+
//
|
| 25 |
+
// 1) process(ophandle, stack)
|
| 26 |
+
// Given an operator handle and a stack of arguments, the interpreter is
|
| 27 |
+
// responsible for figuring out how to execute the operation under the semantics
|
| 28 |
+
// of the interpreter. For e.g. VmapInterpreter, this is figuring out how to call
|
| 29 |
+
// the batching rule.
|
| 30 |
+
//
|
| 31 |
+
// The batching rules are stored as kernels on the FuncTorchBatched key, so the way
|
| 32 |
+
// VmapInterpreter calls the batching rule is roughly: (A) exclude all
|
| 33 |
+
// dispatch keys aside from the Batched key, (B) redispatch so we get to the
|
| 34 |
+
// Batched key.
|
| 35 |
+
//
|
| 36 |
+
// 2) sendToNextInterpreter(ophandle, stack)
|
| 37 |
+
// The VmapInterpreter, when it sees aten::mv, will process it into a call to
|
| 38 |
+
// aten::mm. It then needs to send the call to aten::mm to the next interpreter
|
| 39 |
+
// in the interpreter stack.
|
| 40 |
+
//
|
| 41 |
+
// The VmapInterpreter just does this via a call to ophandle.callBoxed(stack)
|
| 42 |
+
// and most Interpreters will implement it this way.
|
| 43 |
+
|
| 44 |
+
enum class RandomnessType {
|
| 45 |
+
Error, // always errors when calling a random function
|
| 46 |
+
Same, // randomness appears the same across batches
|
| 47 |
+
Different, // randomness appears different across batches
|
| 48 |
+
END
|
| 49 |
+
};
|
| 50 |
+
|
| 51 |
+
enum class TransformType {
|
| 52 |
+
Torch, // Unused
|
| 53 |
+
Vmap,
|
| 54 |
+
Grad, // reverse-mode AD, aka vjp
|
| 55 |
+
Jvp, // forward-mode AD
|
| 56 |
+
Functionalize,
|
| 57 |
+
};
|
| 58 |
+
|
| 59 |
+
std::ostream& operator<<(std::ostream& os, const TransformType& t);
|
| 60 |
+
|
| 61 |
+
// NOTE: [Interpreter "subclassing" design]
|
| 62 |
+
//
|
| 63 |
+
// How are various Interpreters for different transforms (vmap, grad, ...)
|
| 64 |
+
// implemented?
|
| 65 |
+
//
|
| 66 |
+
// Accessing interpreters is in the hot-path of functorch so we have a constraint
|
| 67 |
+
// that this code must be as fast as possible.
|
| 68 |
+
//
|
| 69 |
+
// As a result, we stay away from virtual methods and this causes our code
|
| 70 |
+
// to look a little funny.
|
| 71 |
+
//
|
| 72 |
+
// `Interpreter` is the struct for Interpreters. It holds ALL of the
|
| 73 |
+
// relevant information (what type of interpreter it is and the metadata).
|
| 74 |
+
// Metadata for each interpreter is represented as a Union (std::variant)
|
| 75 |
+
// of all possible metadata (VmapInterpreterMeta, GradInterpreterMeta, ...).
|
| 76 |
+
//
|
| 77 |
+
// Given an Interpreter, how do I get a "VmapInterpreter"? You may wish to do this
|
| 78 |
+
// if you want to access the metadata fields (like batchSize and randomness).
|
| 79 |
+
//
|
| 80 |
+
// Each type of interpreter (e.g. Vmap) has a convenience struct
|
| 81 |
+
// (e.g. VmapInterpreterPtr) associated with it.
|
| 82 |
+
//
|
| 83 |
+
// Construct the convenience struct with VmapInterpreterPtr(Interpreter*),
|
| 84 |
+
// and then one can access methods on VmapInterpreterPtr like so:
|
| 85 |
+
// >>> VmapInterpreterPtr(&interpreter).batchSize()
|
| 86 |
+
//
|
| 87 |
+
// Finally, Interpreter::process switches on the type of the interpreter
|
| 88 |
+
// and calls one of {Transform}Intepreter::processImpl under the hood.
|
| 89 |
+
// Same for Interpreter::sendToNextInterpreter :)
|
| 90 |
+
|
| 91 |
+
struct VmapInterpreterMeta {
|
| 92 |
+
explicit VmapInterpreterMeta(c10::SymInt batchSize, RandomnessType randomness) :
|
| 93 |
+
batchSize_(std::move(batchSize)), randomness_(randomness) {}
|
| 94 |
+
c10::SymInt batchSize_;
|
| 95 |
+
RandomnessType randomness_;
|
| 96 |
+
};
|
| 97 |
+
|
| 98 |
+
struct GradInterpreterMeta {
|
| 99 |
+
explicit GradInterpreterMeta(bool prevGradMode): prevGradMode_(prevGradMode) {}
|
| 100 |
+
bool prevGradMode_;
|
| 101 |
+
};
|
| 102 |
+
|
| 103 |
+
struct JvpInterpreterMeta {
|
| 104 |
+
explicit JvpInterpreterMeta(bool prevFwdGradMode) : prevFwdGradMode_(prevFwdGradMode) {}
|
| 105 |
+
bool prevFwdGradMode_;
|
| 106 |
+
};
|
| 107 |
+
|
| 108 |
+
struct FunctionalizeInterpreterMeta {
|
| 109 |
+
explicit FunctionalizeInterpreterMeta(bool functionalizeAddBackViews) :
|
| 110 |
+
functionalizeAddBackViews_(functionalizeAddBackViews) {}
|
| 111 |
+
bool functionalizeAddBackViews_;
|
| 112 |
+
};
|
| 113 |
+
|
| 114 |
+
typedef std::variant<
|
| 115 |
+
int64_t,
|
| 116 |
+
GradInterpreterMeta,
|
| 117 |
+
JvpInterpreterMeta,
|
| 118 |
+
VmapInterpreterMeta,
|
| 119 |
+
FunctionalizeInterpreterMeta
|
| 120 |
+
> InterpreterMeta;
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
struct Interpreter {
|
| 124 |
+
// factory functions
|
| 125 |
+
static Interpreter Vmap(int64_t level, c10::SymInt batchSize, RandomnessType randomness) {
|
| 126 |
+
return Interpreter(TransformType::Vmap, level, VmapInterpreterMeta(std::move(batchSize), randomness));
|
| 127 |
+
}
|
| 128 |
+
static Interpreter Grad(int64_t level, bool prevGradMode) {
|
| 129 |
+
return Interpreter(TransformType::Grad, level, GradInterpreterMeta(prevGradMode));
|
| 130 |
+
}
|
| 131 |
+
static Interpreter Jvp(int64_t level, bool prevFwdGradMode) {
|
| 132 |
+
return Interpreter(TransformType::Jvp, level, JvpInterpreterMeta(prevFwdGradMode));
|
| 133 |
+
}
|
| 134 |
+
static Interpreter Functionalize(int64_t level, bool functionalizeAddBackViews) {
|
| 135 |
+
return Interpreter(TransformType::Functionalize, level, FunctionalizeInterpreterMeta(functionalizeAddBackViews));
|
| 136 |
+
}
|
| 137 |
+
|
| 138 |
+
// methods
|
| 139 |
+
TransformType key() const { return type_; }
|
| 140 |
+
int64_t level() const { return level_; }
|
| 141 |
+
const InterpreterMeta& meta() const { return meta_; }
|
| 142 |
+
|
| 143 |
+
void process(const c10::OperatorHandle& op, torch::jit::Stack* stack);
|
| 144 |
+
void sendToNextInterpreter(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool grad_special_case);
|
| 145 |
+
|
| 146 |
+
void saveLocalDispatchKeySet(c10::impl::LocalDispatchKeySet keyset) {
|
| 147 |
+
TORCH_INTERNAL_ASSERT(!savedLocalDispatchKeySet_.has_value());
|
| 148 |
+
savedLocalDispatchKeySet_ = keyset;
|
| 149 |
+
}
|
| 150 |
+
void clearSavedLocalDispatchKeySet() {
|
| 151 |
+
TORCH_INTERNAL_ASSERT(savedLocalDispatchKeySet_.has_value());
|
| 152 |
+
savedLocalDispatchKeySet_ = std::nullopt;
|
| 153 |
+
}
|
| 154 |
+
c10::impl::LocalDispatchKeySet getSavedLocalDispatchKeySet() const {
|
| 155 |
+
TORCH_INTERNAL_ASSERT(savedLocalDispatchKeySet_.has_value());
|
| 156 |
+
return *savedLocalDispatchKeySet_;
|
| 157 |
+
}
|
| 158 |
+
|
| 159 |
+
// An Interpreter is alive if we are currently inside the ongoing transform
|
| 160 |
+
// for the interpreter. For example, vmap(f)(x); inside of f, the vmap's
|
| 161 |
+
// corresponding Interpreter is alive, even when it is not on the DynamicLayerStack.
|
| 162 |
+
bool is_alive() const {
|
| 163 |
+
return *is_alive_;
|
| 164 |
+
}
|
| 165 |
+
const std::shared_ptr<bool>& is_alive_ptr() const {
|
| 166 |
+
return is_alive_;
|
| 167 |
+
}
|
| 168 |
+
void set_is_alive(bool alive) {
|
| 169 |
+
*is_alive_ = alive;
|
| 170 |
+
}
|
| 171 |
+
|
| 172 |
+
// Please don't use this
|
| 173 |
+
explicit Interpreter() = default;
|
| 174 |
+
|
| 175 |
+
private:
|
| 176 |
+
explicit Interpreter(TransformType type, int64_t level, InterpreterMeta meta):
|
| 177 |
+
type_(type), level_(level), is_alive_(std::make_shared<bool>(false)), meta_(std::move(meta)) {}
|
| 178 |
+
|
| 179 |
+
// fields
|
| 180 |
+
TransformType type_{};
|
| 181 |
+
int64_t level_{};
|
| 182 |
+
std::optional<c10::impl::LocalDispatchKeySet> savedLocalDispatchKeySet_;
|
| 183 |
+
std::shared_ptr<bool> is_alive_;
|
| 184 |
+
InterpreterMeta meta_;
|
| 185 |
+
};
|
| 186 |
+
|
| 187 |
+
// Applies the following for-loop:
|
| 188 |
+
// for i in range(begin, end):
|
| 189 |
+
// args[i] = func(args[i])
|
| 190 |
+
void foreachTensorInplace(std::vector<IValue>& args, int64_t begin, int64_t end,
|
| 191 |
+
std::function<Tensor(const Tensor&)> func);
|
| 192 |
+
|
| 193 |
+
// Applies the following for-loop:
|
| 194 |
+
// for i in range(begin, end):
|
| 195 |
+
// if use_flag_relative[i] == 1: <-- treats use_flag_relative as a bitset
|
| 196 |
+
// args[i] = func(args[i], i - begin, true)
|
| 197 |
+
// args[i] = func(args[i], i - begin)
|
| 198 |
+
void foreachTensorInplaceWithFlag(std::vector<IValue>& args, int64_t begin, int64_t end,
|
| 199 |
+
const std::bitset<64> use_flag_relative, const std::function<Tensor(const Tensor&, bool)>& func);
|
| 200 |
+
|
| 201 |
+
std::vector<int64_t> findUnwrappedInputs(std::vector<IValue>& args, int64_t begin, int64_t end);
|
| 202 |
+
|
| 203 |
+
DispatchKeySet keysToExcludeWhenEnteringDynamicLayer(TransformType key);
|
| 204 |
+
|
| 205 |
+
void setup_dispatch_key_tls(TransformType key, DispatchKeySet include);
|
| 206 |
+
|
| 207 |
+
void sanityCheckStack(const c10::OperatorHandle& op, torch::jit::Stack* stack);
|
| 208 |
+
|
| 209 |
+
} // namespace at::functorch
|
lib/python3.10/site-packages/torch/include/ATen/functorch/LegacyVmapTransforms.h
ADDED
|
@@ -0,0 +1,187 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
// All rights reserved.
|
| 3 |
+
//
|
| 4 |
+
// This source code is licensed under the BSD-style license found in the
|
| 5 |
+
// LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
#pragma once
|
| 8 |
+
|
| 9 |
+
#include <ATen/functorch/Macros.h>
|
| 10 |
+
#include <ATen/functorch/BatchedTensorImpl.h>
|
| 11 |
+
|
| 12 |
+
namespace at::functorch {
|
| 13 |
+
|
| 14 |
+
// This files contains the legacy (now-deprecated) batching rule API.
|
| 15 |
+
// Please try to use the new-style batching rule API (see writing_batch_rules.md)
|
| 16 |
+
|
| 17 |
+
// This file contains abstractions used for transforming *logical* vmap arguments
|
| 18 |
+
// into *physical* arguments. (Keep reading for definitions of these terms).
|
| 19 |
+
|
| 20 |
+
// NOTE: [Logical vs physical args]
|
| 21 |
+
// Consider the following vmap.
|
| 22 |
+
// vmap(vmap(func, in_dims=(2,)), in_dims=(0,))(torch.ones(2, 3, 4))
|
| 23 |
+
// This would produce a BatchedTensor wrapping a Tensor of size [2, 3, 4],
|
| 24 |
+
// with batch dims 0 and 2:
|
| 25 |
+
// BatchedTensor(ones(2, 3, 4), bdims=[(lvl=1,dim=0),(lvl=2,dim=2)])
|
| 26 |
+
//
|
| 27 |
+
// We say the *logical* view of the tensor has size [3] -- tensors inside
|
| 28 |
+
// `func` appear to have size [3].
|
| 29 |
+
// However, the *physical* underlying tensor (the one passed to vmap) has size
|
| 30 |
+
// [2, 3, 4].
|
| 31 |
+
//
|
| 32 |
+
// This notion of logical vs physical also extends to non-tensor arguments.
|
| 33 |
+
// Consider the previous tensor; let's assume the user called
|
| 34 |
+
// `torch.sum(tensor, dim=0)` inside of `func`. Then the logical
|
| 35 |
+
// dimension they are reducing over is dim 0 but the physical dim is dim 1
|
| 36 |
+
// (the first non-batch dimension)
|
| 37 |
+
|
| 38 |
+
// Forward declared; see NOTE: [What is a VmapPhysicalView?]
|
| 39 |
+
struct VmapPhysicalView;
|
| 40 |
+
|
| 41 |
+
// Most PyTorch operators take 4 or fewer inputs.
|
| 42 |
+
constexpr int64_t kVmapTransformStaticInputSize = 4;
|
| 43 |
+
using VmapPhysicalViewVec = SmallVector<VmapPhysicalView, kVmapTransformStaticInputSize>;
|
| 44 |
+
|
| 45 |
+
// Pytorch generally advertises good performance for <= 5 dims.
|
| 46 |
+
// (see ATen/core/DimVector.h). We add a few extra dims (~3) for vmap
|
| 47 |
+
// dimensions to get 8. Adjust this number as necessary
|
| 48 |
+
constexpr int64_t kVmapStaticDimVecSize = 8;
|
| 49 |
+
using VmapDimVector = SmallVector<int64_t, kVmapStaticDimVecSize>;
|
| 50 |
+
using VmapSymDimVector = SmallVector<c10::SymInt, kVmapStaticDimVecSize>;
|
| 51 |
+
|
| 52 |
+
// NOTE: [What is an VmapTransform?]
|
| 53 |
+
// An *VmapTransform* converts logical views of tensors to physical views.
|
| 54 |
+
//
|
| 55 |
+
// Batching rules use VmapTransforms to convert logical arguments to
|
| 56 |
+
// physical arguments, then call one or more at:: operator that handles the
|
| 57 |
+
// physical arguments, and then converts the physical result back to a logical
|
| 58 |
+
// argument.
|
| 59 |
+
|
| 60 |
+
// VmapTransform for operators that take tensors with multiple batch dims.
|
| 61 |
+
// Given one or more logical views on Tensors, `logicalToPhysical`
|
| 62 |
+
// permutes all of the batch dims to the front of the tensor, aligns
|
| 63 |
+
// and expands the batch dims to match each other (according to their `level`),
|
| 64 |
+
// and returns a VmapPhysicalView on the tensor(s).
|
| 65 |
+
struct TORCH_API MultiBatchVmapTransform {
|
| 66 |
+
static VmapPhysicalView logicalToPhysical(const Tensor& logical_tensor);
|
| 67 |
+
static VmapPhysicalViewVec logicalToPhysical(ITensorListRef logical_tensors);
|
| 68 |
+
};
|
| 69 |
+
|
| 70 |
+
// VmapTransform for operators that broadcast all inputs.
|
| 71 |
+
// Given some logical views on Tensors, `logicalToPhysical`:
|
| 72 |
+
// - permutes all of the batch dims to the front of the tensors
|
| 73 |
+
// - aligns all the batch dims to the collective levels of all of the tensors.
|
| 74 |
+
// If a tensor does not have a batch dim for a vmap level, then it receives
|
| 75 |
+
// a size-one dimension for said level.
|
| 76 |
+
// - aligns the non-batch dims to have the same dimensionality, adding extra
|
| 77 |
+
// size-1 dimensions in between the batch dimensions and the non-batch dimensions
|
| 78 |
+
// so that the batch dimensions are lined up from the right.
|
| 79 |
+
//
|
| 80 |
+
// For example: given inputs of size (B, 2) and (B, 3, 2) where B is the batch
|
| 81 |
+
// dimension, BroadcastingVmapTransform returns VmapPhysicalViews that wrap tensors
|
| 82 |
+
// of size (B, 1, 2) and (B, 3, 2).
|
| 83 |
+
//
|
| 84 |
+
// Given inputs of size (B, 2) and (2,), BroadcastingVmapTransform returns
|
| 85 |
+
// VmapPhysicalViews wrapping tensors of size (B, 2) and (1, 2). We don't
|
| 86 |
+
// actually *need* to return a tensor of size (1, 2) for the second tensor
|
| 87 |
+
// because the broadcasting operation takes care of that for us, but we do
|
| 88 |
+
// it anyways to keep things simple.
|
| 89 |
+
struct TORCH_API BroadcastingVmapTransform {
|
| 90 |
+
static VmapPhysicalViewVec logicalToPhysical(TensorList logical_tensors);
|
| 91 |
+
};
|
| 92 |
+
|
| 93 |
+
// Forward declared, if you're reading this file head to toe, don't worry about
|
| 94 |
+
// it yet.
|
| 95 |
+
struct VmapPhysicalToLogicalMap;
|
| 96 |
+
|
| 97 |
+
// NOTE: [What is a VmapPhysicalView?]
|
| 98 |
+
// VmapPhysicalView represents a physical view on a Tensor.
|
| 99 |
+
//
|
| 100 |
+
// One can use it to further convert logical dimension indices, logical shapes,
|
| 101 |
+
// and more to their physical variants, or convert a new (physical) tensor into
|
| 102 |
+
// a logical BatchedTensor. (TODO(rzou): some of these are not yet implemented).
|
| 103 |
+
//
|
| 104 |
+
// VmapPhysicalView stores a physical tensor with all of its batch dimensions at
|
| 105 |
+
// the front and some levels that correspond to said batch dimensions.
|
| 106 |
+
//
|
| 107 |
+
// The levels bitset specifies which vmap levels correspond to the batch
|
| 108 |
+
// dimensions at the front of the tensor. In particular, the number of set bits
|
| 109 |
+
// corresponds to the number of batch dimensions on `tensor` and the rightmost
|
| 110 |
+
// bit of `levels` specifies the maximum number of nested vmaps we are in at
|
| 111 |
+
// this point in time.
|
| 112 |
+
// For example, given:
|
| 113 |
+
// physical_view = VmapPhysicalView(tensor=ones(2, 3, 4, 5, 6), levels={1, 3})
|
| 114 |
+
//
|
| 115 |
+
// Rightmost bit of `levels` is 3 indicating the number of nested vmaps less
|
| 116 |
+
// than or equal to 3.
|
| 117 |
+
// bitset: 010100
|
| 118 |
+
// ^
|
| 119 |
+
// |
|
| 120 |
+
// levels: 012345
|
| 121 |
+
struct TORCH_API VmapPhysicalView {
|
| 122 |
+
VmapPhysicalView(Tensor&& tensor, std::bitset<kVmapNumLevels> levels)
|
| 123 |
+
: levels_(levels), tensor_(std::move(tensor)) {
|
| 124 |
+
// TORCH_INTERNAL_ASSERT(!isBatchedTensor(tensor));
|
| 125 |
+
}
|
| 126 |
+
|
| 127 |
+
Tensor& tensor() { return tensor_; }
|
| 128 |
+
const Tensor& tensor() const { return tensor_; }
|
| 129 |
+
|
| 130 |
+
// Maps logical dim indices to physical dim indices. Also does dim wrapping.
|
| 131 |
+
//
|
| 132 |
+
// For example, given:
|
| 133 |
+
// physical_view = VmapPhysicalView(tensor=ones(2, 3, 4, 5), levels={1, 3})
|
| 134 |
+
//
|
| 135 |
+
// Then physical_view.getPhysicalDims({0, 1}) returns {2, 3}.
|
| 136 |
+
// This is because the size of levels tell us that the first two dimensions
|
| 137 |
+
// of `tensor_` are batch dimensions, so a logical dim of `n` is actually
|
| 138 |
+
// a physical dim of `n + 2`.
|
| 139 |
+
VmapDimVector getPhysicalDims(IntArrayRef logical_dims) const;
|
| 140 |
+
int64_t getPhysicalDim(int64_t logical_dim) const;
|
| 141 |
+
|
| 142 |
+
// Returns a VmapPhysicalToLogicalMap object. This can be used for
|
| 143 |
+
// mapping a physical tensor to a new logical tensor (BatchedTensor)
|
| 144 |
+
VmapPhysicalToLogicalMap getPhysicalToLogicalMap() const;
|
| 145 |
+
|
| 146 |
+
// Maps a logical shape to a physical shape by pre-pending the batch
|
| 147 |
+
// sizes to the logical shape.
|
| 148 |
+
VmapDimVector getPhysicalShape(IntArrayRef logical_shape) const;
|
| 149 |
+
SymDimVector getPhysicalShape(c10::SymIntArrayRef logical_shape) const;
|
| 150 |
+
|
| 151 |
+
int64_t numBatchDims() const;
|
| 152 |
+
|
| 153 |
+
private:
|
| 154 |
+
int64_t numLogicalDims() const;
|
| 155 |
+
|
| 156 |
+
std::bitset<kVmapNumLevels> levels_;
|
| 157 |
+
Tensor tensor_;
|
| 158 |
+
};
|
| 159 |
+
|
| 160 |
+
// Convenience struct used for mapping a physical tensor (a non-BatchedTensor)
|
| 161 |
+
// to a logical one (BatchedTensor). It holds some levels that are used to do the
|
| 162 |
+
// mapping and assumes that the batch dimensions in the physical tensor all
|
| 163 |
+
// occur at the front of the tensor.
|
| 164 |
+
struct TORCH_API VmapPhysicalToLogicalMap {
|
| 165 |
+
VmapPhysicalToLogicalMap(std::bitset<kVmapNumLevels> levels): levels_(levels) {}
|
| 166 |
+
|
| 167 |
+
// Maps a physical tensor to a new logical tensor (BatchedTensor).
|
| 168 |
+
// Assumes that all of the "batch dimensions" are at the front
|
| 169 |
+
// of the physical tensor. For example, given:
|
| 170 |
+
// - x = rank-4 Tensor with size 2, 3, 5, 7
|
| 171 |
+
// - levels = (2, 4)
|
| 172 |
+
// Returns:
|
| 173 |
+
// - BatchedTensor(x, bdims=[(dim=0,lvl=2), (dim=1, lvl=4)])
|
| 174 |
+
Tensor apply(const Tensor& physical_tensor) const;
|
| 175 |
+
|
| 176 |
+
// Given a vector of physical tensors,
|
| 177 |
+
// 1. maps each tensor to a new logical tensor. Assumes that all of the
|
| 178 |
+
// "batch dimensions" are at the front of the physical tensors.
|
| 179 |
+
// 2. stores the new logical tensors back into the passed-in vector. This is
|
| 180 |
+
// to avoid additional dynamic allocations.
|
| 181 |
+
void applyInplace(std::vector<Tensor>& physical_tensors) const;
|
| 182 |
+
|
| 183 |
+
std::bitset<kVmapNumLevels> levels_;
|
| 184 |
+
};
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
} // namespace at::functorch
|
lib/python3.10/site-packages/torch/include/ATen/functorch/Macros.h
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#define SINGLE_ARG(...) __VA_ARGS__
|
lib/python3.10/site-packages/torch/include/ATen/functorch/PlumbingHelper.h
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
// All rights reserved.
|
| 3 |
+
//
|
| 4 |
+
// This source code is licensed under the BSD-style license found in the
|
| 5 |
+
// LICENSE file in the root directory of this source tree.
|
| 6 |
+
#pragma once
|
| 7 |
+
#include <ATen/Tensor.h>
|
| 8 |
+
#include <ATen/functorch/BatchedTensorImpl.h>
|
| 9 |
+
#include <ATen/functorch/DynamicLayer.h>
|
| 10 |
+
|
| 11 |
+
// NOTE: [vmap plumbing]
|
| 12 |
+
//
|
| 13 |
+
// Here's how "batching rules" work.
|
| 14 |
+
// - we register kernels to the Batched key
|
| 15 |
+
// - these kernels have the same signatures as the original operators.
|
| 16 |
+
// For example, at::sin(Tensor self) accepts a Tensor, and the batched kernel
|
| 17 |
+
// must also accept a Tensor
|
| 18 |
+
// - However, it is more natural for users to write a batching rule like the
|
| 19 |
+
// following: sin_batch_rule(Tensor self, std::optional<int> self_bdim)
|
| 20 |
+
// - There is some codegenerated layer (the "plumbing") that wraps the user
|
| 21 |
+
// defined batching rule (e.g. sin_batch_rule) in a kernel that can be
|
| 22 |
+
// registered to the Batched key.
|
| 23 |
+
//
|
| 24 |
+
// The plumbing is responsible for wrapping a batching rule into a form that may
|
| 25 |
+
// be registered as the kernel for the batched key.
|
| 26 |
+
|
| 27 |
+
namespace at::functorch {
|
| 28 |
+
|
| 29 |
+
void vmap_check_escaped(const std::optional<DynamicLayer> &layer, const char* what);
|
| 30 |
+
|
| 31 |
+
// Create a BatchedTensor given a tensor, bdim, and level
|
| 32 |
+
TORCH_API Tensor makeBatched(Tensor tensor, std::optional<int64_t> bdim, int64_t level);
|
| 33 |
+
|
| 34 |
+
// Given a Tensor that may or may not be a BatchedTensor, unwrap it.
|
| 35 |
+
// If `tensor` is not a BatchedTensor, or is a BatchedTensor but the level
|
| 36 |
+
// doesn't match, then this returns (tensor, std::nullopt).
|
| 37 |
+
// Otherwise, it returns (unwrap(tensor), bdim).
|
| 38 |
+
TORCH_API std::tuple<Tensor, std::optional<int64_t>> unwrapTensorAtLevel(const Tensor& tensor, int64_t level);
|
| 39 |
+
|
| 40 |
+
// Creates a vector of BatchedTensor
|
| 41 |
+
TORCH_API std::vector<Tensor> makeBatchedVector(std::vector<Tensor> tensors, std::optional<int64_t> bdim, int64_t level);
|
| 42 |
+
|
| 43 |
+
// Returns True if ANY tensor in tensors is batched at level
|
| 44 |
+
TORCH_API bool isBatchedAtLevel(ITensorListRef tensors, int64_t level);
|
| 45 |
+
TORCH_API bool isBatchedAtLevel(const c10::List<std::optional<Tensor>>& maybe_tensors, int64_t level);
|
| 46 |
+
TORCH_API bool isBatchedAtLevel(const Tensor& tensor, int64_t level);
|
| 47 |
+
TORCH_API bool isBatchedAtLevel(const std::optional<Tensor>& maybe_tensor, int64_t level);
|
| 48 |
+
|
| 49 |
+
// Convenience helper. Returns true if any tensor is batched at level
|
| 50 |
+
TORCH_API bool areAnyBatchedAtLevel(ArrayRef<std::optional<Tensor>> maybe_tensors, int64_t level);
|
| 51 |
+
|
| 52 |
+
inline bool ivalueParticipatesInCurrentLevel(const IValue& ivalue) {
|
| 53 |
+
if (ivalue.isTensor()) {
|
| 54 |
+
auto maybe_level = maybeCurrentDynamicLayer();
|
| 55 |
+
TORCH_INTERNAL_ASSERT(maybe_level.has_value());
|
| 56 |
+
auto current_level = maybe_level->layerId();
|
| 57 |
+
return isBatchedAtLevel(ivalue.toTensor(), current_level);
|
| 58 |
+
}
|
| 59 |
+
// TODO: should really check this
|
| 60 |
+
return false;
|
| 61 |
+
}
|
| 62 |
+
|
| 63 |
+
} // namespace at::functorch
|
lib/python3.10/site-packages/torch/include/ATen/functorch/TensorWrapper.h
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
// All rights reserved.
|
| 3 |
+
//
|
| 4 |
+
// This source code is licensed under the BSD-style license found in the
|
| 5 |
+
// LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
#pragma once
|
| 8 |
+
|
| 9 |
+
#include <ATen/functorch/Macros.h>
|
| 10 |
+
#include <ATen/Tensor.h>
|
| 11 |
+
#include <ATen/functorch/Interpreter.h>
|
| 12 |
+
|
| 13 |
+
namespace at::functorch {
|
| 14 |
+
|
| 15 |
+
// NOTE: [functorch's TensorWrapper]
|
| 16 |
+
//
|
| 17 |
+
// Taking better suggestions for a name. TensorWrapper is the wrapper Tensor
|
| 18 |
+
// Subclass for functorch's grad-based transforms (grad, vjp, jvp). It is
|
| 19 |
+
// analogous to how vmap uses BatchedTensor as the wrapper Tensor subclass.
|
| 20 |
+
//
|
| 21 |
+
// If you're familiar with the Tensor-Variable merge, TensorWrapper is effectively
|
| 22 |
+
// another Variable.
|
| 23 |
+
//
|
| 24 |
+
// Consider grad(grad(torch.sin))(x). This wraps `x` as TensorWrapper(TensorWrapper(x)).
|
| 25 |
+
// The reason why is so that each TensorWrapper can hold its own AutogradMeta and
|
| 26 |
+
// participate in a **separate** autograd graph.
|
| 27 |
+
//
|
| 28 |
+
// There are alternative designs we could have chosen (e.g. each grad transform
|
| 29 |
+
// stores a weak map of Tensor -> AutogradMeta); the benefit of the TensorWrapper
|
| 30 |
+
// design is that we can re-use existing VariableType kernels (i.e. Autograd kernels)
|
| 31 |
+
// without much modification. Since a TensorWrapper looks like a regular Tensor,
|
| 32 |
+
// the VariableType kernel can pull out the AutogradMeta struct from where it
|
| 33 |
+
// expects and extend the autograd graph
|
| 34 |
+
|
| 35 |
+
struct TORCH_API TensorWrapper : public c10::TensorImpl {
|
| 36 |
+
explicit TensorWrapper(
|
| 37 |
+
c10::DispatchKeySet key_set,
|
| 38 |
+
Tensor value,
|
| 39 |
+
int64_t level,
|
| 40 |
+
std::shared_ptr<bool> is_alive,
|
| 41 |
+
bool is_immutable = false, // if true, this came from an operation that aliases an immutable tensor
|
| 42 |
+
bool use_value_sizes_strides = true);
|
| 43 |
+
|
| 44 |
+
void refreshMetadata();
|
| 45 |
+
|
| 46 |
+
const Tensor& value() const {
|
| 47 |
+
return value_;
|
| 48 |
+
}
|
| 49 |
+
std::optional<int64_t> level() const {
|
| 50 |
+
if (is_alive()) {
|
| 51 |
+
return level_;
|
| 52 |
+
}
|
| 53 |
+
return {};
|
| 54 |
+
}
|
| 55 |
+
bool is_immutable() const {
|
| 56 |
+
return is_immutable_;
|
| 57 |
+
}
|
| 58 |
+
bool is_alive() const;
|
| 59 |
+
|
| 60 |
+
// Overrides necessary for autograd
|
| 61 |
+
c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
|
| 62 |
+
const c10::VariableVersion& version_counter,
|
| 63 |
+
bool allow_tensor_metadata_change) const override;
|
| 64 |
+
c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
|
| 65 |
+
c10::VariableVersion&& version_counter,
|
| 66 |
+
bool allow_tensor_metadata_change) const override;
|
| 67 |
+
void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override;
|
| 68 |
+
|
| 69 |
+
private:
|
| 70 |
+
const char* tensorimpl_type_name() const override;
|
| 71 |
+
Tensor value_;
|
| 72 |
+
int64_t level_;
|
| 73 |
+
bool is_immutable_;
|
| 74 |
+
|
| 75 |
+
// TensorWrapper receives a boolean flag on whether or not the Grad Interpreter
|
| 76 |
+
// that created it is still alive or not.
|
| 77 |
+
// If the Grad Interpreter is no longer alive then it attempts to behave like
|
| 78 |
+
// a regular Tensor.
|
| 79 |
+
//
|
| 80 |
+
// When we exit the level, this wrapper may be marked as "not alive".
|
| 81 |
+
// Wrappers that are not alive:
|
| 82 |
+
// 1) May still have autograd metadata on them
|
| 83 |
+
// 2) Forward dispatches to the underlying value()
|
| 84 |
+
std::shared_ptr<bool> is_alive_;
|
| 85 |
+
};
|
| 86 |
+
|
| 87 |
+
// There are two variants of makeTensorWrapper: one that accepts a level
|
| 88 |
+
// and one that accepts an Interpreter.
|
| 89 |
+
//
|
| 90 |
+
// The one that accepts a level tries to automatically get the life handle from the
|
| 91 |
+
// interpreter on the DynamicLayerStack.
|
| 92 |
+
// It needs to be used with caution: if the interpreter is not on the
|
| 93 |
+
// DynamicLayerStack, then we won't be able to find the life handle.
|
| 94 |
+
//
|
| 95 |
+
// In practice this isn't a problem: when we're constructing TensorWrapper in
|
| 96 |
+
// Python, the corresponding interpreter is on the stack.
|
| 97 |
+
TORCH_API Tensor makeTensorWrapper(const Tensor& tensor, int64_t level, bool is_immutable=false);
|
| 98 |
+
TORCH_API Tensor makeTensorWrapper(const Tensor& tensor, const Interpreter& interpreter, bool is_immutable=false);
|
| 99 |
+
TORCH_API TensorWrapper* maybeGetTensorWrapper(const Tensor& tensor);
|
| 100 |
+
TORCH_API void dumpTensor(std::ostream & ss, const Tensor& tensor);
|
| 101 |
+
TORCH_API void dumpTensorCout(const Tensor& tensor);
|
| 102 |
+
|
| 103 |
+
} // namespace at::functorch
|
lib/python3.10/site-packages/torch/include/ATen/functorch/VmapInterpreter.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <ATen/functorch/Interpreter.h>
|
| 3 |
+
|
| 4 |
+
namespace at::functorch {
|
| 5 |
+
|
| 6 |
+
// This is the interpreter that handles the functionalize() transform.
|
| 7 |
+
// See NOTE: [functorch interpreter stack] for more details.
|
| 8 |
+
|
| 9 |
+
struct VmapInterpreterPtr {
|
| 10 |
+
explicit VmapInterpreterPtr(const Interpreter* base): base_(base) { TORCH_INTERNAL_ASSERT(base->key() == TransformType::Vmap); }
|
| 11 |
+
TransformType key() const { return base_->key(); }
|
| 12 |
+
int64_t level() const { return base_->level(); }
|
| 13 |
+
void processImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack);
|
| 14 |
+
void sendToNextInterpreterImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool grad_special_case);
|
| 15 |
+
c10::SymInt batchSize() const {
|
| 16 |
+
return std::get<VmapInterpreterMeta>(base_->meta()).batchSize_;
|
| 17 |
+
}
|
| 18 |
+
RandomnessType randomness() const {
|
| 19 |
+
return std::get<VmapInterpreterMeta>(base_->meta()).randomness_;
|
| 20 |
+
}
|
| 21 |
+
private:
|
| 22 |
+
const Interpreter* base_;
|
| 23 |
+
};
|
| 24 |
+
|
| 25 |
+
} // namespace at::functorch
|
lib/python3.10/site-packages/torch/include/ATen/ops/_addmm_activation.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_addmm_activation_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::_addmm_activation.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False, Tensor(a!) out) -> Tensor(a!)
|
| 26 |
+
inline at::Tensor & _addmm_activation_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1, bool use_gelu=false) {
|
| 27 |
+
return at::_ops::_addmm_activation_out::call(self, mat1, mat2, beta, alpha, use_gelu, out);
|
| 28 |
+
}
|
| 29 |
+
// aten::_addmm_activation.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False, Tensor(a!) out) -> Tensor(a!)
|
| 30 |
+
inline at::Tensor & _addmm_activation_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu, at::Tensor & out) {
|
| 31 |
+
return at::_ops::_addmm_activation_out::call(self, mat1, mat2, beta, alpha, use_gelu, out);
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
// aten::_addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor
|
| 35 |
+
inline at::Tensor _addmm_activation(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1, bool use_gelu=false) {
|
| 36 |
+
return at::_ops::_addmm_activation::call(self, mat1, mat2, beta, alpha, use_gelu);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
}
|
lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Float_ops.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _cast_Float {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &, bool);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
static constexpr const char* name = "aten::_cast_Float";
|
| 22 |
+
static constexpr const char* overload_name = "";
|
| 23 |
+
static constexpr const char* schema_str = "_cast_Float(Tensor self, bool non_blocking=False) -> Tensor";
|
| 24 |
+
static at::Tensor call(const at::Tensor & self, bool non_blocking);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
}} // namespace at::_ops
|
lib/python3.10/site-packages/torch/include/ATen/ops/_convert_indices_from_csr_to_coo_meta.h
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeMetaFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/TensorIterator.h>
|
| 13 |
+
#include <ATen/TensorMeta.h>
|
| 14 |
+
#include <tuple>
|
| 15 |
+
#include <vector>
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
struct TORCH_API structured__convert_indices_from_csr_to_coo : public at::impl::MetaBase {
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
void meta(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose);
|
| 24 |
+
};
|
| 25 |
+
|
| 26 |
+
} // namespace native
|
| 27 |
+
} // namespace at
|
lib/python3.10/site-packages/torch/include/ATen/ops/_cslt_sparse_mm.h
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_cslt_sparse_mm_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::_cslt_sparse_mm(Tensor compressed_A, Tensor dense_B, Tensor? bias=None, Tensor? alpha=None, ScalarType? out_dtype=None, bool transpose_result=False, int alg_id=0, int split_k=1, bool split_k_one_kernel=True) -> Tensor
|
| 26 |
+
inline at::Tensor _cslt_sparse_mm(const at::Tensor & compressed_A, const at::Tensor & dense_B, const ::std::optional<at::Tensor> & bias={}, const ::std::optional<at::Tensor> & alpha={}, ::std::optional<at::ScalarType> out_dtype=::std::nullopt, bool transpose_result=false, int64_t alg_id=0, int64_t split_k=1, bool split_k_one_kernel=true) {
|
| 27 |
+
return at::_ops::_cslt_sparse_mm::call(compressed_A, dense_B, bias, alpha, out_dtype, transpose_result, alg_id, split_k, split_k_one_kernel);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
}
|
lib/python3.10/site-packages/torch/include/ATen/ops/_efficient_attention_forward_native.h
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt> _efficient_attention_forward(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & cu_seqlens_q, const ::std::optional<at::Tensor> & cu_seqlens_k, ::std::optional<int64_t> max_seqlen_q, ::std::optional<int64_t> max_seqlen_k, double dropout_p, int64_t custom_mask_type, bool compute_log_sumexp=false, ::std::optional<double> scale=::std::nullopt, const ::std::optional<at::Tensor> & seqlen_k={}, ::std::optional<int64_t> window_size=::std::nullopt);
|
| 20 |
+
} // namespace native
|
| 21 |
+
} // namespace at
|
lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_expm1_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API ::std::vector<at::Tensor> _foreach_expm1(at::TensorList self);
|
| 21 |
+
TORCH_API void _foreach_expm1_out(at::TensorList out, at::TensorList self);
|
| 22 |
+
TORCH_API void _foreach_expm1_outf(at::TensorList self, at::TensorList out);
|
| 23 |
+
TORCH_API void _foreach_expm1_(at::TensorList self);
|
| 24 |
+
|
| 25 |
+
} // namespace compositeexplicitautograd
|
| 26 |
+
} // namespace at
|
lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_eigh.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_linalg_eigh_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::_linalg_eigh(Tensor A, str UPLO="L", bool compute_v=True) -> (Tensor eigenvalues, Tensor eigenvectors)
|
| 26 |
+
inline ::std::tuple<at::Tensor,at::Tensor> _linalg_eigh(const at::Tensor & A, c10::string_view UPLO="L", bool compute_v=true) {
|
| 27 |
+
return at::_ops::_linalg_eigh::call(A, UPLO, compute_v);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// aten::_linalg_eigh.eigenvalues(Tensor A, str UPLO="L", bool compute_v=True, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
|
| 31 |
+
inline ::std::tuple<at::Tensor &,at::Tensor &> _linalg_eigh_out(at::Tensor & eigenvalues, at::Tensor & eigenvectors, const at::Tensor & A, c10::string_view UPLO="L", bool compute_v=true) {
|
| 32 |
+
return at::_ops::_linalg_eigh_eigenvalues::call(A, UPLO, compute_v, eigenvalues, eigenvectors);
|
| 33 |
+
}
|
| 34 |
+
// aten::_linalg_eigh.eigenvalues(Tensor A, str UPLO="L", bool compute_v=True, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
|
| 35 |
+
inline ::std::tuple<at::Tensor &,at::Tensor &> _linalg_eigh_outf(const at::Tensor & A, c10::string_view UPLO, bool compute_v, at::Tensor & eigenvalues, at::Tensor & eigenvectors) {
|
| 36 |
+
return at::_ops::_linalg_eigh_eigenvalues::call(A, UPLO, compute_v, eigenvalues, eigenvectors);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
}
|
lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit.h
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <optional>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_native_batch_norm_legit_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::_native_batch_norm_legit(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
|
| 26 |
+
inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps) {
|
| 27 |
+
return at::_ops::_native_batch_norm_legit::call(input, weight, bias, running_mean, running_var, training, momentum, eps);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// aten::_native_batch_norm_legit.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd) -> (Tensor(d!), Tensor(e!), Tensor(f!))
|
| 31 |
+
inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_out(at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps) {
|
| 32 |
+
return at::_ops::_native_batch_norm_legit_out::call(input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd);
|
| 33 |
+
}
|
| 34 |
+
// aten::_native_batch_norm_legit.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd) -> (Tensor(d!), Tensor(e!), Tensor(f!))
|
| 35 |
+
inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_outf(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
|
| 36 |
+
return at::_ops::_native_batch_norm_legit_out::call(input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
// aten::_native_batch_norm_legit.no_stats(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
|
| 40 |
+
inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, bool training, double momentum, double eps) {
|
| 41 |
+
return at::_ops::_native_batch_norm_legit_no_stats::call(input, weight, bias, training, momentum, eps);
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
// aten::_native_batch_norm_legit.no_stats_out(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))
|
| 45 |
+
inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_out(at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, bool training, double momentum, double eps) {
|
| 46 |
+
return at::_ops::_native_batch_norm_legit_no_stats_out::call(input, weight, bias, training, momentum, eps, out, save_mean, save_invstd);
|
| 47 |
+
}
|
| 48 |
+
// aten::_native_batch_norm_legit.no_stats_out(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))
|
| 49 |
+
inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_outf(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
|
| 50 |
+
return at::_ops::_native_batch_norm_legit_no_stats_out::call(input, weight, bias, training, momentum, eps, out, save_mean, save_invstd);
|
| 51 |
+
}
|
| 52 |
+
|
| 53 |
+
// aten::_native_batch_norm_legit_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out)
|
| 54 |
+
inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_functional(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool training, double momentum, double eps) {
|
| 55 |
+
return at::_ops::_native_batch_norm_legit_functional::call(input, weight, bias, running_mean, running_var, training, momentum, eps);
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
}
|
lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_copy_native.h
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor _reshape_copy_symint(const at::Tensor & self, c10::SymIntArrayRef size);
|
| 20 |
+
} // namespace native
|
| 21 |
+
} // namespace at
|
lib/python3.10/site-packages/torch/include/ATen/ops/_test_ambiguous_defaults_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor _test_ambiguous_defaults(const at::Tensor & dummy, int64_t a=1, int64_t b=1);
|
| 21 |
+
TORCH_API at::Tensor _test_ambiguous_defaults(const at::Tensor & dummy, int64_t a, c10::string_view b);
|
| 22 |
+
|
| 23 |
+
} // namespace compositeimplicitautograd
|
| 24 |
+
} // namespace at
|