commitId | datetime | subject | comment | diff | gitVersion
---|---|---|---|---|---|
16f53275378de95723b41dc23c0ec52ef54ae29 | Thu, 11 Apr 2024 06:39:54 +0000 | [PATCH 0001/1000] [AOTI] Serialize large weights (#123002) | By appending them to the end of the shared library and mmapping them afterwards. Disabled by default, but overridable by `config.aot_inductor.force_mmap_weights`. Implemented by adding a `USE_MMAP_SELF` define to `inductor/aoti_runtime/model.h`, which is defined when weights are appended to the binary. In that case, the shared library name is determined by calling `dladdr`, and the weights are mmapped and finally checked against a random magic number embedded both at the end of the weights and in the const section of the library in question. Added unit tests to validate that it works as expected. TODO: - Extend support to CUDA - munmap region if the same library is reused Pull Request resolved: https://github.com/pytorch/pytorch/pull/123002 Approved by: https://github.com/jansel, https://github.com/desertfire, https://github.com/mikekgfb | diff --git a/test/inductor/test_aot_inductor.py b/test/inductor/test_aot_inductor.py
index ea21e5f140..5de6d91a0b 100644
--- a/test/inductor/test_aot_inductor.py
+++ b/test/inductor/test_aot_inductor.py
@@ -269,6 +269,22 @@ class AOTInductorTestsTemplate:
)
self.check_model(Model(), example_inputs)
+ def test_large_mmaped_weights(self):
+ class Model(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.linear = torch.nn.Linear(512, 250112)
+
+ def forward(self, x, y):
+ return x + self.linear(y)
+
+ example_inputs = (
+ torch.randn(1, 250112, device=self.device),
+ torch.randn(1, 512, device=self.device),
+ )
+ with config.patch({"aot_inductor.force_mmap_weights": True}):
+ self.check_model(Model(), example_inputs)
+
def test_with_offset(self):
class Model(torch.nn.Module):
def __init__(self, device):
@@ -2727,6 +2743,7 @@ if TEST_WITH_ROCM:
"test_bmm_multiple_dynamic": fail_cuda(is_skip=True),
"test_convolution": fail_cuda(is_skip=True),
"test_large": fail_cuda(is_skip=True),
+ "test_large_mmaped_weights": fail_cuda(is_skip=True),
"test_missing_cubin": fail_cuda(is_skip=True),
"test_multi_device": fail_cuda(is_skip=True),
"test_poi_multiple_dynamic": fail_cuda(is_skip=True),
@@ -2762,6 +2779,7 @@ if not IS_FBCODE:
"test_convolution": fail_minimal_arrayref_interface(is_skip=True),
"test_empty_graph": fail_minimal_arrayref_interface(is_skip=True),
"test_large": fail_minimal_arrayref_interface(is_skip=True),
+ "test_large_mmaped_weights": fail_minimal_arrayref_interface(is_skip=True),
"test_missing_output": fail_minimal_arrayref_interface(is_skip=True),
"test_model_modified_weights": fail_minimal_arrayref_interface(
is_skip=True
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index cc43a50b20..98cf75fc23 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -19,6 +19,7 @@ import re
import shlex
import shutil
import signal
+import struct
import subprocess
import sys
import sysconfig
@@ -38,6 +39,7 @@ from types import ModuleType
from typing import (
Any,
Callable,
+ cast,
Dict,
List,
Optional,
@@ -1545,6 +1547,7 @@ def cpp_compile_command(
aot_mode: bool = False,
compile_only: bool = False,
use_absolute_path: bool = False,
+ use_mmap_weights: bool = False,
) -> str:
ipaths, lpaths, libs, macros, build_arch_flags = get_include_and_linking_paths(
include_pytorch, vec_isa, cuda, aot_mode
@@ -1577,6 +1580,9 @@ def cpp_compile_command(
if compile_only:
libs, lpaths = "", ""
inp_name_str = " ".join(inp_name)
+ if use_mmap_weights:
+ macros += " -D USE_MMAP_SELF"
+
return re.sub(
r"[ \n]+",
" ",
@@ -1655,7 +1661,11 @@ class AotCodeCompiler:
picked_vec_isa = pick_vec_isa()
cpp_command = repr(
cpp_compile_command(
- "i", "o", vec_isa=picked_vec_isa, cuda=cuda, aot_mode=graph.aot_mode
+ "i",
+ "o",
+ vec_isa=picked_vec_isa,
+ cuda=cuda,
+ aot_mode=graph.aot_mode,
)
)
fbcode_aot_cpu_re = False
@@ -1794,6 +1804,17 @@ class AotCodeCompiler:
)
output_o = os.path.splitext(input_path)[0] + ".o"
+ consts_size = sum(
+ tensor.untyped_storage().nbytes()
+ for (name, tensor) in graph.constants.items()
+ if name not in graph.folded_constants
+ )
+ # TODO: Fix mmap weights with cuda
+ use_mmap_weights = (
+ not cuda and not config.is_fbcode() and consts_size > 2_000_000_000
+ )
+ if config.aot_inductor.force_mmap_weights and not cuda:
+ use_mmap_weights = True
compile_cmd = cpp_compile_command(
input=input_path,
output=output_o,
@@ -1802,6 +1823,7 @@ class AotCodeCompiler:
aot_mode=graph.aot_mode,
compile_only=True,
use_absolute_path=use_absolute_path,
+ use_mmap_weights=use_mmap_weights,
)
log.debug("aot compilation command: %s", compile_cmd)
if fbcode_aot_cpu_re:
@@ -1826,11 +1848,19 @@ class AotCodeCompiler:
return bytes(raw_array.contents)
- aot_constants = b"".join(
+ serialized_weights = b"".join(
_to_bytes(graph.get_original_value_of_constant(name))
for name in graph.constants.keys()
if name not in graph.folded_constants
)
+ if not use_mmap_weights:
+ aot_constants = serialized_weights
+ magic_number = 0
+ else:
+ magic_number = cast(
+ int, torch.randint(0, torch.iinfo(torch.int64).max, (1,)).item()
+ )
+ aot_constants = struct.pack("qq", consts_size + 8, magic_number)
consts_o = {
"linux": _compile_consts_linux,
"darwin": _compile_consts_darwin,
@@ -1851,6 +1881,14 @@ class AotCodeCompiler:
else:
run_command_and_check(link_cmd)
+ if use_mmap_weights:
+ with open(output_so, "a+b") as f_so:
+ so_size = f_so.tell()
+ # Page align the weights
+ f_so.write(b" " * (16384 - so_size % 16384))
+ f_so.write(serialized_weights)
+ f_so.write(struct.pack("q", magic_number))
+
# Append cmds to the end of codegen-ed wrapper file
with open(input_path, "a") as f:
f.write("\n")
diff --git a/torch/_inductor/config.py b/torch/_inductor/config.py
index c52b3cbd9b..26015bbc03 100644
--- a/torch/_inductor/config.py
+++ b/torch/_inductor/config.py
@@ -699,6 +699,10 @@ class aot_inductor:
# flag to decide whether to create a submodule for constant graph.
use_runtime_constant_folding: bool = False
+ # flag to force weight to be appened to the shared library and mmaped by the runtime
+ # rather than embedded into the data section. Needed to support 1B+ parameter models
+ force_mmap_weights: bool = False
+
class cuda:
# CUDA arch to use for CUDA template kernel compilation.
diff --git a/torch/csrc/inductor/aoti_runtime/model.h b/torch/csrc/inductor/aoti_runtime/model.h
index ad0970ebae..f03bf6d0fa 100644
--- a/torch/csrc/inductor/aoti_runtime/model.h
+++ b/torch/csrc/inductor/aoti_runtime/model.h
@@ -1,7 +1,12 @@
#pragma once
+#include <dlfcn.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <unistd.h>
#include <optional>
#include <regex>
+#include <stdexcept>
#include <unordered_map>
// WARNING: Be careful when adding new includes here. This header will be used
@@ -268,7 +273,43 @@ class AOTInductorModelBase {
cudaMemcpyHostToDevice));
}
return internal_ptr;
-#else // !USE_CUDA
+#elif USE_MMAP_SELF
+ // get pointer to constant which is packed in model during compile time.
+ AOTI_RUNTIME_CHECK(!skip_copy, "pure cpu mode doesn't support skip copy");
+ if (!self_mmap) {
+ Dl_info dl_info;
+ // get pointer to constant which are appended to the binary
+ AOTI_RUNTIME_CHECK(
+ dladdr(__func__, &dl_info), "Can't find shared library name");
+ int fd = open(dl_info.dli_fname, O_RDONLY);
+ AOTI_RUNTIME_CHECK(fd >= 0, "Shared library file cannot be opened");
+ auto fsize = lseek(fd, 0, SEEK_END);
+ auto weights_size =
+ reinterpret_cast<const uint64_t*>(_binary_constants_bin_start)[0];
+ auto magic_number =
+ reinterpret_cast<const uint64_t*>(_binary_constants_bin_start)[1];
+ auto weights_offset = fsize - weights_size;
+ AOTI_RUNTIME_CHECK(
+ (weights_offset & 0x3fff) == 0,
+ "weights_offset must be aligned to 16K boundary");
+ auto ptr = mmap(
+ NULL,
+ weights_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE,
+ fd,
+ weights_offset);
+ close(fd);
+ AOTI_RUNTIME_CHECK(ptr != MAP_FAILED, "mmap() failed");
+ self_mmap = static_cast<uint8_t*>(ptr);
+ AOTI_RUNTIME_CHECK(
+ reinterpret_cast<uint64_t*>(
+ self_mmap + weights_size - sizeof(uint64_t))[0] == magic_number,
+ "Weigths data seems corrupt");
+ }
+ return self_mmap + bytes_read;
+
+#else // !USE_CUDA&& !USE_MMAP_SELF
// get pointer to constant which is packed in model during compile time.
AOTI_RUNTIME_CHECK(!skip_copy, "pure cpu mode doesn't support skip copy");
return const_cast<uint8_t*>(_binary_constants_bin_start) + bytes_read;
@@ -457,6 +498,9 @@ class AOTInductorModelBase {
// Holds the blob storage for constants' at::Tensor for CUDA.
CUDAPtr constant_blob_;
#endif // USE_CUDA
+#ifdef USE_MMAP_SELF
+ uint8_t* self_mmap = NULL;
+#endif
// A directory with CUDA binary files, e.g. compiled kernels, etc.
const std::optional<std::string> cubin_dir_; | 2.41.0 |
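A minimal sketch of the append step this commit describes, mirroring the constants used in the diff above (16K page alignment, trailing magic number); `append_weights` is a hypothetical helper name, not the actual PyTorch function. The runtime side then mmaps this region and compares the trailing magic number against the copy embedded in the library's const section.

```python
import struct

# Sketch only: mirrors the weight-appending logic from codecache.py above.
def append_weights(so_path: str, serialized_weights: bytes, magic_number: int) -> None:
    with open(so_path, "a+b") as f_so:
        so_size = f_so.tell()
        # Pad so the appended weights start on a 16K boundary, as the runtime check expects.
        f_so.write(b" " * (16384 - so_size % 16384))
        f_so.write(serialized_weights)
        # Trailing magic number lets the runtime verify it mmapped the right region.
        f_so.write(struct.pack("q", magic_number))
```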
aad72b0d3f2b03ae6d268b0c78a3cf349c0ae9f | Wed, 10 Apr 2024 18:05:40 -0700 | [PATCH 0002/1000] Support all unsigned int sizes on unique (#123643) | Signed-off-by: Edward Z. Yang <[email protected]> Pull Request resolved: https://github.com/pytorch/pytorch/pull/123643 Approved by: https://github.com/albanD, https://github.com/kit1980 | diff --git a/aten/src/ATen/cuda/cub-RadixSortKeys.cu b/aten/src/ATen/cuda/cub-RadixSortKeys.cu
index cf88c8aa0c..74e82ae55c 100644
--- a/aten/src/ATen/cuda/cub-RadixSortKeys.cu
+++ b/aten/src/ATen/cuda/cub-RadixSortKeys.cu
@@ -51,5 +51,8 @@ void radix_sort_keys(
int64_t end_bit);
AT_FORALL_SCALAR_TYPES_AND2(Bool, Half, AT_INSTATIATE_CUB_TEMPLATES)
+AT_INSTATIATE_CUB_TEMPLATES(uint16_t, UInt16)
+AT_INSTATIATE_CUB_TEMPLATES(uint32_t, UInt32)
+AT_INSTATIATE_CUB_TEMPLATES(uint64_t, UInt64)
} // namespace at::cuda::cub
diff --git a/aten/src/ATen/cuda/cub-RadixSortPairs.cu b/aten/src/ATen/cuda/cub-RadixSortPairs.cu
index bd20069cf6..cc7c969300 100644
--- a/aten/src/ATen/cuda/cub-RadixSortPairs.cu
+++ b/aten/src/ATen/cuda/cub-RadixSortPairs.cu
@@ -77,6 +77,9 @@ AT_INSTANTIATE_SORT_PAIRS(int64_t, 4)
AT_INSTANTIATE_SORT_PAIRS(scalar_t, 8)
AT_FORALL_SCALAR_TYPES_AND2(Bool, Half, AT_INSTANTIATE_SORT_PAIRS_8)
+AT_INSTANTIATE_SORT_PAIRS(uint16_t, 8)
+AT_INSTANTIATE_SORT_PAIRS(uint32_t, 8)
+AT_INSTANTIATE_SORT_PAIRS(uint64_t, 8)
// BFloat16 Radix sort is supported from ROCm 4.5 onwards
#if !AT_ROCM_ENABLED() || (AT_ROCM_ENABLED() && ROCM_VERSION >= 40500)
diff --git a/aten/src/ATen/native/ReduceOps.cpp b/aten/src/ATen/native/ReduceOps.cpp
index 24247c0b8e..d29b177c13 100644
--- a/aten/src/ATen/native/ReduceOps.cpp
+++ b/aten/src/ATen/native/ReduceOps.cpp
@@ -4,6 +4,7 @@
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
+#include <ATen/Dispatch_v2.h>
#include <ATen/Parallel.h>
#include <ATen/WrapDimUtils.h>
#include <ATen/WrapDimUtilsMulti.h>
@@ -2255,7 +2256,7 @@ bool cpu_equal(const Tensor& self, const Tensor& other) {
.promote_inputs_to_common_dtype(true)
.build();
- AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kBFloat16, kHalf, iter.input_dtype(), "equal_cpu", [&] {
+ AT_DISPATCH_V2(iter.input_dtype(), "equal_cpu", AT_WRAP([&] {
iter.for_each([&](char** data, const int64_t *strides, int64_t dim_size) {
if (!result) {
return;
@@ -2271,7 +2272,7 @@ bool cpu_equal(const Tensor& self, const Tensor& other) {
other_data += strides[1];
}
});
- });
+ }), kBool, kBFloat16, kHalf, AT_EXPAND(AT_ALL_TYPES_AND_COMPLEX), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));
return result.load();
}
diff --git a/aten/src/ATen/native/Unique.cpp b/aten/src/ATen/native/Unique.cpp
index 79306f3eee..801af5d5e7 100644
--- a/aten/src/ATen/native/Unique.cpp
+++ b/aten/src/ATen/native/Unique.cpp
@@ -2,7 +2,7 @@
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
-#include <ATen/Dispatch.h>
+#include <ATen/Dispatch_v2.h>
#include <ATen/Parallel.h>
#include <ATen/native/TensorIterator.h>
#include <c10/util/irange.h>
@@ -446,13 +446,13 @@ _unique_cpu(const Tensor& self, const bool sorted, const bool return_inverse) {
self, return_inverse, /* return_counts */false);
return std::make_tuple(output, inverse);
}
- return AT_DISPATCH_ALL_TYPES_AND2(kBFloat16, kHalf, self.scalar_type(), "unique", [&] {
+ return AT_DISPATCH_V2(self.scalar_type(), "unique", [&] AT_WRAP({
// The current CPU implementation of unique always sort due to
// this is faster than hash table
auto [output, inverse, _] = unique_cpu_sorted_template<scalar_t>(
self, return_inverse, /* return_counts */false, IsUnique<scalar_t, /* equal_nan */false>());
return std::make_tuple(output, inverse);
- });
+ }), AT_EXPAND(AT_ALL_TYPES), kBFloat16, kHalf, AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));
}
std::tuple<Tensor, Tensor, Tensor>
@@ -460,35 +460,35 @@ _unique2_cpu(const Tensor& self, const bool sorted, const bool return_inverse, c
if (self.scalar_type() == kBool) {
return unique_cpu_bool_template(self, return_inverse, return_counts);
}
- return AT_DISPATCH_ALL_TYPES_AND2(kBFloat16, kHalf, self.scalar_type(), "unique", [&] {
+ return AT_DISPATCH_V2(self.scalar_type(), "unique", AT_WRAP([&] {
// The current CPU implementation of unique always sort due to
// this is faster than hash table
return unique_cpu_sorted_template<scalar_t>(
self, return_inverse, return_counts, IsUnique<scalar_t, /* equal_nan */ false>());
- });
+ }), AT_EXPAND(AT_ALL_TYPES), kBFloat16, kHalf, AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));
}
std::tuple<Tensor, Tensor, Tensor>
unique_dim_cpu(const Tensor& self, const int64_t dim, const bool sorted, const bool return_inverse, const bool return_counts) {
- return AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kBool, kHalf, self.scalar_type(), "unique_dim", [&] {
+ return AT_DISPATCH_V2(self.scalar_type(), "unique_dim", AT_WRAP([&] {
// The current implementation using `dim` always sorts due to unhashable tensors
return _unique_dim_cpu_template<scalar_t>(self, dim, false, return_inverse, return_counts);
- });
+ }), AT_EXPAND(AT_ALL_TYPES), kBFloat16, kBool, kHalf, AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));
}
std::tuple<Tensor, Tensor, Tensor>
unique_dim_consecutive_cpu(const Tensor& self, const int64_t dim, const bool return_inverse, const bool return_counts) {
- return AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kBool, kHalf, self.scalar_type(), "unique_dim", [&] {
+ return AT_DISPATCH_V2(self.scalar_type(), "unique_dim", AT_WRAP([&] {
return _unique_dim_cpu_template<scalar_t>(self, dim, true, return_inverse, return_counts);
- });
+ }), AT_EXPAND(AT_ALL_TYPES), kBFloat16, kBool, kHalf, AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));
}
std::tuple<Tensor, Tensor, Tensor>
unique_consecutive_cpu(const Tensor& self, const bool return_inverse, const bool return_counts, c10::optional<int64_t> dim) {
if (!dim.has_value() || (dim.value() == 0 && self.dim() == 1)) {
- return AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kBool, kHalf, self.scalar_type(), "unique", [&] {
+ return AT_DISPATCH_V2(self.scalar_type(), "unique", AT_WRAP([&] {
return unique_consecutive_cpu_template<scalar_t>(self, return_inverse, return_counts);
- });
+ }), AT_EXPAND(AT_ALL_TYPES), kBFloat16, kBool, kHalf, AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));
}
return unique_dim_consecutive_cpu(self, dim.value(), return_inverse, return_counts);
}
diff --git a/aten/src/ATen/native/cpu/SortingKernel.cpp b/aten/src/ATen/native/cpu/SortingKernel.cpp
index 3349cd3be1..22ba015215 100644
--- a/aten/src/ATen/native/cpu/SortingKernel.cpp
+++ b/aten/src/ATen/native/cpu/SortingKernel.cpp
@@ -5,6 +5,7 @@
#include <ATen/native/Sorting.h>
#include <ATen/core/TensorBase.h>
#include <ATen/Dispatch.h>
+#include <ATen/Dispatch_v2.h>
#include <ATen/Parallel.h>
#include <ATen/NumericUtils.h>
#include <ATen/TensorIterator.h>
@@ -42,9 +43,8 @@ void _dim_apply(
auto indices_dim_stride = indices.stride(dim);
auto dim_size = values.size(dim);
- AT_DISPATCH_ALL_TYPES_AND3(
- ScalarType::Bool, ScalarType::Half, ScalarType::BFloat16, iter.dtype(),
- "sorting_kernel_method_name", [&] {
+ AT_DISPATCH_V2(
+ iter.dtype(), "sorting_kernel_method_name", AT_WRAP([&] {
auto loop = [&](char** data, const int64_t* strides, int64_t n) {
auto* values_data_bytes = data[0];
auto* indices_data_bytes = data[1];
@@ -69,7 +69,7 @@ void _dim_apply(
int64_t grain_size = internal::GRAIN_SIZE / std::max(int64_t{1}, dim_size);
iter.for_each(loop, /*grain_size=*/grain_size);
- }
+ }), kBool, kHalf, kBFloat16, AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES)
);
}
diff --git a/aten/src/ATen/native/cuda/Unique.cu b/aten/src/ATen/native/cuda/Unique.cu
index 30b4640be6..e2654be013 100644
--- a/aten/src/ATen/native/cuda/Unique.cu
+++ b/aten/src/ATen/native/cuda/Unique.cu
@@ -1,6 +1,6 @@
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
-#include <ATen/Dispatch.h>
+#include <ATen/Dispatch_v2.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/ThrustAllocator.h>
@@ -186,45 +186,45 @@ std::tuple<Tensor, Tensor, Tensor> unique_dim_cuda_template(
std::tuple<Tensor, Tensor>
_unique_cuda(const Tensor& self, const bool sorted, const bool return_inverse) {
- return AT_DISPATCH_ALL_TYPES_AND2(kBool, kHalf, self.scalar_type(), "unique", [&] {
+ return AT_DISPATCH_V2(self.scalar_type(), "unique", AT_WRAP([&] {
// The current CUDA implementation of unique always sort due to the
// lack of hashtable implementation in thrust
auto [output, inverse, _] = internal::unique_cuda_template<scalar_t>(self, false, return_inverse, false);
return std::make_tuple(output, inverse);
- });
+ }), AT_EXPAND(AT_ALL_TYPES), kBool, kHalf, AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));
}
std::tuple<Tensor, Tensor, Tensor>
_unique2_cuda(const Tensor& self, const bool sorted, const bool return_inverse, const bool return_counts) {
- return AT_DISPATCH_ALL_TYPES_AND2(kBool, kHalf, self.scalar_type(), "unique", [&] {
+ return AT_DISPATCH_V2(self.scalar_type(), "unique", AT_WRAP([&] {
// The current CUDA implementation of unique always sort due to the
// lack of hashtable implementation in thrust
return internal::unique_cuda_template<scalar_t>(self, false, return_inverse, return_counts);
- });
+ }), AT_EXPAND(AT_ALL_TYPES), kBool, kHalf, AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));
}
std::tuple<Tensor, Tensor, Tensor>
unique_dim_cuda(const Tensor& self, const int64_t dim, const bool sorted, const bool return_inverse, const bool return_counts) {
- return AT_DISPATCH_ALL_TYPES_AND2(kBool, kHalf, self.scalar_type(), "unique_dim", [&] {
+ return AT_DISPATCH_V2(self.scalar_type(), "unique_dim", AT_WRAP([&] {
return unique_dim_cuda_template<scalar_t>(self, dim, false, return_inverse, return_counts);
- });
+ }), AT_EXPAND(AT_ALL_TYPES), kBool, kHalf, AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));
}
std::tuple<Tensor, Tensor, Tensor>
unique_dim_consecutive_cuda(const Tensor& self, const int64_t dim, const bool return_inverse, const bool return_counts) {
- return AT_DISPATCH_ALL_TYPES_AND2(kBool, kHalf, self.scalar_type(), "unique_dim", [&] {
+ return AT_DISPATCH_V2(self.scalar_type(), "unique_dim", AT_WRAP([&] {
return unique_dim_cuda_template<scalar_t>(self, dim, true, return_inverse, return_counts);
- });
+ }), AT_EXPAND(AT_ALL_TYPES), kBool, kHalf, AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));
}
std::tuple<Tensor, Tensor, Tensor>
unique_consecutive_cuda(const Tensor& self, const bool return_inverse, const bool return_counts, c10::optional<int64_t> dim) {
if (!dim.has_value()) {
- return AT_DISPATCH_ALL_TYPES_AND2(kBool, kHalf, self.scalar_type(), "unique", [&] {
+ return AT_DISPATCH_V2(self.scalar_type(), "unique", AT_WRAP([&] {
// The current CUDA implementation of unique always sort due to the
// lack of hashtable implementation in thrust
return internal::unique_cuda_template<scalar_t>(self, true, return_inverse, return_counts);
- });
+ }), AT_EXPAND(AT_ALL_TYPES), kBool, kHalf, AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));
}
return unique_dim_consecutive_cuda(self, dim.value(), return_inverse, return_counts);
}
diff --git a/aten/src/ATen/native/cuda/UniqueCub.cu b/aten/src/ATen/native/cuda/UniqueCub.cu
index f9e71bde1a..bbd8673bcf 100644
--- a/aten/src/ATen/native/cuda/UniqueCub.cu
+++ b/aten/src/ATen/native/cuda/UniqueCub.cu
@@ -335,6 +335,9 @@ INSTANTIATE_UNIQUE_CUDA_TEMPLATE(float);
INSTANTIATE_UNIQUE_CUDA_TEMPLATE(int32_t);
INSTANTIATE_UNIQUE_CUDA_TEMPLATE(int64_t);
INSTANTIATE_UNIQUE_CUDA_TEMPLATE(int16_t);
+INSTANTIATE_UNIQUE_CUDA_TEMPLATE(uint32_t);
+INSTANTIATE_UNIQUE_CUDA_TEMPLATE(uint64_t);
+INSTANTIATE_UNIQUE_CUDA_TEMPLATE(uint16_t);
INSTANTIATE_UNIQUE_CUDA_TEMPLATE(bool);
INSTANTIATE_UNIQUE_CUDA_TEMPLATE(at::Half);
diff --git a/c10/core/ScalarType.cpp b/c10/core/ScalarType.cpp
index a942ae252d..f9704c8157 100644
--- a/c10/core/ScalarType.cpp
+++ b/c10/core/ScalarType.cpp
@@ -84,6 +84,13 @@ ScalarType promoteTypes(ScalarType a, ScalarType b) {
// - We must not promote uint64 to int64 because this will overflow.
//
// It'll be a bit of work to fix it, so we're punting on it for now.
+ // However, float promotion is fine, so we handle that.
+ if (isFloatingType(a)) {
+ return a;
+ }
+ if (isFloatingType(b)) {
+ return b;
+ }
TORCH_CHECK(
false,
"Promotion for uint16, uint32, uint64 types is not supported, attempted to promote ",
diff --git a/test/test_meta.py b/test/test_meta.py
index b081ce173d..deb421adee 100644
--- a/test/test_meta.py
+++ b/test/test_meta.py
@@ -64,6 +64,9 @@ i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
+u16 = torch.uint16
+u32 = torch.uint32
+u64 = torch.uint64
foreach_op_db = (
foreach_unary_op_db +
@@ -659,8 +662,8 @@ meta_function_expected_failures = {
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c32, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
- torch.functional.unique : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32},
- torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32},
+ torch.functional.unique : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
+ torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.histc : {f64, f16, bf16, f32},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
@@ -832,7 +835,7 @@ meta_dispatch_expected_failures = {
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
- aten._unique2.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8},
+ aten._unique2.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.histc.default : {bf16, f32, f64},
@@ -840,8 +843,8 @@ meta_dispatch_expected_failures = {
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.kthvalue.default : {i8, f64, i64, f16, bf16, f32, i32, i16, u8},
- aten.unique_consecutive.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8},
- aten.unique_dim.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8},
+ aten.unique_consecutive.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
+ aten.unique_dim.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
diff --git a/test/test_testing.py b/test/test_testing.py
index 5c81197f9f..26722e2bfb 100644
--- a/test/test_testing.py
+++ b/test/test_testing.py
@@ -2055,7 +2055,7 @@ class TestTestParametrizationDeviceType(TestCase):
for test_func, name in _get_test_funcs_for_test_class(device_cls):
should_apply = (name == 'test_op_param_test_op_x_2_cpu_float64' or
('test_other' in name and 'y_5' in name) or
- ('test_three' in name and name.endswith('int16')))
+ ('test_three' in name and name.endswith('_int16')))
self.assertEqual(hasattr(test_func, '_decorator_applied'), should_apply)
def test_modules_decorator_applies_module_and_param_specific_decorators(self, device):
diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py
index 7a8a529180..84798ebac5 100644
--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -16793,8 +16793,8 @@ op_db: List[OpInfo] = [
skips=(
)),
OpInfo('unique',
- dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
- dtypesIfCUDA=all_types_and(torch.bool, torch.float16),
+ dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16, torch.uint16, torch.uint32, torch.uint64),
+ dtypesIfCUDA=all_types_and(torch.bool, torch.float16, torch.uint16, torch.uint32, torch.uint64),
sample_inputs_func=sample_inputs_unique,
supports_out=False,
supports_autograd=False, | 2.41.0 |
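With the dispatch changes above, `torch.unique` should accept the larger unsigned integer dtypes on both CPU and CUDA. A small usage sketch, assuming a build that includes this commit:

```python
import torch

# uint16/uint32/uint64 inputs are now covered by the unique dispatch macros.
x = torch.tensor([3, 1, 3, 2, 1], dtype=torch.uint32)
values, counts = torch.unique(x, return_counts=True)
print(values, counts)
```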
2f687f32c3abddc0999733e26761a1f608029f3 | Thu, 11 Apr 2024 06:53:10 +0000 | [PATCH 0003/1000] Option to include stride and device annotation in gm.print_readable() (#123690) | Summary: Sample output for gm.print_readable(include_stride=True, include_device=True) ``` getitem_21: "i32[1200][1]cuda:0" = auto_functionalized_4[1] copy_2: "f32[2, 60][60, 1]cuda:1" = .... ``` Test Plan: CI Differential Revision: D55949129 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123690 Approved by: https://github.com/Chillee | diff --git a/test/expect/TestFXAPIBackwardCompatibility.test_function_back_compat-fx_backcompat_function_signatures.expect b/test/expect/TestFXAPIBackwardCompatibility.test_function_back_compat-fx_backcompat_function_signatures.expect
index d6630cff36..2996edd485 100644
--- a/test/expect/TestFXAPIBackwardCompatibility.test_function_back_compat-fx_backcompat_function_signatures.expect
+++ b/test/expect/TestFXAPIBackwardCompatibility.test_function_back_compat-fx_backcompat_function_signatures.expect
@@ -22,7 +22,7 @@ torch.fx.graph.Graph.node_copy(self, node: torch.fx.node.Node, arg_transform: Ca
torch.fx.graph.Graph.output(self, result: 'Argument', type_expr: Optional[Any] = None)
torch.fx.graph.Graph.placeholder(self, name: str, type_expr: Optional[Any] = None, default_value: Any) -> torch.fx.node.Node
torch.fx.graph.Graph.print_tabular(self)
-torch.fx.graph.Graph.python_code(self, root_module: str, verbose: bool = False) -> torch.fx.graph.PythonCode
+torch.fx.graph.Graph.python_code(self, root_module: str, verbose: bool = False, include_stride: bool = False, include_device: bool = False) -> torch.fx.graph.PythonCode
torch.fx.graph_module.GraphModule.__init__(self, root: Union[torch.nn.modules.module.Module, Dict[str, Any]], graph: torch.fx.graph.Graph, class_name: str = 'GraphModule')
torch.fx.graph_module.GraphModule.add_submodule(self, target: str, m: torch.nn.modules.module.Module) -> bool
torch.fx.graph_module.GraphModule.delete_all_unused_submodules(self) -> None
diff --git a/torch/fx/graph.py b/torch/fx/graph.py
index 50f94bfca8..7ff8f94dbf 100644
--- a/torch/fx/graph.py
+++ b/torch/fx/graph.py
@@ -4,8 +4,9 @@ import torch.utils._pytree as pytree
from . import _pytree as fx_pytree
from ._compatibility import compatibility
+import os
import contextlib
-from typing import TYPE_CHECKING, Callable, Any, List, Dict, NamedTuple, Optional, Tuple, Set, FrozenSet, Type
+from typing import TYPE_CHECKING, Callable, Any, List, Dict, NamedTuple, Optional, Tuple, Set, FrozenSet, Type, Iterable
from dataclasses import dataclass
from contextlib import contextmanager
import copy
@@ -378,7 +379,8 @@ class CodeGen:
return []
def _gen_python_code(
- self, nodes, root_module: str, namespace: _Namespace, *, verbose: bool = False,
+ self, nodes, root_module: str, namespace: _Namespace, *,
+ verbose: bool = False, include_stride: bool = False, include_device: bool = False
) -> PythonCode:
free_vars: List[str] = []
body: List[str] = []
@@ -387,6 +389,8 @@ class CodeGen:
# Wrap string in list to pass by reference
maybe_return_annotation : List[str] = ['']
+ include_stride = include_stride or (os.environ.get("FX_GRAPH_SHOW_STRIDE", "0") == "1")
+ include_device = include_device or (os.environ.get("FX_GRAPH_SHOW_DEVICE", "0") == "1")
def add_global(name_hint: str, obj: Any):
"""Add an obj to be tracked as a global.
@@ -530,7 +534,7 @@ class CodeGen:
prev_stacktrace = ""
body.append('\n# No stacktrace found for following nodes\n')
- def stringify_shape(shape : torch.Size) -> str:
+ def stringify_shape(shape : Iterable) -> str:
return f"[{', '.join(str(x) for x in shape)}]"
def emit_node(node : Node):
@@ -543,10 +547,13 @@ class CodeGen:
from torch.fx.passes.shape_prop import TensorMetadata
meta_val = node.meta.get('val', node.meta.get('tensor_meta', None))
-
# use string as annotation, to make it valid python code
if isinstance(meta_val, FakeTensor):
- maybe_type_annotation = f': "{dtype_abbrs[meta_val.dtype]}{stringify_shape(meta_val.shape)}"'
+ stride_annotation = f"{stringify_shape(meta_val.stride())}" if include_stride else ""
+ device_annotation = f"{meta_val.device}" if include_device else ""
+ maybe_type_annotation = \
+ f': "{dtype_abbrs[meta_val.dtype]}{stringify_shape(meta_val.shape)}' \
+ f'{stride_annotation}{device_annotation}"'
elif isinstance(meta_val, py_sym_types):
maybe_type_annotation = f': "Sym({meta_val})"'
elif isinstance(meta_val, TensorMetadata):
@@ -1346,7 +1353,10 @@ class Graph:
return op
@compatibility(is_backward_compatible=True)
- def python_code(self, root_module: str, *, verbose: bool = False) -> PythonCode:
+ def python_code(
+ self, root_module: str, *,
+ verbose: bool = False, include_stride: bool = False, include_device: bool = False
+ ) -> PythonCode:
"""
Turn this ``Graph`` into valid Python code.
@@ -1405,10 +1415,19 @@ class Graph:
node._repr_fn = orig_repr_fns[node]
with override_node_repr(self):
- return self._python_code(root_module, namespace, verbose=verbose)
+ return self._python_code(
+ root_module, namespace,
+ verbose=verbose, include_stride=include_stride, include_device=include_device
+ )
- def _python_code(self, root_module: str, namespace: _Namespace, *, verbose: bool = False) -> PythonCode:
- return self._codegen._gen_python_code(self.nodes, root_module, namespace, verbose=verbose)
+ def _python_code(
+ self, root_module: str, namespace: _Namespace, *,
+ verbose: bool = False, include_stride: bool = False, include_device: bool = False
+ ) -> PythonCode:
+ return self._codegen._gen_python_code(
+ self.nodes, root_module, namespace,
+ verbose=verbose, include_stride=include_stride, include_device=include_device
+ )
def __str__(self) -> str:
diff --git a/torch/fx/graph_module.py b/torch/fx/graph_module.py
index 1a1e7087dc..9569a0d01b 100644
--- a/torch/fx/graph_module.py
+++ b/torch/fx/graph_module.py
@@ -818,11 +818,13 @@ class {module_name}(torch.nn.Module):
return res
@compatibility(is_backward_compatible=False)
- def print_readable(self, print_output=True):
+ def print_readable(self, print_output=True, include_stride=False, include_device=False):
"""
Return the Python code generated for current GraphModule and its children GraphModules
"""
- verbose_python_code = self._graph.python_code(root_module="self", verbose=True)
+ verbose_python_code = self._graph.python_code(
+ root_module="self", verbose=True, include_stride=include_stride, include_device=include_device
+ )
module_code = verbose_python_code.src
module_code = module_code.lstrip("\n")
module_code = f"class {self._get_name()}(torch.nn.Module):\n" + module_code | 2.41.0 |
8d2504eece2ba5e464a42b253ea07f70e9ba5b6 | Tue, 9 Apr 2024 12:11:09 -0700 | [PATCH 0004/1000] [aot] always pass inputs to runtime_wrapper as list and add type annotations (#123630) | `runtime_wrapper` unpacking the arguments as a Tuple[arg] will prevent them from being freed within its scope. This is problematic if Inductor wants to free those inputs, which could be activations in the compiled-backwards case. This PR only changes the signature to pass them as a list, but does not clear it, keeping the same refcount as before. Also adds some mypy annotations. Ideally, instead of `Any`, I would want a type describing a single arg, which is usually a Tensor or an int. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123630 Approved by: https://github.com/jansel, https://github.com/bdhirsh | diff --git a/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py b/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py
index dda3144b24..5c9c3424d3 100644
--- a/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py
+++ b/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py
@@ -162,7 +162,7 @@ def aot_dispatch_base(
# Create a wrapper to set up the rng functionalize bits
@wraps(compiled_fw)
- def rng_functionalization_wrapper(args):
+ def rng_functionalization_wrapper(args: List[Any]):
# see note: [Returning Fake Tensors on First AOT Autograd Call]
nonlocal fakified_out
if fakified_out is not None:
@@ -993,7 +993,7 @@ Got grad_output types: {str(grad_output_types)}"""
]
@wraps(compiled_function)
- def debug_compiled_function(*args):
+ def debug_compiled_function(args: List[Any]):
# TODO: Check aliasing relationships
# TODO: Check strides for metadata mutation
# (NB: ideally, this logic is factored out of this function and
@@ -1013,6 +1013,6 @@ Got grad_output types: {str(grad_output_types)}"""
f"{describe_input(i, aot_config)} would not require grad",
)
- return compiled_function(*args)
+ return compiled_function(args)
return debug_compiled_function
diff --git a/torch/_functorch/_aot_autograd/runtime_wrappers.py b/torch/_functorch/_aot_autograd/runtime_wrappers.py
index 6c2a8beddc..1ef2df56a2 100644
--- a/torch/_functorch/_aot_autograd/runtime_wrappers.py
+++ b/torch/_functorch/_aot_autograd/runtime_wrappers.py
@@ -72,13 +72,14 @@ def create_runtime_wrapper(
if not hasattr(compiled_fn, "_boxed_call"):
compiled_fn = make_boxed_func(compiled_fn)
- def runtime_wrapper(*args):
+ def runtime_wrapper(args: List[Any]):
num_tokens = len(runtime_metadata.tokens)
if config.unlift_effect_tokens:
assert num_tokens == 0
elif num_tokens > 0:
# Pass in effect tokens (See Note [Side-Effectful Tokens in AOTAutograd])
- args = ([None] * num_tokens, *args)
+ # NOTE: this keeps an extra reference to the old args until the end of this function
+ args = [[None] * num_tokens, *args]
if trace_joint:
args_ = list(args)
@@ -572,11 +573,8 @@ fw_metadata={str(fw_metadata)}
wrapped_flat_fn, deduped_flat_args, aot_config, fw_metadata=updated_fw_metadata
)
- if not hasattr(compiled_fn, "_boxed_call"):
- compiled_fn = make_boxed_func(compiled_fn)
-
@wraps(compiled_fn)
- def wrapped_compiled_fn(args):
+ def wrapped_compiled_fn(args: List[Any]):
deduped_args = remove_dupe_args(args)
args.clear()
return compiled_fn(deduped_args)
@@ -742,9 +740,6 @@ fw_metadata={str(fw_metadata)}
fw_metadata=fw_metadata_updated,
)
- if not hasattr(compiled_fn, "_boxed_call"):
- compiled_fn = make_boxed_func(compiled_fn)
-
@wraps(compiled_fn)
def wrapped_compiled_fn(args):
args_with_synthetic_bases, synthetic_base_info = merge_view_inputs(
diff --git a/torch/_functorch/_aot_autograd/utils.py b/torch/_functorch/_aot_autograd/utils.py
index 0e4989860b..97512f6836 100644
--- a/torch/_functorch/_aot_autograd/utils.py
+++ b/torch/_functorch/_aot_autograd/utils.py
@@ -7,7 +7,7 @@ import operator
import warnings
from contextlib import nullcontext
from functools import wraps
-from typing import Any, Callable, List, Optional, Tuple
+from typing import Any, Callable, List, Optional, Tuple, Union
import torch
import torch.utils._pytree as pytree
@@ -103,7 +103,9 @@ def make_boxed_compiler(compiler):
return f
-def call_func_at_runtime_with_args(f, args, steal_args=False, disable_amp=False):
+def call_func_at_runtime_with_args(
+ f, args: Union[Tuple[Any], List[Any]], steal_args=False, disable_amp=False
+):
if not steal_args:
args = list(args)
assert isinstance(args, list)
diff --git a/torch/_functorch/aot_autograd.py b/torch/_functorch/aot_autograd.py
index 3a06db7d1f..8a421573f0 100644
--- a/torch/_functorch/aot_autograd.py
+++ b/torch/_functorch/aot_autograd.py
@@ -648,9 +648,6 @@ or otherwise set torch._functorch.config.functionalize_rng_ops = False.""")
assert isinstance(compiled_fn, torch.fx.GraphModule)
return compiled_fn, fw_metadata
- if not hasattr(compiled_fn, "_boxed_call"):
- compiled_fn = make_boxed_func(compiled_fn)
-
return compiled_fn
@@ -925,7 +922,7 @@ def aot_module_simplified(
# the boxed calling convention, but aot_module_simplified somehow
# historically returned a function that was not the boxed calling
# convention. This should get fixed...
- def forward(*runtime_args):
+ def forward(*runtime_args: Tuple[Any]):
full_args = []
full_args.extend(params_flat)
full_args.extend(runtime_args) | 2.41.0 |
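A toy sketch (not PyTorch internals) of why the boxed, list-based convention matters: unpacking into `*args` pins the inputs in the caller's frame, while a single list argument lets the callee drop those references early.

```python
from typing import Any, List

def boxed_fn(args: List[Any]):
    # Take local references, then clear the shared list so the caller's
    # frame no longer keeps the (potentially large) inputs alive.
    local = list(args)
    args.clear()
    return [t * 2 for t in local]

inputs: List[Any] = [1.0, 2.0, 3.0]
out = boxed_fn(inputs)
assert inputs == []  # the callee was able to release the caller-visible references
```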
510afb8857e6565612862496a6478733fe7b8db | Wed, 10 Apr 2024 17:53:07 -0700 | [PATCH 0005/1000] [aot] refactor runtime_wrapper's epilogue args access (#123674) | I want runtime_wrapper args to be stealable by call_func_at_runtime_with_args, since the args may contain activations which we don't want to hold alive in this scope. The args to runtime_wrapper **should always be** from a list created within aot_autograd, so it **should always be** safe to steal them: https://github.com/pytorch/pytorch/blob/a4a49f77b8c45ea459263c2242ab391b3d0577f2/torch/_functorch/aot_autograd.py#L928-L932 There are some accesses after we execute the compiled_fn, but those index accesses are already inferred at compile time. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123674 Approved by: https://github.com/jansel, https://github.com/bdhirsh ghstack dependencies: #123630 | diff --git a/torch/_functorch/_aot_autograd/runtime_wrappers.py b/torch/_functorch/_aot_autograd/runtime_wrappers.py
index 1ef2df56a2..3d11c01fe9 100644
--- a/torch/_functorch/_aot_autograd/runtime_wrappers.py
+++ b/torch/_functorch/_aot_autograd/runtime_wrappers.py
@@ -72,8 +72,29 @@ def create_runtime_wrapper(
if not hasattr(compiled_fn, "_boxed_call"):
compiled_fn = make_boxed_func(compiled_fn)
+ # Note [Inputs needed in runtime epilogue after list clearing]
+ # In Python functions, you can't free the input arguments of a function within the scope of that function. A workaround is to
+ # wrap the input arguments in a list, and clear the list from within the function.
+ # Here, this is implemented as `call_func_at_runtime_with_args(..., steal_args=True)`.
+ #
+ # This is needed for Compiled Autograd since some of the inputs (activations) should be freed early.
+ # However, we cannot blindly clear the entire list, because AOTAutograd may need access to some of the graph inputs
+ # **after** the compiled function has finished running. There are two main cases:
+ # (1) Input mutations: If there are an input mutations that we must run outside of the graph, we need access to the input.
+ # (2) Output aliasing: Outputs that aliases graph inputs generally must be regenerated outside of the `autograd.Function`,
+ # and doing so requires us accessing the corresponding input after the compiled artifact has run.
+ epilogue_args_idx = []
+ epilogue_args_idx.extend(runtime_metadata.mutated_inp_runtime_indices)
+ num_tokens = len(runtime_metadata.tokens)
+ for info in runtime_metadata.output_info:
+ if (
+ info.output_type == OutputType.alias_of_input
+ or info.output_type == OutputType.is_input
+ ):
+ assert isinstance(info.base_idx, int)
+ epilogue_args_idx.append(info.base_idx + num_tokens)
+
def runtime_wrapper(args: List[Any]):
- num_tokens = len(runtime_metadata.tokens)
if config.unlift_effect_tokens:
assert num_tokens == 0
elif num_tokens > 0:
@@ -81,6 +102,9 @@ def create_runtime_wrapper(
# NOTE: this keeps an extra reference to the old args until the end of this function
args = [[None] * num_tokens, *args]
+ # stash a ref to each input tensor we plan to use after the compiled function
+ orig_inputs = {i: args[i] for i in epilogue_args_idx}
+
if trace_joint:
args_ = list(args)
# See Note [Detaching inputs that never need gradients]
@@ -89,9 +113,7 @@ def create_runtime_wrapper(
args_[idx] = args_[idx].detach()
with torch.autograd._force_original_view_tracking(True):
all_outs = call_func_at_runtime_with_args(
- compiled_fn,
- args_,
- disable_amp=disable_amp,
+ compiled_fn, args_, disable_amp=disable_amp, steal_args=True
)
else:
# When we have an inference graph, we run with torch.no_grad.
@@ -101,16 +123,13 @@ def create_runtime_wrapper(
if torch.is_grad_enabled():
with torch.no_grad():
all_outs = call_func_at_runtime_with_args(
- compiled_fn,
- args,
- disable_amp=disable_amp,
+ compiled_fn, args, disable_amp=disable_amp, steal_args=True
)
else:
all_outs = call_func_at_runtime_with_args(
- compiled_fn,
- args,
- disable_amp=disable_amp,
+ compiled_fn, args, disable_amp=disable_amp, steal_args=True
)
+ del args
num_mutated_runtime_inps = runtime_metadata.num_mutated_inp_runtime_indices
num_intermediate_bases = runtime_metadata.num_intermediate_bases
@@ -144,7 +163,7 @@ def create_runtime_wrapper(
meta = runtime_metadata.input_info[inpt_idx]
if not meta.mutates_data and not meta.mutates_metadata:
continue
- original_inpt = args[inpt_idx]
+ original_inpt = orig_inputs[inpt_idx]
updated_inpt = updated_inputs[i]
if meta.mutates_storage_metadata:
# mutates_storage_metadata means our input saw a x.set_(y) call.
@@ -237,14 +256,14 @@ def create_runtime_wrapper(
o_grad = runtime_metadata.output_info[i].requires_grad
if info.output_type == OutputType.alias_of_input:
- aliased_base_tensor = args[info.base_idx + num_tokens] # type: ignore[index]
+ aliased_base_tensor = orig_inputs[info.base_idx + num_tokens] # type: ignore[index]
regenerated_out = gen_alias_from_base(
aliased_base_tensor, o_, o_grad
)
fw_outs_including_aliases.append(regenerated_out)
continue
elif info.output_type == OutputType.is_input:
- aliased_base_tensor = args[info.base_idx + num_tokens] # type: ignore[index]
+ aliased_base_tensor = orig_inputs[info.base_idx + num_tokens] # type: ignore[index]
regenerated_out = aliased_base_tensor
fw_outs_including_aliases.append(regenerated_out)
continue | 2.41.0 |
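A toy sketch of the pattern this change introduces: before the compiled function steals and clears the argument list, stash references only to the inputs the epilogue will still need (mutated inputs and inputs that outputs alias). Names here are illustrative, not the actual AOTAutograd helpers.

```python
from typing import Any, Callable, Dict, List

def run_with_epilogue(compiled_fn: Callable[[List[Any]], List[Any]],
                      args: List[Any],
                      epilogue_args_idx: List[int]) -> List[Any]:
    # Keep only what the epilogue needs; everything else can be freed by the callee.
    orig_inputs: Dict[int, Any] = {i: args[i] for i in epilogue_args_idx}
    outs = compiled_fn(args)  # boxed call; compiled_fn may clear `args`
    # ... the epilogue would read orig_inputs[i] here instead of args[i] ...
    del orig_inputs
    return outs
```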
00282fecfcb53790aebfb24cc48a8703577778e | Wed, 10 Apr 2024 18:33:29 -0700 | [PATCH 0006/1000] [c10d] make monitorThread sleep when we try to dump (#123788) | Summary: We separated the FR dump logic from the desync debug logic, so we no longer set collectiveDebugInfoMode_ to true when we just need an FR dump. That's why the monitor thread did not sleep and tried to kill the process without waiting for the dump. The fix is simple: we should sleep whenever shouldDump_ is true. Test Plan: Existing unit tests Pull Request resolved: https://github.com/pytorch/pytorch/pull/123788 Approved by: https://github.com/wconstab | diff --git a/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp b/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp
index d9f9e6e574..def79cde2b 100644
--- a/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp
+++ b/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp
@@ -1268,6 +1268,7 @@ void ProcessGroupNCCL::heartbeatMonitor() {
lastTimePollStore = currentTime;
if (globalStore_->check({std::string(EXCEPTION_DUMP)})) {
int timeOutRank = -1;
+ shouldDump_.store(true);
try {
auto vec = globalStore_->get(std::string(EXCEPTION_DUMP));
TORCH_CHECK_WITH(
@@ -1312,6 +1313,7 @@ void ProcessGroupNCCL::heartbeatMonitor() {
if (heartbeat != heartBeatCounter) {
heartBeatCounter = heartbeat;
} else {
+ shouldDump_.store(true);
// No heartbeat increase detected and timeout.
errorMsg = c10::str(
logPrefix(),
@@ -1388,7 +1390,8 @@ void ProcessGroupNCCL::heartbeatMonitor() {
// Case two: desync might be slow or get stuck. Or we get stuck in
// destructors, we will sleep for some time before calling std::abort() to
// kill the whole process.
- if ((terminateProcessGroup_.load() || collectiveDebugInfoMode_.load()) &&
+ if ((terminateProcessGroup_.load() || collectiveDebugInfoMode_.load() ||
+ shouldDump_.load()) &&
!terminateHeartbeatMonitorThread_.load()) {
// Leave another two mins for desync report generation or process group
// destroy. | 2.41.0 |
ac99d539be35e806d8d719fa69ceddaf63c6373 | Thu, 11 Apr 2024 08:56:02 +0000 | [PATCH 0007/1000] Only initialize state if needed in SGD (#123757) | Fixes [T184381726](https://www.internalfb.com/intern/tasks/?t=184381726) Pull Request resolved: https://github.com/pytorch/pytorch/pull/123757 Approved by: https://github.com/janeyx99 | diff --git a/test/inductor/test_compiled_optimizers.py b/test/inductor/test_compiled_optimizers.py
index 7c93f326f4..d076f27b17 100644
--- a/test/inductor/test_compiled_optimizers.py
+++ b/test/inductor/test_compiled_optimizers.py
@@ -310,7 +310,8 @@ def make_recompile_test(optim_cls, closure=None, kernel_count=2, **kwargs):
# perturb state to force recompile
# Adagrad doesn't reinitialize state on each step
- if optim_cls is Adagrad:
+ # SGD has an empty state
+ if optim_cls in (Adagrad, SGD):
opt_compiled.param_groups[0]["lr"] = 0.02
elif optim_cls is Adam: # ensure we are guarding on the data_ptr of states
state_tensor = opt_compiled.state[
diff --git a/test/test_optim.py b/test/test_optim.py
index 49c4e86464..d11fe8d42f 100644
--- a/test/test_optim.py
+++ b/test/test_optim.py
@@ -1582,6 +1582,36 @@ class TestOptimRenewed(TestCase):
optim.step()
self.assertTrue(mocked_foreach_impl.called)
+ @optims(optim_db, dtypes=[torch.float32])
+ def test_non_empty_state(self, device, dtype, optim_info):
+ # There are internal tests that check that the state is not empty
+ optim_cls = optim_info.optim_cls
+ model = torch.nn.Linear(5, 5)
+ model.to(dtype=dtype, device=device)
+ inpt = torch.rand(2, 5, dtype=dtype, device=device)
+
+ for optim_input in optim_info.optim_inputs_func(device=device):
+ optim = optim_cls(model.parameters(), **optim_input.kwargs)
+ optim.zero_grad()
+ output = model(inpt)
+ loss = output.sum()
+ loss.backward()
+
+ if optim_info.only_supports_sparse_grads:
+ for param in model.parameters():
+ if param.grad is not None:
+ param.grad = param.grad.to_sparse()
+
+ if optim_info.step_requires_closure:
+ optim.step(lambda: 1.0)
+ else:
+ optim.step()
+
+ for state in optim.state.values():
+ self.assertGreater(len(state), 0)
+
+
+
instantiate_device_type_tests(TestOptimRenewed, globals(), allow_mps=True)
diff --git a/torch/optim/sgd.py b/torch/optim/sgd.py
index ca9985dc9d..7002d98502 100644
--- a/torch/optim/sgd.py
+++ b/torch/optim/sgd.py
@@ -52,8 +52,8 @@ class SGD(Optimizer):
if p.grad.is_sparse:
has_sparse_grad = True
- state = self.state[p]
if group["momentum"] != 0:
+ state = self.state[p]
momentum_buffer_list.append(state.get('momentum_buffer'))
return has_sparse_grad | 2.41.0 |
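A quick sketch of the behavioural difference, assuming a build that includes this change: plain SGD no longer materializes per-parameter state entries, while SGD with momentum still creates a `momentum_buffer`.

```python
import torch

p = torch.nn.Parameter(torch.randn(4))
p.grad = torch.randn(4)

plain = torch.optim.SGD([p], lr=0.1)          # momentum == 0
plain.step()
print(len(plain.state))                       # 0 with this fix: no empty state entry is created

with_momentum = torch.optim.SGD([p], lr=0.1, momentum=0.9)
with_momentum.step()
print(list(with_momentum.state[p].keys()))    # ['momentum_buffer']
```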
b7741546b1ee53e5aa3768616c50eab72372a3a | Thu, 11 Apr 2024 09:02:31 +0000 | [PATCH 0008/1000] Fixed arange decomp for float dtype (#121013) | ## Description: - [x] Fixed arange decomp for float dtype - [x] Added a test ## Current state Arange graph and C++ generated code are not optimal when arange is created directly using float32 dtype: ```python import torch def func(x): s = x.shape[-1] a = torch.arange(s, dtype=torch.float32) return s + a c_func = torch.compile(func) out = c_func(torch.rand(10)) ``` Graph on `main`: ``` ===== Forward graph 0 ===== /pytorch/torch/fx/_lazy_graph_module.py class <lambda>(torch.nn.Module): def forward(self): # File: check_arange_decomp.py:8 in func, code: a = torch.arange(s, dtype=torch.float32) iota: "i64[10]" = torch.ops.prims.iota.default(10, start = 0, step = 1, dtype = torch.int64, device = device(type='cpu'), requires_grad = False) convert_element_type: "f64[10]" = torch.ops.prims.convert_element_type.default(iota, torch.float64); iota = None mul: "f64[10]" = torch.ops.aten.mul.Tensor(convert_element_type, 1); convert_element_type = None add: "f64[10]" = torch.ops.aten.add.Tensor(mul, 0); mul = None convert_element_type_1: "f32[10]" = torch.ops.prims.convert_element_type.default(add, torch.float32); add = None # File: check_arange_decomp.py:9 in func, code: return s + a add_1: "f32[10]" = torch.ops.aten.add.Tensor(convert_element_type_1, 10); convert_element_type_1 = None return (add_1,) ===== AFTER POST GRAD ===== /pytorch/torch/fx/_lazy_graph_module.py class <lambda>(torch.nn.Module): def forward(self): # File: check_arange_decomp.py:15 in func, code: a = torch.arange(s, dtype=torch.float32) iota: "i64[10]" = torch.ops.prims.iota.default(10, start = 0, step = 1, dtype = torch.int64, device = device(type='cpu'), requires_grad = False) convert_element_type: "f64[10]" = torch.ops.prims.convert_element_type.default(iota, torch.float64); iota = None mul: "f64[10]" = torch.ops.aten.mul.Tensor(convert_element_type, 1); convert_element_type = None add: "f64[10]" = torch.ops.aten.add.Tensor(mul, 0); mul = None convert_element_type_1: "f32[10]" = torch.ops.prims.convert_element_type.default(add, torch.float32); add = None # File: check_arange_decomp.py:16 in func, code: return s + a add_1: "f32[10]" = torch.ops.aten.add.Tensor(convert_element_type_1, 10); convert_element_type_1 = None return (add_1,) ``` and C++ ```c++ extern "C" void kernel(float* out_ptr0) { { #pragma GCC ivdep for(long x0=static_cast<long>(0L); x0<static_cast<long>(10L); x0+=static_cast<long>(1L)) { auto tmp0 = c10::convert<long>(x0); auto tmp1 = c10::convert<double>(tmp0); // <---- useless ops auto tmp2 = static_cast<double>(1.0); // <---- auto tmp3 = decltype(tmp1)(tmp1 * tmp2); // <---- auto tmp4 = static_cast<double>(0.0); // <---- auto tmp5 = decltype(tmp3)(tmp3 + tmp4); // <---- auto tmp6 = c10::convert<float>(tmp5); auto tmp7 = static_cast<float>(10.0); auto tmp8 = decltype(tmp6)(tmp6 + tmp7); out_ptr0[static_cast<long>(x0)] = tmp8; } } } ``` However, if we manually create arange on i64 and then put to float32, generated graph and C++ code are more natural and benefit of a speed-up. 
```python import torch def func(x): s = x.shape[-1] a = torch.arange(s).to(dtype=torch.float32) return s + a c_func = torch.compile(func) out = c_func(torch.rand(10)) ``` Graph on `main`: ``` ===== Forward graph 0 ===== /pytorch/torch/fx/_lazy_graph_module.py class <lambda>(torch.nn.Module): def forward(self): # File: check_arange_decomp.py:14 in func, code: a = torch.arange(s).to(dtype=torch.float32) iota: "i64[10]" = torch.ops.prims.iota.default(10, start = 0, step = 1, dtype = torch.int64, device = device(type='cpu'), requires_grad = False) convert_element_type: "f32[10]" = torch.ops.prims.convert_element_type.default(iota, torch.float32); iota = None # File: check_arange_decomp.py:15 in func, code: return s + a add: "f32[10]" = torch.ops.aten.add.Tensor(convert_element_type, 10); convert_element_type = None return (add,) ===== AFTER POST GRAD ===== /pytorch/torch/fx/_lazy_graph_module.py class <lambda>(torch.nn.Module): def forward(self): # File: check_arange_decomp.py:21 in func, code: a = torch.arange(s).to(dtype=torch.float32) iota: "i64[10]" = torch.ops.prims.iota.default(10, start = 0, step = 1, dtype = torch.int64, device = device(type='cpu'), requires_grad = False) convert_element_type: "f32[10]" = torch.ops.prims.convert_element_type.default(iota, torch.float32); iota = None # File: check_arange_decomp.py:22 in func, code: return s + a add: "f32[10]" = torch.ops.aten.add.Tensor(convert_element_type, 10); convert_element_type = None return (add,) ``` C++ on `main` ```c++ extern "C" void kernel(float* out_ptr0) { { #pragma GCC ivdep for(long x0=static_cast<long>(0L); x0<static_cast<long>(10L); x0+=static_cast<long>(1L)) { auto tmp0 = c10::convert<long>(x0); auto tmp1 = c10::convert<float>(tmp0); auto tmp2 = static_cast<float>(10.0); auto tmp3 = decltype(tmp1)(tmp1 + tmp2); out_ptr0[static_cast<long>(x0)] = tmp3; } } } ``` For example, the speed-up seen on upsample_nearest2d on cpu: ``` [----------------------------------------------------------------------------------------------------------------------------------------------- Interpolate, cpu ----------------------------------------------------------------------------------------------------------------------------------------------] | Eager (2.3.0a0+gitb4324ed) PR | Compiled (2.3.0a0+gitb4324ed) PR | Compiled (2.3.0a0+git0d1e705) Nightly | speed-up PR vs Nightly | Eager (2.3.0a0+git0d1e705) Nightly 1 threads: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ Input (1, 3, 500, 400), torch.uint8, torch.contiguous_format | mode: nearest, align_corners: None, osize: (256, 256) | 287.988 (+-10.399) | 200.034 (+-8.630) | 285.143 (+-8.412) | 1.425 (+-0.000) | 287.991 (+-11.302) Input (1, 3, 500, 400), torch.uint8, torch.channels_last | mode: nearest, align_corners: None, osize: (256, 256) | 697.206 (+-27.033) | 171.650 (+-7.381) | 193.280 (+-5.840) | 1.126 (+-0.000) | 701.642 (+-26.461) Input (1, 3, 500, 400), torch.float32, torch.contiguous_format | mode: nearest, align_corners: None, osize: (256, 256) | 149.149 (+-6.045) | 222.780 (+-6.852) | 299.968 (+-12.354) | 1.346 (+-0.000) | 145.055 (+-7.232) Input (1, 3, 500, 400), torch.float32, torch.channels_last | mode: nearest, align_corners: None, osize: (256, 256) | 596.741 (+-27.970) | 205.923 (+-8.648) | 233.912 (+-7.742) | 1.136 (+-0.000) | 
598.000 (+-25.630) Input (4, 3, 500, 400), torch.uint8, torch.contiguous_format | mode: nearest, align_corners: None, osize: (256, 256) | 1095.734 (+-51.658) | 700.850 (+-24.852) | 1044.255 (+-38.216) | 1.490 (+-0.000) | 1097.977 (+-35.521) Input (4, 3, 500, 400), torch.uint8, torch.channels_last | mode: nearest, align_corners: None, osize: (256, 256) | 2741.813 (+-122.917) | 583.073 (+-16.998) | 665.029 (+-36.331) | 1.141 (+-0.000) | 2722.388 (+-116.263) Input (4, 3, 500, 400), torch.float32, torch.contiguous_format | mode: nearest, align_corners: None, osize: (256, 256) | 578.183 (+-37.266) | 833.295 (+-42.264) | 1131.341 (+-54.710) | 1.358 (+-0.000) | 584.953 (+-45.549) Input (4, 3, 500, 400), torch.float32, torch.channels_last | mode: nearest, align_corners: None, osize: (256, 256) | 2332.508 (+-103.556) | 840.194 (+-47.664) | 935.625 (+-47.467) | 1.114 (+-0.000) | 2334.314 (+-91.644) Input (1, 3, 1200, 1300), torch.uint8, torch.contiguous_format | mode: nearest, align_corners: None, osize: (200, 300) | 272.631 (+-11.348) | 195.988 (+-5.748) | 274.021 (+-9.475) | 1.398 (+-0.000) | 272.752 (+-12.716) Input (1, 3, 1200, 1300), torch.uint8, torch.channels_last | mode: nearest, align_corners: None, osize: (200, 300) | 640.409 (+-25.465) | 164.773 (+-7.372) | 185.018 (+-8.349) | 1.123 (+-0.000) | 639.390 (+-30.761) Input (1, 3, 1200, 1300), torch.float32, torch.contiguous_format | mode: nearest, align_corners: None, osize: (200, 300) | 158.602 (+-6.593) | 220.478 (+-6.809) | 286.376 (+-8.981) | 1.299 (+-0.000) | 158.557 (+-6.143) Input (1, 3, 1200, 1300), torch.float32, torch.channels_last | mode: nearest, align_corners: None, osize: (200, 300) | 548.903 (+-22.889) | 202.788 (+-9.158) | 227.404 (+-8.995) | 1.121 (+-0.000) | 554.096 (+-21.330) Input (4, 3, 1200, 1300), torch.uint8, torch.contiguous_format | mode: nearest, align_corners: None, osize: (200, 300) | 1036.061 (+-35.285) | 680.728 (+-30.925) | 986.254 (+-42.732) | 1.449 (+-0.000) | 1038.718 (+-43.070) Input (4, 3, 1200, 1300), torch.uint8, torch.channels_last | mode: nearest, align_corners: None, osize: (200, 300) | 2504.520 (+-125.805) | 550.067 (+-21.383) | 628.000 (+-27.589) | 1.142 (+-0.000) | 2523.134 (+-113.336) Input (4, 3, 1200, 1300), torch.float32, torch.contiguous_format | mode: nearest, align_corners: None, osize: (200, 300) | 1058.188 (+-57.853) | 1216.427 (+-76.160) | 1380.231 (+-98.939) | 1.135 (+-0.000) | 1057.031 (+-66.075) Input (4, 3, 1200, 1300), torch.float32, torch.channels_last | mode: nearest, align_corners: None, osize: (200, 300) | 2305.911 (+-116.864) | 1080.189 (+-79.934) | 1141.561 (+-67.959) | 1.057 (+-0.000) | 2306.606 (+-121.544) Input (1, 3, 300, 400), torch.uint8, torch.contiguous_format | mode: nearest, align_corners: None, osize: (600, 700) | 1689.489 (+-60.579) | 1077.401 (+-44.948) | 1634.264 (+-64.340) | 1.517 (+-0.000) | 1693.945 (+-67.998) Input (1, 3, 300, 400), torch.uint8, torch.channels_last | mode: nearest, align_corners: None, osize: (600, 700) | 4198.368 (+-179.096) | 886.656 (+-30.355) | 1028.568 (+-46.310) | 1.160 (+-0.000) | 4174.351 (+-141.020) Input (1, 3, 300, 400), torch.float32, torch.contiguous_format | mode: nearest, align_corners: None, osize: (600, 700) | 716.572 (+-51.954) | 1175.864 (+-52.191) | 1674.373 (+-51.815) | 1.424 (+-0.000) | 715.724 (+-41.104) Input (1, 3, 300, 400), torch.float32, torch.channels_last | mode: nearest, align_corners: None, osize: (600, 700) | 3604.989 (+-132.489) | 1096.933 (+-54.290) | 1270.347 (+-60.932) | 1.158 (+-0.000) | 3601.864 
(+-140.218) Input (4, 3, 300, 400), torch.uint8, torch.contiguous_format | mode: nearest, align_corners: None, osize: (600, 700) | 6721.610 (+-355.997) | 4203.213 (+-134.362) | 6423.763 (+-225.311) | 1.528 (+-0.000) | 6715.626 (+-288.233) Input (4, 3, 300, 400), torch.uint8, torch.channels_last | mode: nearest, align_corners: None, osize: (600, 700) | 16695.467 (+-709.620) | 3460.013 (+-149.456) | 4001.810 (+-218.093) | 1.157 (+-0.000) | 16621.138 (+-713.320) Input (4, 3, 300, 400), torch.float32, torch.contiguous_format | mode: nearest, align_corners: None, osize: (600, 700) | 3020.017 (+-147.314) | 4743.164 (+-135.850) | 6709.494 (+-281.025) | 1.415 (+-0.000) | 3015.602 (+-105.852) Input (4, 3, 300, 400), torch.float32, torch.channels_last | mode: nearest, align_corners: None, osize: (600, 700) | 14456.688 (+-752.839) | 5150.893 (+-201.571) | 5737.315 (+-138.011) | 1.114 (+-0.000) | 14464.472 (+-720.027) Times are in microseconds (us). ``` ## PR This PR improves arange decomp such that `arange(s, dtype=torch.float32)` removing extra dtype conversion to double: Code: ```python import torch def func(x): s = x.shape[-1] a = torch.arange(s, dtype=torch.float32) return s + a c_func = torch.compile(func) out = c_func(torch.rand(10)) ``` Graph on this PR: ``` ===== Forward graph 0 ===== /pytorch/torch/fx/_lazy_graph_module.py class <lambda>(torch.nn.Module): def forward(self): # File: check_arange_decomp.py:15 in func, code: a = torch.arange(s, dtype=torch.float32) iota: "i64[10]" = torch.ops.prims.iota.default(10, start = 0, step = 1, dtype = torch.int64, device = device(type='cpu'), requires_grad = False) mul: "i64[10]" = torch.ops.aten.mul.Tensor(iota, 1); iota = None add: "i64[10]" = torch.ops.aten.add.Tensor(mul, 0); mul = None convert_element_type: "f32[10]" = torch.ops.prims.convert_element_type.default(add, torch.float32); add = None # File: check_arange_decomp.py:16 in func, code: return s + a add_1: "f32[10]" = torch.ops.aten.add.Tensor(convert_element_type, 10); convert_element_type = None return (add_1,) ===== AFTER POST GRAD ===== /pytorch/torch/fx/_lazy_graph_module.py class <lambda>(torch.nn.Module): def forward(self): # File: check_arange_decomp.py:16 in func, code: a = torch.arange(s, dtype=torch.float32) iota: "i64[10]" = torch.ops.prims.iota.default(10, start = 0, step = 1, dtype = torch.int64, device = device(type='cpu'), requires_grad = False) mul: "i64[10]" = torch.ops.aten.mul.Tensor(iota, 1); iota = None add: "i64[10]" = torch.ops.aten.add.Tensor(mul, 0); mul = None convert_element_type: "f32[10]" = torch.ops.prims.convert_element_type.default(add, torch.float32); add = None # File: check_arange_decomp.py:17 in func, code: return s + a add_1: "f32[10]" = torch.ops.aten.add.Tensor(convert_element_type, 10); convert_element_type = None return (add_1,) ``` and C++ on this PR: ```c++ extern "C" void kernel(float* out_ptr0) { { #pragma GCC ivdep for(long x0=static_cast<long>(0L); x0<static_cast<long>(10L); x0+=static_cast<long>(1L)) { auto tmp0 = c10::convert<long>(x0); auto tmp1 = c10::convert<float>(tmp0); auto tmp2 = static_cast<float>(10.0); auto tmp3 = decltype(tmp1)(tmp1 + tmp2); out_ptr0[static_cast<long>(x0)] = tmp3; } } } ``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/121013 Approved by: https://github.com/peterbell10 | diff --git a/test/test_decomp.py b/test/test_decomp.py
index 4e482a92d5..39d0c2eef2 100644
--- a/test/test_decomp.py
+++ b/test/test_decomp.py
@@ -38,6 +38,7 @@ from torch._ops import DispatchKey
import itertools
import functools
from functools import partial
+import re
import unittest
aten = torch.ops.aten
@@ -630,6 +631,45 @@ class TestDecomp(TestCase):
res = torch._decomp.decompositions.native_batch_norm(input, weight, bias, mean, var, False, 1, 1e-05)
self.assertEqual(shape, res[0].shape)
+ def test_arange_graph(self, device):
+ from torch.fx.experimental.proxy_tensor import make_fx
+
+ def func(x, start):
+ le = x.shape[-1]
+ if start is None:
+ a = torch.arange(le, dtype=torch.float32, device=x.device)
+ else:
+ a = torch.arange(start, le, dtype=torch.float32, device=x.device)
+ return a
+
+ pattern = r", device = device\(.+\), requires_grad = False"
+
+ cfunc = make_fx(func, decomposition_table=decomposition_table)
+ fx_g = cfunc(torch.rand(10, device=device), None)
+ fx_g_code = fx_g.code.strip()
+ # Remove device and requires_grad
+ fx_g_code = re.sub(pattern, "", fx_g_code)
+ self.assertExpectedInline(fx_g_code, """\
+def forward(self, x_1, start_1):
+ iota = torch.ops.prims.iota.default(10, start = 0, step = 1, dtype = torch.int64)
+ mul = torch.ops.prims.mul.default(iota, 1); iota = None
+ add = torch.ops.prims.add.default(mul, 0); mul = None
+ convert_element_type = torch.ops.prims.convert_element_type.default(add, torch.float32); add = None
+ return convert_element_type""")
+
+ fx_g = cfunc(torch.rand(10, device=device), 1)
+ fx_g_code = fx_g.code.strip()
+ # Remove device and requires_grad
+ fx_g_code = re.sub(pattern, "", fx_g_code)
+ self.assertExpectedInline(fx_g_code, """\
+def forward(self, x_1, start_1):
+ iota = torch.ops.prims.iota.default(9, start = 0, step = 1, dtype = torch.int64)
+ mul = torch.ops.prims.mul.default(iota, 1); iota = None
+ add = torch.ops.prims.add.default(mul, 1); mul = None
+ convert_element_type = torch.ops.prims.convert_element_type.default(add, torch.float32); add = None
+ return convert_element_type""")
+
+
class DecompCrossRefMode(TorchDispatchMode):
def __init__(self, test_case, saved_precision, saved_rel_tol, dtype, run_all):
self.test_case = test_case
diff --git a/torch/_inductor/codegen/cpp.py b/torch/_inductor/codegen/cpp.py
index 31dbe27c22..0fe4b7261a 100644
--- a/torch/_inductor/codegen/cpp.py
+++ b/torch/_inductor/codegen/cpp.py
@@ -1830,8 +1830,8 @@ class CppKernel(Kernel):
if cse_var == var:
if is_to_lowp_dtype(expr):
m = re.search(r"tmp\d+", expr)
- assert m
- fp32_cse_var_name = m.group()
+ if m is not None:
+ fp32_cse_var_name = m.group()
if fp32_cse_var_name:
for cse_var in cache.values():
if cse_var.name == fp32_cse_var_name:
diff --git a/torch/_inductor/fx_passes/joint_graph.py b/torch/_inductor/fx_passes/joint_graph.py
index 3be10498f8..df89037067 100644
--- a/torch/_inductor/fx_passes/joint_graph.py
+++ b/torch/_inductor/fx_passes/joint_graph.py
@@ -247,7 +247,7 @@ def constant_fold_uniform_value(gm: torch.fx.GraphModule):
):
torch._check(runtime_size == compile_time_size)
- # zeros, and ones just get traced into full, so we insert those
+ # zeros and ones just get traced into full, so we insert those
new_node = graph.call_function(
aten.full.default,
args=(node_replacements_shapes[node], value),
diff --git a/torch/_refs/__init__.py b/torch/_refs/__init__.py
index 8e1d50a00c..eeb4ad00d3 100644
--- a/torch/_refs/__init__.py
+++ b/torch/_refs/__init__.py
@@ -4932,9 +4932,10 @@ def arange(
lambda: f"step must be finite but got {step}",
)
+ args = (start, end, step)
+ integer_args = builtins.all(isinstance(arg, IntLike) for arg in args)
+
if dtype is None:
- args = (start, end, step)
- integer_args = builtins.all(isinstance(arg, IntLike) for arg in args)
dtype = torch.int64 if integer_args else torch.get_default_dtype()
is_integer = utils.is_integer_dtype(dtype)
@@ -4962,7 +4963,6 @@ def arange(
requires_grad=requires_grad,
)
- computation_dtype = utils.get_acc_type(dtype, device)
index = prims.iota(
length,
start=0,
@@ -4971,6 +4971,10 @@ def arange(
device=device,
requires_grad=False,
)
+
+ computation_dtype = (
+ torch.long if integer_args else utils.get_acc_type(dtype, device)
+ )
index = _maybe_convert_to_dtype(index, computation_dtype)
result = start + step * index
result = _maybe_convert_to_dtype(result, dtype) | 2.41.0 |
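
The heart of the `torch/_refs` change above is simply where the accumulation dtype is chosen. The following standalone sketch is not the actual PyTorch implementation: `prims.iota` is stood in for by an `int64` `torch.arange`, and the float accumulation dtype (really `utils.get_acc_type(dtype, device)`) is simplified to `float64`. It only mirrors the key point: when `start`/`end`/`step` are all Python ints, the index arithmetic stays in `int64` and only the final cast produces the requested float dtype, so no float64 round-trip appears in the graph.

```python
import math
import torch

def arange_decomp_sketch(start, end, step, dtype):
    # Illustrative only; assumes step > 0.
    integer_args = all(isinstance(a, int) for a in (start, end, step))
    length = math.ceil((end - start) / step)
    # Stand-in for prims.iota: an int64 index tensor of the right length.
    index = torch.arange(length, dtype=torch.int64)
    # After the patch: all-integer inputs accumulate in int64 instead of float64.
    computation_dtype = torch.int64 if integer_args else torch.float64
    index = index.to(computation_dtype)
    result = start + step * index
    return result.to(dtype)

print(arange_decomp_sketch(0, 10, 1, torch.float32))  # no float64 intermediate
```
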
798f5bf0d58fb9655c4da9c0a8bc1ec8af31aea | Wed, 10 Apr 2024 23:23:28 -0700 | [PATCH 0009/1000] Add Quantization recipe filter per operator type for x86_inductor_quantizer (#122775) |
**Summary**
Default recipes are enabled in `X86InductorQuantizer`, and requests have come in to customize recipes based on these defaults:
- Avoid annotation propagation and restrict annotation to `conv`/`linear` only.
- Add `matmul` to the quantization recipes, noting that it's not a general recipe but tailored to meet accuracy criteria for specific models.

To meet these requests, this PR introduces the interfaces `set_function_type_qconfig` and `set_module_type_qconfig`:
- `set_function_type_qconfig` accepts a functional input such as `torch.nn.functional.linear` or `torch.matmul`; `set_module_type_qconfig` accepts an nn.Module input such as `torch.nn.Conv2d`.
- To disable the recipe for an operator, the user can simply exclude it from the list of operations via `quantizer.set_function_type_qconfig(op, None)`.
- To modify or extend the default recipe for an operator, the user can customize it via `quantizer.set_function_type_qconfig(op, config)`.

**Test Plan**
```
python -m pytest quantization/pt2e/test_x86inductor_quantizer.py -k test_filter_conv2d_recipe
python -m pytest quantization/pt2e/test_x86inductor_quantizer.py -k test_filter_linear_recipe
python -m pytest quantization/pt2e/test_x86inductor_quantizer.py -k test_filter_maxpool2d_recipe
```
Pull Request resolved: https://github.com/pytorch/pytorch/pull/122775
Approved by: https://github.com/jgong5, https://github.com/jerryzh168
| diff --git a/test/quantization/pt2e/test_x86inductor_quantizer.py b/test/quantization/pt2e/test_x86inductor_quantizer.py
index 06e2e6c9f9..c9df319bfd 100644
--- a/test/quantization/pt2e/test_x86inductor_quantizer.py
+++ b/test/quantization/pt2e/test_x86inductor_quantizer.py
@@ -1346,3 +1346,105 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase):
node_list,
is_qat=True,
)
+
+ @skipIfNoX86
+ def test_filter_conv2d_recipe(self):
+ """
+ Test removing conv2d from default recipe of X86InductorQuantizer.
+ """
+ with override_quantized_engine("x86"), torch.no_grad():
+ m = TestHelperModules.Conv2dUnaryModule(torch.nn.ReLU(inplace=False)).eval()
+ example_inputs = (torch.randn(2, 3, 16, 16),)
+ quantizer = X86InductorQuantizer().set_global(
+ xiq.get_default_x86_inductor_quantization_config()
+ )
+ quantizer.set_module_type_qconfig(torch.nn.Conv2d, None)
+ node_occurrence = {
+ # one for input and weight of the conv
+ torch.ops.quantized_decomposed.quantize_per_tensor.default: 0,
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default: 0,
+ # note: quantize op for weights are const propagated
+ torch.ops.quantized_decomposed.quantize_per_channel.default: 0,
+ torch.ops.quantized_decomposed.dequantize_per_channel.default: 0,
+ }
+ node_list = [
+ torch.ops.aten.conv2d.default,
+ torch.ops.aten.relu.default,
+ ]
+ self._test_quantizer(
+ m,
+ example_inputs,
+ quantizer,
+ node_occurrence,
+ node_list,
+ )
+
+ @skipIfNoX86
+ def test_filter_linear_recipe(self):
+ """
+ Test removing linear from default recipe of X86InductorQuantizer.
+ """
+ with override_quantized_engine("x86"), torch.no_grad():
+ m = TestHelperModules.LinearUnaryModule(
+ use_bias=True,
+ postop=nn.ReLU,
+ ).eval()
+ example_inputs = (torch.randn(2, 4),)
+ quantizer = X86InductorQuantizer().set_global(
+ xiq.get_default_x86_inductor_quantization_config()
+ )
+ quantizer.set_function_type_qconfig(torch.nn.functional.linear, None)
+ node_occurrence = {
+ # one for input and weight of the conv
+ torch.ops.quantized_decomposed.quantize_per_tensor.default: 0,
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default: 0,
+ # note: quantize op for weights are const propagated
+ torch.ops.quantized_decomposed.quantize_per_channel.default: 0,
+ torch.ops.quantized_decomposed.dequantize_per_channel.default: 0,
+ }
+ node_list = [
+ torch.ops.aten.linear.default,
+ torch.ops.aten.relu.default,
+ ]
+ self._test_quantizer(
+ m,
+ example_inputs,
+ quantizer,
+ node_occurrence,
+ node_list,
+ )
+
+ @skipIfNoX86
+ def test_filter_maxpool2d_recipe(self):
+ """
+ Test removing maxpool2d from default recipe of X86InductorQuantizer.
+ """
+ with override_quantized_engine("x86"), torch.no_grad():
+ m = TestHelperModules.Conv2dUnaryModule(torch.nn.ReLU(inplace=False)).eval()
+ example_inputs = (torch.randn(2, 3, 16, 16),)
+ quantizer = X86InductorQuantizer().set_global(
+ xiq.get_default_x86_inductor_quantization_config()
+ )
+ quantizer.set_function_type_qconfig(torch.nn.functional.max_pool2d, None)
+ node_occurrence = {
+ # one for input and weight of the conv
+ torch.ops.quantized_decomposed.quantize_per_tensor.default: 1,
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default: 1,
+ # note: quantize op for weights are const propagated
+ torch.ops.quantized_decomposed.quantize_per_channel.default: 0,
+ torch.ops.quantized_decomposed.dequantize_per_channel.default: 1,
+ }
+ node_list = [
+ torch.ops.quantized_decomposed.quantize_per_tensor.default,
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default,
+ torch.ops.aten.conv2d.default,
+ torch.ops.aten.relu.default,
+ torch.ops.aten.max_pool2d.default,
+ ]
+ self._test_quantizer(
+ m,
+ example_inputs,
+ quantizer,
+ node_occurrence,
+ node_list,
+ )
diff --git a/torch/ao/quantization/quantizer/x86_inductor_quantizer.py b/torch/ao/quantization/quantizer/x86_inductor_quantizer.py
index e83cf1e4da..8889cf2df0 100644
--- a/torch/ao/quantization/quantizer/x86_inductor_quantizer.py
+++ b/torch/ao/quantization/quantizer/x86_inductor_quantizer.py
@@ -2,8 +2,9 @@ import copy
import functools
import itertools
import operator
+import warnings
from dataclasses import dataclass
-from typing import Any, Dict, List, Optional, Sequence, Set, Tuple
+from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple
import torch
import torch.nn.functional as F
@@ -57,10 +58,10 @@ class _X86InductorQuantizationAnnotation(QuantizationAnnotation):
_is_output_of_quantized_pattern: bool = False
-# Operations that:
-# 1. Operations are optimized to run with int8 when int8 input provided.
-# 2. Operations do not support int8 input and produce fp32 output.
-int8_in_int8_out_ops_pt2e: Set = {
+# Operators that:
+# 1. Operators are optimized to run with int8 when int8 input provided.
+# 2. Operators do not support int8 input and produce fp32 output.
+int8_in_int8_out_ops: Set = {
torch.ops.aten.max_pool2d.default,
torch.ops.aten.cat.default,
torch.ops.aten.avg_pool2d.default,
@@ -68,14 +69,53 @@ int8_in_int8_out_ops_pt2e: Set = {
torch.ops.aten.flatten.using_ints,
}
+# Operators that support the int8 data type for quantization config propagation.
+# A superset of int8_in_int8_out_ops incorporating additional operators.
+propagation_quantizable_ops = int8_in_int8_out_ops
-# Operations support the int8 data type and exclude operations such as conv and linear.
-# A superset of int8_in_int8_out_ops_pt2e incorporating additional operators.
-quantizable_ops_pt2e = copy.deepcopy(int8_in_int8_out_ops_pt2e)
+# Operators support the int8 data type
+# and recipe is configured by default in X86InductorQuantizer.
+default_quantizable_ops = propagation_quantizable_ops | {
+ torch.ops.aten.conv2d.default,
+ torch.ops.aten.linear.default,
+}
+
+# A superset of default_quantizable_ops includes operators support the int8 data type
+# but not enabled by default recipe of X86InductorQuantizer.
+quantizable_ops = default_quantizable_ops
QUANT_ANNOTATION_KEY = "quantization_annotation"
+def _map_module_function_to_aten_operator_type():
+ module_function_to_aten_operator: Dict[Callable, torch._ops.OpOverloadPacket] = {}
+ map_list = (
+ ([torch.nn.Conv2d, F.conv2d], torch.ops.aten.conv2d.default),
+ ([torch.nn.Linear, F.linear], torch.ops.aten.linear.default),
+ ([torch.nn.MaxPool2d, F.max_pool2d], torch.ops.aten.max_pool2d.default),
+ (
+ [
+ torch.cat,
+ ],
+ torch.ops.aten.cat.default,
+ ),
+ ([torch.nn.AvgPool2d, F.avg_pool2d], torch.ops.aten.avg_pool2d.default),
+ (
+ [torch.nn.AdaptiveAvgPool2d, F.adaptive_avg_pool2d],
+ torch.ops.aten.adaptive_avg_pool2d.default,
+ ),
+ (
+ [
+ torch.flatten,
+ ],
+ torch.ops.aten.flatten.using_ints,
+ ),
+ )
+ for map_item in map_list:
+ module_function_to_aten_operator.update(dict.fromkeys(map_item[0], map_item[1])) # type: ignore[call-overload]
+ return module_function_to_aten_operator
+
+
def _mark_nodes_as_annotated(nodes: List[Node]):
for node in nodes:
if node is not None:
@@ -235,11 +275,14 @@ def _get_supported_config_and_operators() -> List[OperatorConfig]:
class X86InductorQuantizer(Quantizer):
supported_config_and_operators = _get_supported_config_and_operators()
+ module_function_to_aten_operator_type = _map_module_function_to_aten_operator_type()
def __init__(self):
super().__init__()
self.global_config: QuantizationConfig = None # type: ignore[assignment]
- self.operator_type_config: Dict[str, Optional[QuantizationConfig]] = {}
+ self.operator_type_qconfig: Dict[
+ torch._ops.OpOverloadPacket, Optional[QuantizationConfig]
+ ] = {}
@classmethod
def get_supported_quantization_configs(cls) -> List[QuantizationConfig]:
@@ -267,12 +310,62 @@ class X86InductorQuantizer(Quantizer):
self.global_config = quantization_config
return self
- def set_config_for_operator_type(
- self, operator_type: str, quantization_config: QuantizationConfig
- ):
- self.operator_type_config[operator_type] = quantization_config
+ def set_function_type_qconfig(
+ self,
+ function_type: Callable,
+ quantization_config: Optional[QuantizationConfig],
+ ) -> "X86InductorQuantizer":
+ if function_type in X86InductorQuantizer.module_function_to_aten_operator_type:
+ self._set_aten_operator_qconfig(
+ X86InductorQuantizer.module_function_to_aten_operator_type[
+ function_type
+ ],
+ quantization_config,
+ )
+ else:
+ warnings.warn(
+ f"function: Unable to customize quantization config for {function_type} by X86InductorQuantizer."
+ )
+ return self
+
+ def set_module_type_qconfig(
+ self,
+ module_type: torch.nn.Module,
+ quantization_config: Optional[QuantizationConfig],
+ ) -> "X86InductorQuantizer":
+ if module_type in X86InductorQuantizer.module_function_to_aten_operator_type:
+ self._set_aten_operator_qconfig(
+ X86InductorQuantizer.module_function_to_aten_operator_type[module_type],
+ quantization_config,
+ )
+ else:
+ warnings.warn(
+ f"Module: Unable to customize quantization config for {module_type} by X86InductorQuantizer."
+ )
+ return self
+
+ def _set_aten_operator_qconfig(
+ self,
+ operator_type: torch._ops.OpOverloadPacket,
+ quantization_config: Optional[QuantizationConfig],
+ ) -> "X86InductorQuantizer":
+ if operator_type in quantizable_ops:
+ self.operator_type_qconfig[operator_type] = quantization_config
+ else:
+ warnings.warn(
+ f"operator: Unable to quantize {operator} by X86InductorQuantizer."
+ )
return self
+ def _get_aten_operator_qconfig(
+ self,
+ operator_type: torch._ops.OpOverloadPacket,
+ ) -> Optional[QuantizationConfig]:
+ if operator_type in self.operator_type_qconfig:
+ assert operator_type in quantizable_ops
+ return self.operator_type_qconfig[operator_type]
+ return self.global_config if operator_type in default_quantizable_ops else None
+
def _annotate_conv_node_helper(
self,
conv_node: torch.fx.Node,
@@ -403,36 +496,30 @@ class X86InductorQuantizer(Quantizer):
we need to annotate the output of this pattern.
"""
- config = self.global_config
-
# Step1: Recipe of fusion patterns like conv/linear.
- if config.is_qat:
- # Annotate QAT specific pattern: mainly due to BN not folded in prepare_qat
- self._annotate_qat_conv2d_fusion_pattern(model, config)
-
- self._annotate_conv2d_fusion_pattern(model, config)
+ self._annotate_conv2d_fusion_pattern(model)
+ self._annotate_linear_fusion_pattern(model)
# Step2: Recipe to propagate annotation for patterns beside conv/linear.
# Go through all the nodes from start to end.
# Recipe refer to https://github.com/intel/intel-extension-for-pytorch/blob/
# 90d19323d96afc53fcc22ba5a7bb3fb07fdd6c1c/intel_extension_for_pytorch/quantization/_recipe.py#L538
for node in model.graph.nodes:
- self._annotation_propagation_quantizable_pattern(node, config)
+ self._annotate_propagation_quantizable_pattern(node)
# Step3: For quantizable ops, such as maxpool2d, we need to quantize its output if it is quantized
# in inputs. So, we can fuse dq-operator-q into a quantized op.
# Refer to https://github.com/intel/intel-extension-for-pytorch/blob/
# 90d19323d96afc53fcc22ba5a7bb3fb07fdd6c1c/intel_extension_for_pytorch/quantization/_recipe.py#L487
for node in model.graph.nodes:
- self._annotate_output_for_int8_in_int8_out_pattern(node, config)
+ self._annotate_output_for_int8_in_int8_out_pattern(node)
return model
def _annotate_for_dynamic_quantization_config(
self, model: torch.fx.GraphModule
) -> torch.fx.GraphModule:
- config = self.global_config
- self._annotate_linear(model, config)
+ self._annotate_linear_fusion_pattern(model)
return model
def _annotate_qat_conv2d_fusion_pattern(
@@ -648,15 +735,22 @@ class X86InductorQuantizer(Quantizer):
nodes_to_mark_annotated.extend(list(bn_partition.nodes))
_mark_nodes_as_annotated(nodes_to_mark_annotated)
- def _annotate_conv2d_fusion_pattern(
- self, model: torch.fx.GraphModule, config: QuantizationConfig
- ):
- self._annotate_conv2d_binary_unary(model, config)
- self._annotate_conv2d_binary(model, config)
- self._annotate_conv2d_unary(model, config)
- self._annotate_conv2d(model, config)
- self._annotate_linear_unary(model, config)
- self._annotate_linear(model, config)
+ def _annotate_conv2d_fusion_pattern(self, model: torch.fx.GraphModule):
+ if config := self._get_aten_operator_qconfig(torch.ops.aten.conv2d.default):
+ if config.is_qat:
+ # Annotate QAT specific pattern: mainly due to BN not folded in prepare_qat
+ self._annotate_qat_conv2d_fusion_pattern(model, config)
+ self._annotate_conv2d_binary_unary(model, config)
+ self._annotate_conv2d_binary(model, config)
+ self._annotate_conv2d_unary(model, config)
+ self._annotate_conv2d(model, config)
+
+ def _annotate_linear_fusion_pattern(self, model: torch.fx.GraphModule):
+ if config := self._get_aten_operator_qconfig(torch.ops.aten.linear.default):
+ if config.input_activation and not config.input_activation.is_dynamic:
+ # <TODO> Weiwen: Dynamic Quant of linear unary will be supported in next step
+ self._annotate_linear_unary(model, config)
+ self._annotate_linear(model, config)
def _annotate_conv2d_binary_unary(
self, gm: torch.fx.GraphModule, quantization_config: QuantizationConfig
@@ -851,14 +945,13 @@ class X86InductorQuantizer(Quantizer):
_is_output_of_quantized_pattern=True,
)
- def _annotation_propagation_quantizable_pattern(
- self, node: Node, quantization_config: QuantizationConfig
- ) -> None:
+ def _annotate_propagation_quantizable_pattern(self, node: Node) -> None:
# Propagate annotation to quantizable patterns.
if (
- (node.target in quantizable_ops_pt2e)
+ (node.target in propagation_quantizable_ops)
and (not _is_any_annotated([node]))
and (node.op == "call_function")
+ and (quantization_config := self._get_aten_operator_qconfig(node.target)) # type: ignore[arg-type]
):
def is_all_inputs_connected_to_quantized_op(input_nodes):
@@ -915,16 +1008,18 @@ class X86InductorQuantizer(Quantizer):
)
return
- def _annotate_output_for_int8_in_int8_out_pattern(
- self, node: Node, quantization_config: QuantizationConfig
- ) -> None:
+ def _annotate_output_for_int8_in_int8_out_pattern(self, node: Node) -> None:
r"""
- Check and insert observer at output of node in int8_in_int8_out_ops_pt2e if needed.
+ Check and insert observer at output of node in int8_in_int8_out_ops if needed.
Recipe refers to https://github.com/intel/intel-extension-for-pytorch/blob/
90d19323d96afc53fcc22ba5a7bb3fb07fdd6c1c/intel_extension_for_pytorch/quantization/_utils.py#L495
"""
edge_or_node: Tuple[Node, Node]
- if (node.target in int8_in_int8_out_ops_pt2e) and (_is_any_annotated([node])):
+ if (
+ (node.target in int8_in_int8_out_ops)
+ and (_is_any_annotated([node]))
+ and (quantization_config := self._get_aten_operator_qconfig(node.target)) # type: ignore[arg-type]
+ ):
if node.target == torch.ops.aten.max_pool2d.default:
maxpool_node = node
if not _is_all_annotated( | 2.41.0 |
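
The new interface in the patch above can be exercised in a few lines. This is a usage sketch based on the tests in the diff, not a complete PT2E flow; the configured quantizer would normally be handed to `prepare_pt2e`/`convert_pt2e` afterwards, which is unchanged by this patch.

```python
import torch
import torch.ao.quantization.quantizer.x86_inductor_quantizer as xiq
from torch.ao.quantization.quantizer.x86_inductor_quantizer import X86InductorQuantizer

quantizer = X86InductorQuantizer()
quantizer.set_global(xiq.get_default_x86_inductor_quantization_config())

# Drop conv2d from the default recipe: it will no longer be annotated.
quantizer.set_module_type_qconfig(torch.nn.Conv2d, None)

# Or override/extend the recipe for an operator with an explicit config.
quantizer.set_function_type_qconfig(
    torch.nn.functional.linear,
    xiq.get_default_x86_inductor_quantization_config(),
)
```
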
8e9261b906f69b397e4027362be801f98a68d62 | Wed, 10 Apr 2024 23:23:28 -0700 | [PATCH 0010/1000] Add Matmul recipe into x86_inductor_quantizer (#122776) |
**Summary**
Add `matmul` to the quantization recipes, noting that it's not a general recipe but tailored to meet accuracy criteria for specific models. The `matmul` recipe is disabled by default.

**Test Plan**
```
python -m pytest quantization/pt2e/test_x86inductor_quantizer.py -k test_attention_block
```
Pull Request resolved: https://github.com/pytorch/pytorch/pull/122776
Approved by: https://github.com/jgong5, https://github.com/jerryzh168
ghstack dependencies: #122775
| diff --git a/test/quantization/pt2e/test_x86inductor_quantizer.py b/test/quantization/pt2e/test_x86inductor_quantizer.py
index c9df319bfd..4af5a30ddf 100644
--- a/test/quantization/pt2e/test_x86inductor_quantizer.py
+++ b/test/quantization/pt2e/test_x86inductor_quantizer.py
@@ -289,21 +289,42 @@ class TestHelperModules:
return tmp + self.bn2(self.conv2(tmp))
class SelfAttnLikeModule(torch.nn.Module):
- def __init__(self, input_dim) -> None:
+ def __init__(
+ self,
+ input_dim,
+ transpose_for_score=False,
+ num_attention_heads=None,
+ attention_head_size=None,
+ ) -> None:
super().__init__()
self.input_dim = input_dim
self.q_proj = nn.Linear(input_dim, input_dim, bias=False)
self.k_proj = nn.Linear(input_dim, input_dim, bias=False)
self.v_proj = nn.Linear(input_dim, input_dim, bias=False)
self.softmax = nn.Softmax(dim=-1)
+ self.transpose_for_score = transpose_for_score
+ if self.transpose_for_score:
+ assert num_attention_heads is not None
+ assert attention_head_size is not None
+ self.num_attention_heads = num_attention_heads
+ self.attention_head_size = attention_head_size
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
def forward(self, x):
q = self.q_proj(x)
k = self.k_proj(x)
v = self.v_proj(x)
- scores = torch.bmm(q, k.transpose(1, 2)) / (self.input_dim ** 0.5)
+ if self.transpose_for_score:
+ q = self.transpose_for_scores(q)
+ k = self.transpose_for_scores(k)
+ v = self.transpose_for_scores(v)
+ scores = torch.matmul(q, k.transpose(-1, -2)) / (self.input_dim ** 0.5)
attention = self.softmax(scores)
- weighted = torch.bmm(attention, v)
+ weighted = torch.matmul(attention, v)
return weighted
class X86InductorQuantTestCase(QuantizationTestCase):
@@ -1448,3 +1469,68 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase):
node_occurrence,
node_list,
)
+
+ @skipIfNoX86
+ def test_attention_block(self):
+ """
+ Test pattern of Attention like Block with X86InductorQuantizer.
+ """
+ for annotate_matmul in [False, True]:
+ with override_quantized_engine("x86"), torch.no_grad():
+ m = TestHelperModules.SelfAttnLikeModule(
+ input_dim=64 * 16,
+ transpose_for_score=True,
+ num_attention_heads=16,
+ attention_head_size=64,
+ ).eval()
+ example_inputs = (torch.randn(2, 384, 1024),)
+
+ m(*example_inputs)
+
+ quantizer = X86InductorQuantizer().set_global(
+ xiq.get_default_x86_inductor_quantization_config()
+ )
+
+ if annotate_matmul:
+ quantizer.set_function_type_qconfig(torch.matmul, quantizer.get_global_quantization_config())
+
+ node_occurrence = {
+ torch.ops.quantized_decomposed.quantize_per_tensor.default: 5 if annotate_matmul else 1,
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default: 7 if annotate_matmul else 3,
+ # quantize_per_channel for weights are const propagated
+ torch.ops.quantized_decomposed.quantize_per_channel.default: 0,
+ torch.ops.quantized_decomposed.dequantize_per_channel.default: 3,
+ }
+ if annotate_matmul:
+ node_list = [
+ torch.ops.quantized_decomposed.quantize_per_tensor.default,
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default,
+ torch.ops.quantized_decomposed.dequantize_per_channel.default,
+ torch.ops.aten.linear.default,
+ torch.ops.aten.view.default,
+ torch.ops.aten.permute.default,
+ torch.ops.quantized_decomposed.quantize_per_tensor.default,
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default,
+ torch.ops.aten.matmul.default,
+ torch.ops.aten.div.Tensor,
+ torch.ops.aten.softmax.int,
+ ]
+ else:
+ node_list = [
+ torch.ops.quantized_decomposed.quantize_per_tensor.default,
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default,
+ torch.ops.quantized_decomposed.dequantize_per_channel.default,
+ torch.ops.aten.linear.default,
+ torch.ops.aten.view.default,
+ torch.ops.aten.permute.default,
+ torch.ops.aten.matmul.default,
+ torch.ops.aten.div.Tensor,
+ torch.ops.aten.softmax.int,
+ ]
+ self._test_quantizer(
+ m,
+ example_inputs,
+ quantizer,
+ node_occurrence,
+ node_list,
+ )
diff --git a/torch/ao/quantization/quantizer/x86_inductor_quantizer.py b/torch/ao/quantization/quantizer/x86_inductor_quantizer.py
index 8889cf2df0..226d722357 100644
--- a/torch/ao/quantization/quantizer/x86_inductor_quantizer.py
+++ b/torch/ao/quantization/quantizer/x86_inductor_quantizer.py
@@ -82,7 +82,9 @@ default_quantizable_ops = propagation_quantizable_ops | {
# A superset of default_quantizable_ops includes operators support the int8 data type
# but not enabled by default recipe of X86InductorQuantizer.
-quantizable_ops = default_quantizable_ops
+quantizable_ops = default_quantizable_ops | {
+ torch.ops.aten.matmul.default,
+}
QUANT_ANNOTATION_KEY = "quantization_annotation"
@@ -110,6 +112,12 @@ def _map_module_function_to_aten_operator_type():
],
torch.ops.aten.flatten.using_ints,
),
+ (
+ [
+ torch.matmul,
+ ],
+ torch.ops.aten.matmul.default,
+ ),
)
for map_item in map_list:
module_function_to_aten_operator.update(dict.fromkeys(map_item[0], map_item[1])) # type: ignore[call-overload]
@@ -310,6 +318,14 @@ class X86InductorQuantizer(Quantizer):
self.global_config = quantization_config
return self
+ def get_global_quantization_config(self):
+ if not isinstance(self.global_config, QuantizationConfig):
+ warnings.warn(
+ "The global_config for X86InductorQuantizer is currently invalid. \
+ Please ensure that you use set_global to establish the global quantization configuration."
+ )
+ return self.global_config
+
def set_function_type_qconfig(
self,
function_type: Callable,
@@ -499,6 +515,7 @@ class X86InductorQuantizer(Quantizer):
# Step1: Recipe of fusion patterns like conv/linear.
self._annotate_conv2d_fusion_pattern(model)
self._annotate_linear_fusion_pattern(model)
+ self._annotate_matmul(model)
# Step2: Recipe to propagate annotation for patterns beside conv/linear.
# Go through all the nodes from start to end.
@@ -752,6 +769,24 @@ class X86InductorQuantizer(Quantizer):
self._annotate_linear_unary(model, config)
self._annotate_linear(model, config)
+ def _annotate_matmul(self, model: torch.fx.GraphModule):
+ if config := self._get_aten_operator_qconfig(torch.ops.aten.matmul.default):
+ for node in model.graph.nodes:
+ if node.target == torch.ops.aten.matmul.default and not _is_annotated(
+ [node]
+ ):
+ input_qspec_map = {}
+ matmul_node = node
+ for input_node in matmul_node.args:
+ input_qspec_map[input_node] = get_input_act_qspec(config)
+ matmul_node.meta[
+ QUANT_ANNOTATION_KEY
+ ] = _X86InductorQuantizationAnnotation(
+ input_qspec_map=input_qspec_map,
+ _annotated=True,
+ _is_output_of_quantized_pattern=True,
+ )
+
def _annotate_conv2d_binary_unary(
self, gm: torch.fx.GraphModule, quantization_config: QuantizationConfig
) -> None: | 2.41.0 |
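
With the patch above, the matmul recipe stays off unless it is opted into explicitly. A minimal sketch mirroring the new `test_attention_block` test:

```python
import torch
import torch.ao.quantization.quantizer.x86_inductor_quantizer as xiq
from torch.ao.quantization.quantizer.x86_inductor_quantizer import X86InductorQuantizer

quantizer = X86InductorQuantizer()
quantizer.set_global(xiq.get_default_x86_inductor_quantization_config())

# matmul is in quantizable_ops but not in default_quantizable_ops, so it is only
# annotated when explicitly requested; here we reuse the global config for it.
quantizer.set_function_type_qconfig(
    torch.matmul, quantizer.get_global_quantization_config()
)
```
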
4580f76d9e4a81b70a94062b762e3af919d95d0 | Wed, 10 Apr 2024 21:38:33 -0700 | [PATCH 0011/1000] fix flop counter issue with out parameters (#123768) | Pull Request resolved: https://github.com/pytorch/pytorch/pull/123768 Approved by: https://github.com/zou3519 | diff --git a/test/test_flop_counter.py b/test/test_flop_counter.py
index 74bc666db6..1a9a757f9f 100644
--- a/test/test_flop_counter.py
+++ b/test/test_flop_counter.py
@@ -248,8 +248,8 @@ class TestFlopCounter(TestCase):
self.assertExpectedInline(get_total_flops(mode), """5""")
- def count(*args, out):
- return out.numel()
+ def count(*args, out_val):
+ return out_val.numel()
count._get_raw = True
mode = FlopCounterMode(custom_mapping={torch.ops.aten.add: count})
@@ -328,6 +328,17 @@ class TestFlopCounter(TestCase):
self.assertExpectedInline(str(flops_fw_bw_math), """805306368""")
self.assertExpectedInline(str(flops_fw_bw_efficient), """939524096""")
+ def test_addmm_out(self):
+ def f(x):
+ y = torch.zeros(10, 10)
+ return torch.mm(x, x, out=y)
+
+ mode = FlopCounterMode()
+ with mode:
+ f(torch.randn(10, 10))
+
+ self.assertExpectedInline(get_total_flops(mode), """2000""")
+
def test_hook_registration(self):
model = torch.nn.Linear(100, 100)
x = torch.randn(3, 100)
diff --git a/torch/utils/flop_counter.py b/torch/utils/flop_counter.py
index c76a9a2432..fcad5d1fd3 100644
--- a/torch/utils/flop_counter.py
+++ b/torch/utils/flop_counter.py
@@ -24,8 +24,8 @@ flop_registry: Dict[Any, Any] = {}
def shape_wrapper(f):
@wraps(f)
- def nf(*args, out=None, **kwargs):
- args, kwargs, out_shape = tree_map(get_shape, (args, kwargs, out))
+ def nf(*args, out_val=None, **kwargs):
+ args, kwargs, out_shape = tree_map(get_shape, (args, kwargs, out_val))
return f(*args, out_shape=out_shape, **kwargs)
return nf
@@ -542,7 +542,7 @@ class FlopCounterMode(TorchDispatchMode):
func_packet = func._overloadpacket
if func_packet in self.flop_registry:
flop_count_func = self.flop_registry[func_packet]
- flop_count = flop_count_func(*args, **kwargs, out=out) # type: ignore[operator]
+ flop_count = flop_count_func(*args, **kwargs, out_val=out) # type: ignore[operator]
if len(set(self.parents)) != len(self.parents):
print(
"The module hierarchy tracking seems to be messed up." | 2.41.0 |
a5e7a01b5368b8ba11edcb62942630a1474e6e3 | Wed, 10 Apr 2024 11:02:32 -0700 | [PATCH 0015/1000] [custom_op] Schema inference now includes default values (#123453) |
If the function has default values, we should be able to do schema inference and put the default values into the schema.

Test Plan:
- new tests

Pull Request resolved: https://github.com/pytorch/pytorch/pull/123453
Approved by: https://github.com/albanD
| diff --git a/test/test_custom_ops.py b/test/test_custom_ops.py
index 7479225785..10cb60e8ae 100644
--- a/test/test_custom_ops.py
+++ b/test/test_custom_ops.py
@@ -688,20 +688,6 @@ class TestCustomOp(CustomOpTestCaseBase):
infer_schema(foo)
- with self.assertRaisesRegex(ValueError, "default value"):
-
- def foo(x: Optional[Tensor] = None):
- raise NotImplementedError()
-
- infer_schema(foo)
-
- with self.assertRaisesRegex(ValueError, "default value"):
-
- def foo(x: Optional[Tensor] = None):
- raise NotImplementedError()
-
- infer_schema(foo)
-
with self.assertRaisesRegex(ValueError, "unsupported"):
def foo(x: Tensor) -> Tuple[Tensor, ...]:
@@ -2151,6 +2137,25 @@ class TestCustomOpAPI(TestCase):
self.assertEqual(z, x + y)
self.assertTrue(cpu_called)
+ @skipIfTorchDynamo("Expected to fail due to no FakeTensor support; not a bug")
+ def test_default_values(self):
+ defaults = []
+
+ @torch.library.custom_op("_torch_testing::f", mutates_args=())
+ def f(
+ x: Tensor,
+ a: Optional[int] = None,
+ b: float = 3.14,
+ c: bool = True,
+ d: int = 3,
+ ) -> Tensor:
+ defaults.extend([a, b, c, d])
+ return x.clone()
+
+ x = torch.randn(3)
+ f(x)
+ self.assertEqual(defaults, [None, 3.14, True, 3])
+
def test_mutated_error(self):
with self.assertRaisesRegex(
ValueError, r".*{'y'} in mutates_args were not found"
diff --git a/torch/_custom_op/impl.py b/torch/_custom_op/impl.py
index b4ea032380..fefd7cedf9 100644
--- a/torch/_custom_op/impl.py
+++ b/torch/_custom_op/impl.py
@@ -801,19 +801,22 @@ def infer_schema(prototype_function: typing.Callable, mutates_args=()) -> str:
f"The valid types are: {SUPPORTED_PARAM_TYPES.keys()}."
)
- if param.default is not inspect.Parameter.empty:
- error_fn(
- f"Parameter {name} has a default value; this is not supported. "
- f"If you want to use default values then create a function with "
- f"default values that invokes the custom op."
- )
schema_type = SUPPORTED_PARAM_TYPES[param.annotation]
if name in mutates_args:
if not schema_type.startswith("Tensor"):
error_fn(f"Parameter {name} is in mutable_args but only Tensors or collections of Tensors can be mutated")
schema_type = f"Tensor(a{idx}!){schema_type[len('Tensor'):]}"
seen_args.add(name)
- params.append(f"{schema_type} {name}")
+ if param.default is inspect.Parameter.empty:
+ params.append(f"{schema_type} {name}")
+ else:
+ if param.default is not None and not isinstance(param.default, (int, float, bool)):
+ error_fn(
+ f"Parameter {name} has an unsupported default value (we only support "
+ f"int, float, bool, None). Please file an issue on GitHub so we can "
+ f"prioritize this."
+ )
+ params.append(f"{schema_type} {name}={param.default}")
mutates_args_not_seen = set(mutates_args) - seen_args
if len(mutates_args_not_seen) > 0:
error_fn(f"{mutates_args_not_seen} in mutates_args were not found in " | 2.41.0 |
b4419dc4d9a4e5555de2a4def0eb77f10c8832a | Wed, 10 Apr 2024 11:02:32 -0700 | [PATCH 0016/1000] Refresh OpOverloadPacket if a new OpOverload gets added (#123578) |
If a user accesses an OpOverloadPacket, then creates a new OpOverload, then uses the OpOverloadPacket, the new OpOverload never gets hit. This is because OpOverloadPacket caches OpOverloads when it is constructed. This PR fixes the problem by "refreshing" the OpOverloadPacket if a new OpOverload gets constructed and the OpOverloadPacket exists.

Test Plan:
- new tests

Pull Request resolved: https://github.com/pytorch/pytorch/pull/123578
Approved by: https://github.com/albanD
ghstack dependencies: #123453
| diff --git a/test/test_custom_ops.py b/test/test_custom_ops.py
index 10cb60e8ae..86c21f228d 100644
--- a/test/test_custom_ops.py
+++ b/test/test_custom_ops.py
@@ -2393,6 +2393,30 @@ Please use `add.register_fake` to add an fake impl.""",
y = f(x)
self.assertEqual(y, x.sin())
+ @skipIfTorchDynamo("Expected to fail due to no FakeTensor support; not a bug")
+ def test_overloading(self):
+ called_f = 0
+ called_f1 = 0
+
+ @torch.library.custom_op("_torch_testing::f", mutates_args=())
+ def f(x: Tensor) -> Tensor:
+ nonlocal called_f
+ called_f += 1
+ return x.clone()
+
+ x = torch.randn(2, 3)
+ torch.ops._torch_testing.f(x)
+ self.assertEqual(called_f, 1)
+
+ @torch.library.custom_op("_torch_testing::f.overload", mutates_args=())
+ def f1(x: Tensor, y: Tensor) -> Tensor:
+ nonlocal called_f1
+ called_f1 += 1
+ return x.clone()
+
+ torch.ops._torch_testing.f(x, x)
+ self.assertEqual(called_f1, 1)
+
def test_disallows_output_aliasing(self):
@torch.library.custom_op("_torch_testing::f", mutates_args=())
def f(x: Tensor) -> Tensor:
diff --git a/torch/_ops.py b/torch/_ops.py
index 08abfecb5a..7b081e1360 100644
--- a/torch/_ops.py
+++ b/torch/_ops.py
@@ -931,8 +931,10 @@ class _OpNamespace(types.ModuleType):
# for overloads and raise an exception if there are more than one.
namespace_name = self.name
qualified_op_name = f"{namespace_name}::{op_name}"
+ op_module = self.__module__ + "." + namespace_name
+
try:
- op, overload_names = torch._C._jit_get_operation(qualified_op_name)
+ op, overload_names = _get_packet(qualified_op_name, op_module)
if op is None:
raise AttributeError(
f"'_OpNamespace' '{self.name}' object has no attribute '{op_name}'"
@@ -944,10 +946,6 @@ class _OpNamespace(types.ModuleType):
f"'_OpNamespace' '{self.name}' object has no attribute '{op_name}'"
) from e
- # let the script frontend know that op is identical to the builtin op
- # with qualified_op_name
- torch.jit._builtins._register_builtin(op, qualified_op_name)
- op.__module__ = self.__module__ + "." + namespace_name
opoverloadpacket = OpOverloadPacket(
qualified_op_name, op_name, op, overload_names
)
@@ -959,6 +957,22 @@ class _OpNamespace(types.ModuleType):
return opoverloadpacket
+def _get_packet(qualname, op_module):
+ op, overload_names = torch._C._jit_get_operation(qualname)
+ if op is not None:
+ op.__module__ = op_module
+ # let the script frontend know that op is identical to the builtin op
+ # with qualified_op_name
+ torch.jit._builtins._register_builtin(op, qualname)
+ return op, overload_names
+
+
+def _refresh_packet(packet):
+ op, overload_names = _get_packet(packet._qualified_op_name, packet._op.__module__)
+ packet._op = op
+ packet._overload_names = overload_names
+
+
class _PyOpNamespace(_OpNamespace):
def __init__(self, name, ops):
super().__init__(name)
diff --git a/torch/library.py b/torch/library.py
index 88c72047ed..a7488c81d6 100644
--- a/torch/library.py
+++ b/torch/library.py
@@ -107,7 +107,18 @@ class Library:
if isinstance(tags, torch.Tag):
tags = (tags,)
result = self.m.define(schema, alias_analysis, tuple(tags))
- qualname = self.ns + "::" + schema.split("(")[0]
+ name = schema.split("(")[0]
+ qualname = self.ns + "::" + name
+
+ # If the OpOverloadPacket exists already, then this means we're adding a
+ # new OpOverload for it. Refresh the packet to include the new OpOverload.
+ packet_name = name.split(".")[0] if "." in name else name
+ if hasattr(torch.ops, self.ns):
+ ns = getattr(torch.ops, self.ns)
+ if hasattr(ns, packet_name):
+ packet = getattr(ns, packet_name)
+ torch._ops._refresh_packet(packet)
+
self._op_defs.add(qualname)
_defs.add(qualname)
return result | 2.41.0 |
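
The behaviour fixed above is easiest to see with two overloads registered at different times. This sketch mirrors the new `test_overloading` test, with a made-up `mylib` namespace standing in for the test namespace:

```python
import torch
from torch import Tensor

@torch.library.custom_op("mylib::f", mutates_args=())
def f(x: Tensor) -> Tensor:
    return x.clone()

x = torch.randn(2, 3)
torch.ops.mylib.f(x)  # materializes the OpOverloadPacket (and caches its overloads)

# Defining a second overload after the packet exists used to be invisible to it.
@torch.library.custom_op("mylib::f.overload", mutates_args=())
def f_overload(x: Tensor, y: Tensor) -> Tensor:
    return x + y

torch.ops.mylib.f(x, x)  # now resolves to the new overload after the refresh
```
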
38729c0cdf3ce4274f4d68f8e46e5a1cd36cbe8 | Wed, 10 Apr 2024 11:02:33 -0700 | [PATCH 0017/1000] Switch quantized_decomposed over to new custom ops API (#123454) |
We are taking API feedback.

Changes:
- I removed some of the default values (they weren't being used).
- I was unable to convert the last op (which is essentially an autograd.Function registered as CompositeImplicitAutograd). That one is "incorrectly registered"; I punt fixing it to the future.

Test Plan:
- existing tests

Pull Request resolved: https://github.com/pytorch/pytorch/pull/123454
Approved by: https://github.com/andrewor14
ghstack dependencies: #123453, #123578
| diff --git a/torch/_custom_op/impl.py b/torch/_custom_op/impl.py
index fefd7cedf9..6f25e2b9af 100644
--- a/torch/_custom_op/impl.py
+++ b/torch/_custom_op/impl.py
@@ -882,6 +882,11 @@ SUPPORTED_RETURN_TYPES = {
def parse_return(annotation, error_fn):
+ if annotation == inspect.Signature.empty:
+ error_fn(
+ "There was no return annotation. Please add one."
+ )
+
if annotation is None:
return "()"
diff --git a/torch/ao/quantization/fx/_decomposed.py b/torch/ao/quantization/fx/_decomposed.py
index 18dd61c37c..67f7b3f509 100644
--- a/torch/ao/quantization/fx/_decomposed.py
+++ b/torch/ao/quantization/fx/_decomposed.py
@@ -4,11 +4,11 @@ from typing import Optional, Tuple
import torch
from torch._refs import _unsqueeze_multiple
from torch.ao.quantization.utils import determine_qparams, validate_qmin_qmax
-from torch.library import impl, Library
+from torch.library import custom_op, Library, impl
# Note: decomposed means decomposed quantized tensor, using decomposed so that the
# name is not too long
-quantized_decomposed_lib = Library("quantized_decomposed", "DEF")
+ns = "quantized_decomposed"
_DTYPE_TO_QVALUE_BOUNDS = {
torch.uint8: (0, 255),
@@ -31,11 +31,8 @@ def _quant_min_max_bounds_check(quant_min, quant_max, dtype):
"quant_max out of bound for dtype, " \
f"quant_max_upper_bound: {quant_max_upper_bound} quant_max: {quant_max}"
-quantized_decomposed_lib.define(
- "quantize_per_tensor(Tensor input, float scale, int zero_point, "
- "int quant_min, int quant_max, ScalarType dtype) -> Tensor")
-@impl(quantized_decomposed_lib, "quantize_per_tensor", "CompositeExplicitAutograd")
+@custom_op(f"{ns}::quantize_per_tensor", mutates_args=())
def quantize_per_tensor(
input: torch.Tensor,
scale: float,
@@ -67,8 +64,8 @@ def quantize_per_tensor(
inv_scale = 1.0 / scale
return torch.clamp(torch.round(input * inv_scale) + zero_point, quant_min, quant_max).to(dtype)
-@impl(quantized_decomposed_lib, "quantize_per_tensor", "Meta")
-def quantize_per_tensor_meta(
+@quantize_per_tensor.register_fake
+def _(
input: torch.Tensor,
scale: float,
zero_point: int,
@@ -81,11 +78,7 @@ def quantize_per_tensor_meta(
assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}"
return torch.empty_like(input, dtype=dtype)
-quantized_decomposed_lib.define(
- "quantize_per_tensor.tensor(Tensor input, Tensor scale, Tensor zero_point, "
- "int quant_min, int quant_max, ScalarType dtype) -> Tensor")
-
-@impl(quantized_decomposed_lib, "quantize_per_tensor.tensor", "CompositeExplicitAutograd")
+@custom_op(f"{ns}::quantize_per_tensor.tensor", mutates_args=())
def quantize_per_tensor_tensor(
input: torch.Tensor,
scale: torch.Tensor,
@@ -103,7 +96,7 @@ def quantize_per_tensor_tensor(
assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}"
return quantize_per_tensor(input, scale.item(), zero_point.item(), quant_min, quant_max, dtype)
-@impl(quantized_decomposed_lib, "quantize_per_tensor.tensor", "Meta")
+@quantize_per_tensor_tensor.register_fake
def quantize_per_tensor_tensor_meta(
input: torch.Tensor,
scale: torch.Tensor,
@@ -120,11 +113,7 @@ def quantize_per_tensor_tensor_meta(
return torch.empty_like(input, dtype=dtype)
# TODO: remove other variants and keep this one
-quantized_decomposed_lib.define(
- "quantize_per_tensor.tensor2(Tensor input, Tensor scale, Tensor zero_point, "
- "Tensor quant_min, Tensor quant_max, ScalarType dtype) -> Tensor")
-
-@impl(quantized_decomposed_lib, "quantize_per_tensor.tensor2", "CompositeExplicitAutograd")
+@custom_op(f"{ns}::quantize_per_tensor.tensor2", mutates_args=())
def quantize_per_tensor_tensor2(
input: torch.Tensor,
scale: torch.Tensor,
@@ -142,8 +131,8 @@ def quantize_per_tensor_tensor2(
assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}"
return quantize_per_tensor(input, scale.item(), zero_point.item(), quant_min.item(), quant_max.item(), dtype)
-@impl(quantized_decomposed_lib, "quantize_per_tensor.tensor2", "Meta")
-def quantize_per_tensor_tensor2_meta(
+@quantize_per_tensor_tensor2.register_fake
+def _(
input: torch.Tensor,
scale: torch.Tensor,
zero_point: torch.Tensor,
@@ -157,11 +146,7 @@ def quantize_per_tensor_tensor2_meta(
# the signature as metadata for the input Tensor, this might be useful for pattern
# matching in the future
# We will revisit this later if we found there are no use cases for it
-quantized_decomposed_lib.define(
- "dequantize_per_tensor(Tensor input, float scale, int zero_point, "
- "int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None) -> Tensor")
-
-@impl(quantized_decomposed_lib, "dequantize_per_tensor", "CompositeExplicitAutograd")
+@custom_op(f"{ns}::dequantize_per_tensor", mutates_args=())
def dequantize_per_tensor(
input: torch.Tensor,
scale: float,
@@ -209,7 +194,7 @@ def dequantize_per_tensor(
else:
raise ValueError(f"Unsupported dtype in dequantize_per_tensor: {dtype}")
-@impl(quantized_decomposed_lib, "dequantize_per_tensor", "Meta")
+@dequantize_per_tensor.register_fake
def dequantize_per_tensor_meta(
input: torch.Tensor,
scale: torch.Tensor,
@@ -224,11 +209,7 @@ def dequantize_per_tensor_meta(
out_dtype = torch.float32
return torch.empty_like(input, dtype=out_dtype)
-quantized_decomposed_lib.define(
- "dequantize_per_tensor.tensor(Tensor input, Tensor scale, Tensor zero_point, "
- "int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None) -> Tensor")
-
-@impl(quantized_decomposed_lib, "dequantize_per_tensor.tensor", "CompositeExplicitAutograd")
+@custom_op(f"{ns}::dequantize_per_tensor.tensor", mutates_args=())
def dequantize_per_tensor_tensor(
input: torch.Tensor,
scale: torch.Tensor,
@@ -248,8 +229,8 @@ def dequantize_per_tensor_tensor(
assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}"
return dequantize_per_tensor(input, scale.item(), zero_point.item(), quant_min, quant_max, dtype, out_dtype=out_dtype)
-@impl(quantized_decomposed_lib, "dequantize_per_tensor.tensor", "Meta")
-def dequantize_per_tensor_tensor_meta(
+@dequantize_per_tensor_tensor.register_fake
+def dequantize_per_tensor_tensor_fake(
input: torch.Tensor,
scale: torch.Tensor,
zero_point: torch.Tensor,
@@ -270,11 +251,7 @@ def dequantize_per_tensor_tensor_meta(
raise ValueError(f"Unsupported dtype in dequantize_per_tensor: {dtype}")
# TODO: remove other variants and keep this one
-quantized_decomposed_lib.define(
- "dequantize_per_tensor.tensor2(Tensor input, Tensor scale, Tensor zero_point, "
- "Tensor quant_min, Tensor quant_max, ScalarType dtype, *, ScalarType? out_dtype=None) -> Tensor")
-
-@impl(quantized_decomposed_lib, "dequantize_per_tensor.tensor2", "CompositeExplicitAutograd")
+@custom_op(f"{ns}::dequantize_per_tensor.tensor2", mutates_args=())
def dequantize_per_tensor_tensor2(
input: torch.Tensor,
scale: torch.Tensor,
@@ -295,8 +272,8 @@ def dequantize_per_tensor_tensor2(
return dequantize_per_tensor(
input, scale.item(), zero_point.item(), quant_min.item(), quant_max.item(), dtype, out_dtype=out_dtype)
-@impl(quantized_decomposed_lib, "dequantize_per_tensor.tensor2", "Meta")
-def dequantize_per_tensor_tensor2_meta(
+@dequantize_per_tensor_tensor2.register_fake
+def _(
input,
scale,
zero_point,
@@ -306,13 +283,9 @@ def dequantize_per_tensor_tensor2_meta(
*,
out_dtype: Optional[torch.dtype] = None
) -> torch.Tensor:
- return dequantize_per_tensor_tensor_meta(input, scale, zero_point, quant_min, quant_max, dtype, out_dtype=out_dtype)
-
-quantized_decomposed_lib.define(
- "choose_qparams.tensor(Tensor input, int quant_min, int quant_max, "
- "float eps, ScalarType dtype) -> (Tensor, Tensor)")
+ return dequantize_per_tensor_tensor_fake(input, scale, zero_point, quant_min, quant_max, dtype, out_dtype=out_dtype)
-@impl(quantized_decomposed_lib, "choose_qparams.tensor", "CompositeExplicitAutograd")
+@custom_op(f"{ns}::choose_qparams.tensor", mutates_args=())
def choose_qparams_tensor(
input: torch.Tensor,
qmin: int,
@@ -347,11 +320,7 @@ def choose_qparams_tensor(
return determine_qparams(
min_val, max_val, qmin, qmax, dtype, torch.Tensor([eps]), has_customized_qrange=False)
-quantized_decomposed_lib.define(
- "choose_qparams_symmetric.tensor(Tensor input, int quant_min, int quant_max, "
- "float eps, ScalarType dtype) -> (Tensor, Tensor)")
-
-@impl(quantized_decomposed_lib, "choose_qparams_symmetric.tensor", "CompositeExplicitAutograd")
+@custom_op(f"{ns}::choose_qparams_symmetric.tensor", mutates_args=())
def choose_qparams_symmetric_tensor(
input: torch.Tensor,
qmin: int,
@@ -393,8 +362,8 @@ def choose_qparams_symmetric_tensor(
qscheme=torch.per_tensor_symmetric
)
-@impl(quantized_decomposed_lib, "choose_qparams.tensor", "Meta")
-def choose_qparams_tensor_meta(
+@choose_qparams_tensor.register_fake
+def _(
input: torch.Tensor,
quant_min: int,
quant_max: int,
@@ -410,8 +379,8 @@ def choose_qparams_tensor_meta(
{quant_min} max: {quant_max}"
return torch.empty(1, dtype=torch.double, device=input.device), torch.empty(1, dtype=torch.int64, device=input.device)
-@impl(quantized_decomposed_lib, "choose_qparams_symmetric.tensor", "Meta")
-def choose_qparams_symmetric_tensor_meta(
+@choose_qparams_symmetric_tensor.register_fake
+def _(
input: torch.Tensor,
quant_min: int,
quant_max: int,
@@ -428,11 +397,7 @@ def _permute_to_axis_zero(x, axis):
y = x.permute(tuple(new_axis_list))
return y, new_axis_list
-quantized_decomposed_lib.define(
- "quantize_per_channel(Tensor input, Tensor scales, Tensor zero_points, int axis, "
- "int quant_min, int quant_max, ScalarType dtype) -> Tensor")
-
-@impl(quantized_decomposed_lib, "quantize_per_channel", "CompositeExplicitAutograd")
+@custom_op(f"{ns}::quantize_per_channel", mutates_args=())
def quantize_per_channel(
input: torch.Tensor,
scales: torch.Tensor,
@@ -477,7 +442,7 @@ def quantize_per_channel(
out = res.permute(tuple(permute_axis_list))
return out.to(dtype)
-@impl(quantized_decomposed_lib, "quantize_per_channel", "Meta")
+@quantize_per_channel.register_fake
def quantize_per_channel_meta(
input: torch.Tensor,
scales: torch.Tensor,
@@ -498,11 +463,7 @@ def quantize_per_channel_meta(
# the signature as metadata for the input Tensor, this might be useful for pattern
# matching in the future
# We will revisit this later if we found there are no use cases for it
-quantized_decomposed_lib.define(
- "dequantize_per_channel(Tensor input, Tensor scales, Tensor? zero_points, int axis, "
- "int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None) -> Tensor")
-
-@impl(quantized_decomposed_lib, "dequantize_per_channel", "CompositeExplicitAutograd")
+@custom_op(f"{ns}::dequantize_per_channel", mutates_args=())
def dequantize_per_channel(
input: torch.Tensor,
scales: torch.Tensor,
@@ -560,8 +521,8 @@ def dequantize_per_channel(
out = res.permute(tuple(permute_axis_list))
return out
-@impl(quantized_decomposed_lib, "dequantize_per_channel", "Meta")
-def dequantize_per_channel_meta(
+@dequantize_per_channel.register_fake
+def _(
input: torch.Tensor,
scales: torch.Tensor,
zero_points: Optional[torch.Tensor],
@@ -580,16 +541,7 @@ def dequantize_per_channel_meta(
return torch.empty_like(input, dtype=out_dtype)
-quantized_decomposed_lib.define(
- "choose_qparams_per_token(Tensor input, ScalarType dtype) -> (Tensor, Tensor)"
-)
-
-
-@impl(
- quantized_decomposed_lib,
- "choose_qparams_per_token",
- "CompositeExplicitAutograd",
-)
+@custom_op(f"{ns}::choose_qparams_per_token", mutates_args=())
def choose_qparams_per_token(
input: torch.Tensor,
dtype: torch.dtype,
@@ -623,12 +575,8 @@ def choose_qparams_per_token(
return scales, zero_points
-@impl(
- quantized_decomposed_lib,
- "choose_qparams_per_token",
- "Meta",
-)
-def choose_qparams_per_token_meta(
+@choose_qparams_per_token.register_fake
+def _(
input: torch.Tensor,
dtype: torch.dtype,
) -> Tuple[torch.Tensor, torch.Tensor]:
@@ -639,16 +587,7 @@ def choose_qparams_per_token_meta(
# TODO: move this to https://github.com/pytorch/pytorch/blob/main/torch/ao/quantization/fx/_decomposed.py
-quantized_decomposed_lib.define(
- "choose_qparams_per_token_asymmetric(Tensor input, ScalarType dtype) -> (Tensor, Tensor)"
-)
-
-
-@impl(
- quantized_decomposed_lib,
- "choose_qparams_per_token_asymmetric",
- "CompositeExplicitAutograd",
-)
+@custom_op(f"{ns}::choose_qparams_per_token_asymmetric", mutates_args=())
def choose_qparams_per_token_asymmetric(
input: torch.Tensor,
dtype: torch.dtype,
@@ -691,12 +630,8 @@ def choose_qparams_per_token_asymmetric(
return scale.to(torch.float32), zero_point.to(torch.float32)
-@impl(
- quantized_decomposed_lib,
- "choose_qparams_per_token_asymmetric",
- "Meta",
-)
-def choose_qparams_per_token_asymmetric_meta(
+@choose_qparams_per_token_asymmetric.register_fake
+def _(
input: torch.Tensor,
dtype: torch.dtype,
) -> Tuple[torch.Tensor, torch.Tensor]:
@@ -716,13 +651,7 @@ def _per_token_quant_qparam_dim_check(input, scales, zero_points):
), f"num_tokens: {num_tokens} zero_points: {zero_points.size()}"
-quantized_decomposed_lib.define(
- "quantize_per_token(Tensor input, Tensor scales, Tensor zero_points, "
- "int quant_min, int quant_max, ScalarType dtype) -> Tensor"
-)
-
-
-@impl(quantized_decomposed_lib, "quantize_per_token", "CompositeExplicitAutograd")
+@custom_op(f"{ns}::quantize_per_token", mutates_args=())
def quantize_per_token(
input: torch.Tensor,
scales: torch.Tensor,
@@ -730,7 +659,7 @@ def quantize_per_token(
quant_min: int,
quant_max: int,
dtype: torch.dtype,
-):
+) -> torch.Tensor:
"""Per token quantization for the Tensor using the quantization parameters to map
from floating point to quantized values. This means for a N dimension Tensor
(M1, M2, ...Mn, N), we calculate scales/zero_points for each N elements and quantize
@@ -757,8 +686,8 @@ def quantize_per_token(
return input
-@impl(quantized_decomposed_lib, "quantize_per_token", "Meta")
-def quantize_per_token_meta(
+@quantize_per_token.register_fake
+def _(
input: torch.Tensor,
scales: torch.Tensor,
zero_points: torch.Tensor,
@@ -770,13 +699,7 @@ def quantize_per_token_meta(
return torch.empty_like(input, dtype=dtype)
-quantized_decomposed_lib.define(
- "dequantize_per_token(Tensor input, Tensor scales, Tensor zero_points, "
- "int quant_min, int quant_max, ScalarType dtype, ScalarType output_dtype) -> Tensor"
-)
-
-
-@impl(quantized_decomposed_lib, "dequantize_per_token", "CompositeExplicitAutograd")
+@custom_op(f"{ns}::dequantize_per_token", mutates_args=())
def dequantize_per_token(
input: torch.Tensor,
scales: torch.Tensor,
@@ -784,8 +707,8 @@ def dequantize_per_token(
quant_min: int,
quant_max: int,
dtype: torch.dtype,
- output_dtype: torch.dtype = torch.float32,
-):
+ output_dtype: torch.dtype,
+) -> torch.Tensor:
"""Per token dequantization for the Tensor using the quantization parameters to map
from floating point to quantized values. This means for a N dimension Tensor
(M1, M2, ...Mn, N), we calculate scales/zero_points for each N elements and quantize
@@ -809,8 +732,8 @@ def dequantize_per_token(
return input
-@impl(quantized_decomposed_lib, "dequantize_per_token", "Meta")
-def dequantize_per_token_meta(
+@dequantize_per_token.register_fake
+def _(
input: torch.Tensor,
scales: torch.Tensor,
zero_points: torch.Tensor,
@@ -824,16 +747,7 @@ def dequantize_per_token_meta(
return torch.empty_like(input, dtype=output_dtype)
-quantized_decomposed_lib.define(
- "quantize_per_channel_group(Tensor input, Tensor scales, Tensor zero_points, int quant_min, "
- "int quant_max, ScalarType dtype, int group_size) -> Tensor"
-)
-
-
-# TODO: dtype is ignored for now
-@impl(
- quantized_decomposed_lib, "quantize_per_channel_group", "CompositeExplicitAutograd"
-)
+@custom_op(f"{ns}::quantize_per_channel_group", mutates_args=())
def quantize_per_channel_group(
input: torch.Tensor,
scales: torch.Tensor,
@@ -841,8 +755,8 @@ def quantize_per_channel_group(
quant_min: int,
quant_max: int,
dtype: torch.dtype,
- group_size=128,
-):
+ group_size: int,
+) -> torch.Tensor:
assert group_size > 1
# needed for GPTQ single column quantize
if group_size > input.shape[-1] and scales.shape[-1] == 1:
@@ -870,16 +784,16 @@ def quantize_per_channel_group(
return input_int8
-@impl(quantized_decomposed_lib, "quantize_per_channel_group", "Meta")
-def quantize_per_channel_group_meta(
+@quantize_per_channel_group.register_fake
+def _(
input: torch.Tensor,
scales: torch.Tensor,
zero_points: torch.Tensor,
quant_min: int,
quant_max: int,
dtype: torch.dtype,
- group_size=128,
-):
+ group_size,
+) -> torch.Tensor:
"""Groupwise quantization within each channel for an 2-d Tensor using the quantization parameters
to map from floating point to quantized values. This means for each row of a 2-d Tensor
(M, N), we calculate scales/zero_points for each `group_size` elements
@@ -908,17 +822,7 @@ def quantize_per_channel_group_meta(
return torch.empty_like(input, dtype=dtype)
-quantized_decomposed_lib.define(
- "dequantize_per_channel_group(Tensor input, Tensor scales, Tensor? zero_points, int quant_min, "
- "int quant_max, ScalarType dtype, int group_size, ScalarType output_dtype) -> Tensor"
-)
-
-
-@impl(
- quantized_decomposed_lib,
- "dequantize_per_channel_group",
- "CompositeExplicitAutograd",
-)
+@custom_op(f"{ns}::dequantize_per_channel_group", mutates_args=())
def dequantize_per_channel_group(
w_int8: torch.Tensor,
scales: torch.Tensor,
@@ -926,9 +830,9 @@ def dequantize_per_channel_group(
quant_min: int,
quant_max: int,
dtype: torch.dtype,
- group_size: int = 128,
- output_dtype: torch.dtype = torch.float32,
-):
+ group_size: int,
+ output_dtype: torch.dtype,
+) -> torch.Tensor:
"""Groupwise dequantization within each channel for an 2-d Tensor using the quantization parameters
to map from floating point to quantized values. This means for each row of a 2-d Tensor
(M, N), we calculate scales/zero_points for each `group_size` elements
@@ -965,6 +869,10 @@ def dequantize_per_channel_group(
return w_dq
+quantized_decomposed_lib = Library(ns, "DEF")
+
+# TODO: Migrate this to the new torch.library.custom_ops API. This requires a refactor
+# of the autograd.Function. We leave this work to the future.
quantized_decomposed_lib.define(
"fake_quant_per_channel(Tensor input, Tensor scales, Tensor zero_points, int axis, "
"int quant_min, int quant_max) -> Tensor") | 2.41.0 |
34e56fa3352aefa208b33b0a86aaabed8033f7a | Wed, 10 Apr 2024 15:10:59 -0700 | [PATCH 0018/1000] inductor: log unique id to match output_code to aot graphs (#118647) |
I found it helpful to be able to see, given some inductor output code, which AOT graph it came from. When you have large models with multiple graphs floating around, this can be difficult, so I added the aot_config.aot_id to the printed inductor output.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/118647
Approved by: https://github.com/ezyang
| diff --git a/torch/_functorch/_aot_autograd/logging_utils.py b/torch/_functorch/_aot_autograd/logging_utils.py
index 28f82555ac..414166cbdd 100644
--- a/torch/_functorch/_aot_autograd/logging_utils.py
+++ b/torch/_functorch/_aot_autograd/logging_utils.py
@@ -46,12 +46,22 @@ def track_graph_compiling(aot_config, graph_name):
global graph_being_compiled
# TODO: Don't shove the aot_id in here; set it in the context
graph_being_compiled = [f"{aot_config.aot_id}_{graph_name}"]
+ old_name = None
+ if tracing_context := torch._guards.TracingContext.try_get():
+ old_name = tracing_context.aot_graph_name
+ tracing_context.aot_graph_name = graph_being_compiled
+ has_tracing_context = True
+ else:
+ has_tracing_context = False
try:
yield
finally:
global nth_graph
nth_graph += 1
graph_being_compiled = []
+ if has_tracing_context:
+ if tracing_context := torch._guards.TracingContext.try_get():
+ tracing_context.aot_graph_name = old_name
# Set up hooks so that during backward the fx's stack_trace is properly set
diff --git a/torch/_guards.py b/torch/_guards.py
index 09ed4a85b3..5f4c6d9941 100644
--- a/torch/_guards.py
+++ b/torch/_guards.py
@@ -615,6 +615,8 @@ class TracingContext:
self.loc_in_frame = None
# this is only set after aot_autograd
self.fw_metadata = None
+ # this is only set after aot_autograd
+ self.aot_graph_name = None
self.params_flat = None
# this is for extended return calling convention from backend
# compiler to aot_autograd
diff --git a/torch/_inductor/codegen/wrapper.py b/torch/_inductor/codegen/wrapper.py
index 1098112680..3ffab72a19 100644
--- a/torch/_inductor/codegen/wrapper.py
+++ b/torch/_inductor/codegen/wrapper.py
@@ -473,8 +473,13 @@ class WrapperCodeGen(CodeGen):
self.header.writeline(f"{name} = None # {hashed}")
def write_header(self) -> None:
+ context = torch._guards.TracingContext.try_get()
+ aot_config_comment = ""
+ if context is not None and context.aot_graph_name is not None:
+ aot_config_comment = f"# AOT ID: {context.aot_graph_name}"
self.header.splice(
f"""
+ {aot_config_comment}
from ctypes import c_void_p, c_long
import torch
import math | 2.41.0 |
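As an illustration of the wrapper change above, the generated inductor output code gains a comment like the following near the top of its header (the ID string here is illustrative):

```
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long
import torch
import math
```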
83900887f2fb5c7a04e7fd78ad8de7a20f356d4 | Wed, 10 Apr 2024 14:19:07 -0700 | [PATCH 0019/1000] [quant] Enable backward for choose_qparams_per_token_asymmetric (#123452) | Summary: When running the backward for this op, we get the error: ``` RuntimeError: derivative for aten::aminmax is not implemented ``` This commit replaces this call with separate amin and amax calls instead, which do have implemented derivatives. Test Plan: python test/test_quantization.py -k test_decomposed_choose_qparams_per_token_asymmetric_backward Reviewers: jerryzh168, digantdesai Subscribers: jerryzh168, digantdesai, supriyar Differential Revision: [D55805170](https://our.internmc.facebook.com/intern/diff/D55805170) Pull Request resolved: https://github.com/pytorch/pytorch/pull/123452 Approved by: https://github.com/digantdesai, https://github.com/jerryzh168 | diff --git a/test/quantization/core/test_quantized_tensor.py b/test/quantization/core/test_quantized_tensor.py
index b2bd97bdc3..228f1f8ee7 100644
--- a/test/quantization/core/test_quantized_tensor.py
+++ b/test/quantization/core/test_quantized_tensor.py
@@ -1602,6 +1602,14 @@ class TestQuantizedTensor(TestCase):
self.assertEqual(quantized_X.int_repr(), quantized_decomposed_X)
self.assertEqual(dequantized_X, dequantized_decomposed_X)
+ def test_decomposed_choose_qparams_per_token_asymmetric_backward(self):
+ # register the ops
+ import torch.ao.quantization.fx._decomposed
+ x = torch.randn(2, 3).requires_grad_()
+ (s, zp) = torch.ops.quantized_decomposed.choose_qparams_per_token_asymmetric(x, torch.int8)
+ out = x.div(s).add(zp).round()
+ out.sum().backward()
+
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_quantization.py TESTNAME\n\n"
diff --git a/torch/ao/quantization/fx/_decomposed.py b/torch/ao/quantization/fx/_decomposed.py
index 67f7b3f509..94fdd8a6f5 100644
--- a/torch/ao/quantization/fx/_decomposed.py
+++ b/torch/ao/quantization/fx/_decomposed.py
@@ -606,7 +606,8 @@ def choose_qparams_per_token_asymmetric(
"""
# Based on https://github.com/google/XNNPACK/blob/df156f0cf3db5a4576cc711123eeb54915f82ffc/src/xnnpack/quantization.h#L18
qmin, qmax = -128, 127
- min_val, max_val = torch.aminmax(input, dim=-1, keepdim=True)
+ min_val = torch.amin(input, dim=-1, keepdim=True)
+ max_val = torch.amax(input, dim=-1, keepdim=True)
min_val_neg = torch.min(min_val, torch.zeros_like(min_val))
max_val_pos = torch.max(max_val, torch.zeros_like(max_val))
eps = torch.finfo(torch.float32).eps # use xnnpack eps? | 2.41.0 |
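A small sketch of the workaround described above: `torch.amin`/`torch.amax` are differentiable, so splitting the `aminmax` call keeps the qparams computation usable in backward (shapes and values here are illustrative):

```
import torch

x = torch.randn(2, 3, requires_grad=True)

# Same idea as the diff: separate amin/amax calls instead of torch.aminmax,
# which (per the commit message) had no registered derivative at the time.
min_val = torch.amin(x, dim=-1, keepdim=True)
max_val = torch.amax(x, dim=-1, keepdim=True)

(max_val - min_val).sum().backward()
print(x.grad.shape)  # torch.Size([2, 3])
```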
fa36ef09210b67022439b49eee01d7b63bd6d96 | Wed, 10 Apr 2024 19:31:01 -0400 | [PATCH 0020/1000] Natively support int truncation, don't guard on positive/negative (#122827) | This doesn't entirely fix the original problem that prompted this, but it seems to just be getting stuck in export constraint formatting now which seems like progress to me. Signed-off-by: Edward Z. Yang <[email protected]> Pull Request resolved: https://github.com/pytorch/pytorch/pull/122827 Approved by: https://github.com/avikchaudhuri | diff --git a/test/export/test_export.py b/test/export/test_export.py
index 80a6f0b993..d04ab18384 100644
--- a/test/export/test_export.py
+++ b/test/export/test_export.py
@@ -1086,6 +1086,36 @@ class TestExport(TestCase):
inps = (torch.ones(6, 4), torch.tensor(5), torch.tensor(4))
self._test_export_same_as_eager(list_tensor_map, inps)
+ @unittest.expectedFailure
+ def test_crop_like(self):
+ # https://fb.workplace.com/groups/1405155842844877/posts/8195050017188725/
+
+ # Minimal crop code copied from https://github.com/pytorch/vision/blob/main/torchvision/transforms/v2/functional
+ class CropLike(torch.nn.Module):
+ def forward(self, image, crop_height, crop_width):
+ c, image_height, image_width = image.shape
+ crop_top = int(round((image_height - crop_height) / 2.0))
+ crop_left = int(round((image_width - crop_width) / 2.0))
+ return image[
+ ...,
+ crop_top : crop_top + crop_height,
+ crop_left : crop_left + crop_width,
+ ]
+
+ crop = CropLike()
+ imagew = Dim("width")
+ imageh = Dim("height")
+ dynamic_dims = {
+ "image": {0: None, 1: imageh, 2: imagew},
+ "crop_height": None,
+ "crop_width": None,
+ }
+ args = (torch.rand(3, 512, 512), 150, 150)
+ ecrop = export(crop, args=args, dynamic_shapes=dynamic_dims)
+
+ args = (torch.rand(3, 700, 700), 150, 150)
+ self.assertEqual(ecrop.module()(*args), ecrop(*args))
+
def test_export_func_with_kwargs(self):
class Module(torch.nn.Module):
def forward(self, arg1, arg2, kw1, kw2):
diff --git a/test/test_dynamic_shapes.py b/test/test_dynamic_shapes.py
index 674839be4c..752638104d 100644
--- a/test/test_dynamic_shapes.py
+++ b/test/test_dynamic_shapes.py
@@ -404,13 +404,13 @@ class TestPySymInt(TestCase):
r = sym_int(a1 / 2)
self.assertEqual(guard_int(r), 3)
self.assertIsInstance(r, torch.SymInt, msg=type(r))
- self.assertExpectedInline(str(shape_env.guards[1][0]), """Eq(floor(s1/2), 3)""")
+ self.assertExpectedInline(str(shape_env.guards[1][0]), """Eq(Trunc(s1/2), 3)""")
a3 = create_symint(shape_env, 3)
r = sym_int(2.0 * torch.sym_float(a3))
self.assertEqual(guard_int(r), 6)
self.assertIsInstance(r, torch.SymInt, msg=type(r))
- self.assertExpectedInline(str(shape_env.guards[2][0]), """Eq(2*s2, 6)""")
+ self.assertExpectedInline(str(shape_env.guards[2][0]), """Eq(Trunc(2.0*s2), 6)""")
def test_sym_sqrt(self):
shape_env = ShapeEnv()
@@ -432,6 +432,18 @@ class TestPySymInt(TestCase):
self.assertIsInstance(r, torch.SymInt, msg=type(r))
self.assertExpectedInline(str(shape_env.guards[1][0]), """Eq(3*s0, 15)""")
+ def test_sym_trunc(self):
+ shape_env = ShapeEnv()
+ a0 = create_symint(shape_env, 5)
+ r = math.trunc(a0 / 2)
+ self.assertEqual(r, 2)
+ self.assertIsInstance(r, torch.SymInt, msg=type(r))
+ self.assertExpectedInline(str(shape_env.guards[0][0]), """Eq(Trunc(s0/2), 2)""")
+ r = torch.sym_int(torch.sym_sqrt(a0))
+ self.assertEqual(r, 2)
+ self.assertIsInstance(r, torch.SymInt, msg=type(r))
+ self.assertExpectedInline(str(shape_env.guards[1][0]), """Eq(Trunc(OpaqueUnaryFn_sqrt(s0)), 2)""")
+
def test_sym_ceil(self):
shape_env = ShapeEnv()
a0 = create_symint(shape_env, 5)
diff --git a/test/test_proxy_tensor.py b/test/test_proxy_tensor.py
index 67445dae25..74eac04572 100644
--- a/test/test_proxy_tensor.py
+++ b/test/test_proxy_tensor.py
@@ -1898,7 +1898,6 @@ symbolic_tensor_failures = {
xfail('nn.functional.binary_cross_entropy', ''), # aten.new_empty.default - couldn't find symbolic meta function/decom...
xfail('nn.functional.cross_entropy', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('nn.functional.ctc_loss'), # aten._ctc_loss.Tensor - couldn't find symbolic meta function/decomposition
- xfail('nn.functional.fractional_max_pool2d', ''), # argument 'size' must be tuple of ints, but found element of t...
xfail('nn.functional.fractional_max_pool3d', ''), # argument 'size' must be tuple of ints, but found element of t...
xfail('quantile', ''), # Could not run 'aten::equal' with arguments from the 'Meta' backend.
xfail('resize_as_', ''), # aten.clone.default - couldn't find symbolic meta function/decomposition
diff --git a/torch/__init__.py b/torch/__init__.py
index ec5234bd69..3a10130d5f 100644
--- a/torch/__init__.py
+++ b/torch/__init__.py
@@ -339,6 +339,9 @@ class SymFloat:
def __ge__(self, other) -> builtins.bool:
raise AssertionError("type stub not overridden")
+ def __trunc__(self):
+ raise AssertionError("type stub not overridden")
+
def __sym_max__(self, other):
raise AssertionError("type stub not overridden")
@@ -465,7 +468,7 @@ def sym_int(a):
if isinstance(a, SymInt):
return a
elif isinstance(a, SymFloat):
- return math.floor(a) if a >= 0 else math.ceil(a) # type: ignore[arg-type, call-overload]
+ return math.trunc(a)
return py_int(a) # type: ignore[operator]
def sym_max(a, b):
diff --git a/torch/_inductor/codegen/common.py b/torch/_inductor/codegen/common.py
index e34d3311d1..4caa4dc367 100644
--- a/torch/_inductor/codegen/common.py
+++ b/torch/_inductor/codegen/common.py
@@ -449,6 +449,10 @@ class PythonPrinter(ExprPrinter):
assert len(expr.args) == 1
return f"math.floor({self._print(expr.args[0])})"
+ def _print_Trunc(self, expr):
+ assert len(expr.args) == 1
+ return f"math.trunc({self._print(expr.args[0])})"
+
def _print_ceiling(self, expr):
assert len(expr.args) == 1
return f"math.ceil({self._print(expr.args[0])})"
diff --git a/torch/_inductor/codegen/cpp.py b/torch/_inductor/codegen/cpp.py
index 0fe4b7261a..26a68ea837 100644
--- a/torch/_inductor/codegen/cpp.py
+++ b/torch/_inductor/codegen/cpp.py
@@ -562,6 +562,11 @@ class CppPrinter(ExprPrinter):
r = f"std::floor({self._print(expr.args[0])})"
return f"static_cast<{INDEX_TYPE}>({r})" if expr.is_integer else r
+ def _print_Trunc(self, expr):
+ assert len(expr.args) == 1
+ r = f"std::trunc({self._print(expr.args[0])})"
+ return f"static_cast<{INDEX_TYPE}>({r})" if expr.is_integer else r
+
def _print_Pow(self, expr):
# Uses float constants to perform FP div
base, exp = expr.args
diff --git a/torch/_inductor/codegen/triton.py b/torch/_inductor/codegen/triton.py
index 508382f4f3..0133c3585d 100644
--- a/torch/_inductor/codegen/triton.py
+++ b/torch/_inductor/codegen/triton.py
@@ -305,6 +305,12 @@ class TritonPrinter(PythonPrinter):
f"libdevice.floor({self._print(expr.args[0])}).to({V.kernel.index_dtype})"
)
+ def _print_Trunc(self, expr):
+ assert len(expr.args) == 1
+ return (
+ f"libdevice.trunc({self._print(expr.args[0])}).to({V.kernel.index_dtype})"
+ )
+
def _print_ceiling(self, expr):
assert len(expr.args) == 1
return f"libdevice.ceil({self._print(expr.args[0])}).to({V.kernel.index_dtype})"
diff --git a/torch/fx/experimental/sym_node.py b/torch/fx/experimental/sym_node.py
index 783196d39c..8ec9b816be 100644
--- a/torch/fx/experimental/sym_node.py
+++ b/torch/fx/experimental/sym_node.py
@@ -233,6 +233,9 @@ class SymNode:
def round(self, ndigits=None) -> "SymNode":
return self._round(ndigits) # type: ignore[attr-defined]
+ def trunc(self) -> "SymNode":
+ return self._trunc() # type: ignore[attr-defined]
+
def add(self, other) -> "SymNode":
return self._add(other) # type: ignore[attr-defined]
@@ -454,6 +457,7 @@ METHOD_TO_OPERATOR = {
"ceil": math.ceil,
"eq": operator.eq,
"floor": math.floor,
+ "trunc": math.trunc,
"floordiv": operator.floordiv,
"ge": operator.ge,
"gt": operator.gt,
@@ -486,6 +490,7 @@ unary_magic_methods = {
"neg",
"sym_not",
"pos",
+ "trunc",
}
@@ -548,7 +553,7 @@ for name in math_op_names:
always_float_magic_methods.add(sym_name)
-always_int_magic_methods = {"ceil", "floor"}
+always_int_magic_methods = {"ceil", "floor", "trunc"}
always_bool_magic_methods = {
"eq",
"ne",
@@ -653,6 +658,12 @@ def _sympy_floor(a):
return _floor_ceil_helper(a, sympy.floor)
+def _sympy_trunc(a):
+ from torch.utils._sympy.functions import Trunc
+
+ return Trunc(a)
+
+
def _sympy_ceil(a):
import sympy
@@ -774,6 +785,7 @@ magic_methods = {
"le": _sympy_le,
"ge": _sympy_ge,
"floor": _sympy_floor,
+ "trunc": _sympy_trunc,
"sym_float": _sympy_sym_float,
"ceil": _sympy_ceil,
"neg": operator.neg,
diff --git a/torch/fx/experimental/symbolic_shapes.py b/torch/fx/experimental/symbolic_shapes.py
index 06356f70cd..b634f6e313 100644
--- a/torch/fx/experimental/symbolic_shapes.py
+++ b/torch/fx/experimental/symbolic_shapes.py
@@ -1717,7 +1717,7 @@ class DimConstraints:
elif left.isdigit():
relation_with_digit(right, flip(op), int(left))
else:
- assert op == "=="
+ assert op == "==", t
results[left]["eq"] = sympy.sympify(right)
buf = ""
diff --git a/torch/utils/_sympy/functions.py b/torch/utils/_sympy/functions.py
index 48ad414512..427333b07c 100644
--- a/torch/utils/_sympy/functions.py
+++ b/torch/utils/_sympy/functions.py
@@ -328,6 +328,17 @@ class IsNonOverlappingAndDenseIndicator(sympy.Function):
return None
+class Trunc(sympy.Function):
+ is_integer = True
+
+ @classmethod
+ def eval(cls, number):
+ if number.is_integer:
+ return number
+ elif isinstance(number, sympy.Number):
+ return sympy.Integer(math.trunc(float(number)))
+
+
class Round(sympy.Function):
is_integer = True
diff --git a/torch/utils/_sympy/interp.py b/torch/utils/_sympy/interp.py
index 8f67f891f9..806e91cfe2 100644
--- a/torch/utils/_sympy/interp.py
+++ b/torch/utils/_sympy/interp.py
@@ -24,6 +24,7 @@ from .functions import (
Round,
RoundDecimal,
TrueDiv,
+ Trunc,
Where,
)
@@ -51,6 +52,7 @@ def handlers():
TrueDiv: "truediv",
FloorDiv: "floordiv",
CleanDiv: "div",
+ Trunc: "trunc",
Where: "where",
sympy.Add: "add",
sympy.Mul: "mul",
diff --git a/torch/utils/_sympy/value_ranges.py b/torch/utils/_sympy/value_ranges.py
index 7d03dfd6ee..a056db6dbb 100644
--- a/torch/utils/_sympy/value_ranges.py
+++ b/torch/utils/_sympy/value_ranges.py
@@ -745,6 +745,13 @@ class SymPyValueRangeAnalysis:
def atan(x):
return ValueRanges.increasing_map(x, OpaqueUnaryFn_atan)
+ @staticmethod
+ def trunc(x):
+ def trunc(x):
+ return sympy.Integer(x) if x.is_finite else x
+
+ return ValueRanges.increasing_map(x, trunc)
+
class ValueRangeAnalysis(SymPyValueRangeAnalysis):
def __init__(self):
@@ -829,10 +836,7 @@ class ValueRangeAnalysis(SymPyValueRangeAnalysis):
if x == ValueRanges.unknown():
return x
- def trunc(x):
- return sympy.Integer(x) if x.is_finite else x
-
- return ValueRanges.increasing_map(x, trunc)
+ return cls.trunc(x)
@classmethod
def sub(cls, a, b): | 2.41.0 |
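A plain-Python sketch of the semantics behind the `sym_int` change above: rounding toward zero with `math.trunc` matches the old floor-or-ceil branch for either sign, but needs no guard on the sign of the operand:

```
import math

def old_sym_int(a: float) -> int:
    # previous lowering: branches on the sign, which forces a guard on a >= 0
    # when `a` is a symbolic float
    return math.floor(a) if a >= 0 else math.ceil(a)

def new_sym_int(a: float) -> int:
    # trunc rounds toward zero for either sign, so no branch (and no guard)
    return math.trunc(a)

for a in (2.7, -2.7, 5 / 2, -5 / 2, 0.0):
    assert old_sym_int(a) == new_sym_int(a)
```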
02374cc091e549c586b72c9b252d33256ec921e | Thu, 11 Apr 2024 17:34:47 +0000 | [PATCH 0023/1000] [CI] show doc coverage repro instructions (#123688) | remind devs they can reproduce the doc coverage error locally with following msg ```You can reproduce locally by running 'cd pytorch/docs && make coverage && cat build/coverage/python.txt'``` I spent 20min to figure out how to test locally so want to enrich the error msg <img width="542" alt="Screenshot 2024-04-09 at 5 22 45 PM" src="https://github.com/pytorch/pytorch/assets/134637289/2c619d9d-74b5-4bda-8903-999ef5c255c2"> Pull Request resolved: https://github.com/pytorch/pytorch/pull/123688 Approved by: https://github.com/clee2000 | diff --git a/.ci/pytorch/python_doc_push_script.sh b/.ci/pytorch/python_doc_push_script.sh
index ce14ac1d02..d4076d3469 100755
--- a/.ci/pytorch/python_doc_push_script.sh
+++ b/.ci/pytorch/python_doc_push_script.sh
@@ -105,6 +105,7 @@ if [ "$is_main_doc" = true ]; then
echo undocumented objects found:
cat build/coverage/python.txt
echo "Make sure you've updated relevant .rsts in docs/source!"
+ echo "You can reproduce locally by running 'cd docs && make coverage && cat build/coverage/python.txt'"
exit 1
fi
else | 2.41.0 |
9c565b24e6c305c09c8c908e27f4023f41dd567 | Wed, 10 Apr 2024 18:54:51 -0700 | [PATCH 0024/1000] [inductor] Write generated files from parent process (#123409) | Before this PR we would pass generated source code over a pipe to the compile worker then the compile worker would write out the file. Doing it this way is faster and results in smaller messages to the workers (and lets us skip creating the workers in the warm start case). Pull Request resolved: https://github.com/pytorch/pytorch/pull/123409 Approved by: https://github.com/desertfire | diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index 98cf75fc23..4e84838504 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -59,12 +59,7 @@ from torch._dynamo.device_interface import (
from torch._dynamo.utils import counters, dynamo_timed
from torch._inductor import config, exc, metrics
from torch._inductor.codegen.cuda import cuda_env
-from torch._inductor.utils import (
- cache_dir,
- clear_on_fresh_inductor_cache,
- developer_warning,
- is_linux,
-)
+from torch._inductor.utils import cache_dir, clear_on_fresh_inductor_cache, is_linux
from torch._subclasses.fake_tensor import (
extract_tensor_metadata,
FakeTensor,
@@ -2021,7 +2016,7 @@ def custom_op_wrapper(op: str, *args):
@clear_on_fresh_inductor_cache
class CppCodeCache:
- cache: Dict[str, Union[CDLL, ModuleType]] = {}
+ cache: Dict[str, Callable[[], Union[CDLL, ModuleType]]] = {}
cache_clear = staticmethod(cache.clear)
cpp_compile_command_flags: Dict[str, Any] = {}
@@ -2032,13 +2027,17 @@ class CppCodeCache:
@classmethod
def _load_library(cls, path: str, key: str) -> Union[CDLL, ModuleType]:
try:
- return cls._load_library_inner(path, key)
+ result = cls._load_library_inner(path, key)
+ result.key = key # type: ignore[union-attr]
+ return result
except (ImportError, OSError) as e:
if "gomp" in str(e) and os.path.exists("/usr/lib64/libgomp.so.1"):
# hacky workaround for fbcode/buck
global _libgomp
_libgomp = cdll.LoadLibrary("/usr/lib64/libgomp.so.1")
- return cls._load_library_inner(path, key)
+ result = cls._load_library_inner(path, key)
+ result.key = key # type: ignore[union-attr]
+ return result
if "failed to map segment from shared object" in str(e):
raise OSError(
f"{e}. The most common reason this may occur is if the {tempfile.gettempdir()} folder "
@@ -2049,42 +2048,68 @@ class CppCodeCache:
raise
@classmethod
- def load(cls, source_code: str, cuda: bool = False) -> Union[CDLL, ModuleType]:
- cls.cpp_compile_command_flags.update({"cuda": cuda})
- picked_vec_isa = pick_vec_isa()
- cpp_command = repr(
- cpp_compile_command(
- "i", "o", vec_isa=picked_vec_isa, **cls.cpp_compile_command_flags
- )
- )
+ def load_async(cls, source_code: str, cuda=False, submit_fn=None):
+ compile_command = {
+ **cls.cpp_compile_command_flags,
+ "cuda": cuda,
+ "vec_isa": pick_vec_isa(),
+ }
+ cpp_command = repr(cpp_compile_command("i", "o", **compile_command))
key, input_path = write(source_code, "cpp", extra=cpp_command)
+
if key not in cls.cache:
from filelock import FileLock
- lock_dir = get_lock_dir()
- lock = FileLock(os.path.join(lock_dir, key + ".lock"), timeout=LOCK_TIMEOUT)
- with lock:
- output_path = input_path[:-3] + "so"
- if not os.path.exists(output_path):
- cmd = shlex.split(
- cpp_compile_command(
- input=input_path,
- output=output_path,
- vec_isa=picked_vec_isa,
- **cls.cpp_compile_command_flags,
- )
- )
- compile_file(input_path, output_path, cmd)
- cls.cache[key] = cls._load_library(output_path, key)
- cls.cache[key].key = key # type: ignore[union-attr]
+ lock_path = os.path.join(get_lock_dir(), key + ".lock")
+ output_path = input_path[:-3] + "so"
+ future: Optional[Future[Any]] = None
+ lib = None
+ worker_fn = functools.partial(
+ _worker_compile_cpp,
+ lock_path,
+ input_path,
+ output_path,
+ cpp_compile_command(
+ input=input_path, output=output_path, **compile_command
+ ),
+ )
+
+ def load_fn():
+ nonlocal lib
+ if lib is None:
+ if future is not None:
+ future.result()
+ worker_fn()
+ lib = cls._load_library(output_path, key)
+ assert lib is not None
+ return lib
+
+ if submit_fn is not None:
+ with FileLock(lock_path, timeout=LOCK_TIMEOUT):
+ if not os.path.exists(output_path):
+ future = submit_fn(worker_fn)
+
+ cls.cache[key] = load_fn
return cls.cache[key]
+ @classmethod
+ def load(cls, source_code: str, cuda: bool = False):
+ return cls.load_async(source_code, cuda)()
+
+
+def _worker_compile_cpp(lock_path, input_path, output_path, cmd):
+ from filelock import FileLock
+
+ with FileLock(lock_path, timeout=LOCK_TIMEOUT):
+ if not os.path.exists(output_path):
+ compile_file(input_path, output_path, shlex.split(cmd))
+
# Customized Python binding for cpp kernels
@clear_on_fresh_inductor_cache
class CppPythonBindingsCodeCache(CppCodeCache):
- cache: Dict[str, Union[CDLL, ModuleType]] = {}
+ cache: Dict[str, Callable[[], Union[CDLL, ModuleType]]] = {}
cache_clear = staticmethod(cache.clear)
cpp_compile_command_flags = {
# kernels have no dependency on libtorch
@@ -2176,12 +2201,13 @@ class CppPythonBindingsCodeCache(CppCodeCache):
return module
@classmethod
- def load_pybinding(
+ def load_pybinding_async(
cls,
argtypes: List[str],
source_code: str,
cuda: bool = False,
num_outputs: int = -1,
+ submit_fn=None,
) -> Any:
"""
Wrap a C++ function in fast Python bindings.
@@ -2209,14 +2235,26 @@ class CppPythonBindingsCodeCache(CppCodeCache):
cls.entry_function,
cls.entry_function,
)
- result = cls.load(source_code + suffix, cuda)
- assert isinstance(result, ModuleType)
- return getattr(result, cls.entry_function)
+ get_result = cls.load_async(source_code + suffix, cuda, submit_fn=submit_fn)
+ result = None
+
+ def future():
+ nonlocal result
+ if result is None:
+ result = get_result()
+ assert isinstance(result, ModuleType)
+ return getattr(result, cls.entry_function)
+
+ return future
+
+ @classmethod
+ def load_pybinding(cls, *args, **kwargs) -> Any:
+ return cls.load_pybinding_async(*args, **kwargs)()
@clear_on_fresh_inductor_cache
class CppWrapperCodeCache(CppPythonBindingsCodeCache):
- cache: Dict[str, Union[CDLL, ModuleType]] = {}
+ cache: Dict[str, Callable[[], Union[CDLL, ModuleType]]] = {}
cache_clear = staticmethod(cache.clear)
cpp_compile_command_flags = {
"include_pytorch": not config.abi_compatible,
@@ -2277,6 +2315,10 @@ class CppWrapperCodeCache(CppPythonBindingsCodeCache):
)
+def _reload_python_module_in_subproc(key, path):
+ return PyCodeCache.load_by_key_path(key, path)
+
+
@clear_on_fresh_inductor_cache
class PyCodeCache:
cache: Dict[str, ModuleType] = dict()
@@ -2330,6 +2372,11 @@ class PyCodeCache:
for k, v in attrs.items():
setattr(mod, k, v)
+ if not (linemap or attrs):
+ mod._reload_in_subproc = functools.partial( # type: ignore[attr-defined]
+ _reload_python_module_in_subproc, key, path
+ )
+
return cls.cache[key]
@classmethod
@@ -2361,11 +2408,25 @@ class PyCodeCache:
return parse_stack_trace(entry)
+def _reload_triton_kernel_in_subproc(reload_module, kernel_name):
+ return TritonCodeCache._mod_to_kernel(reload_module(), kernel_name)
+
+
class TritonCodeCache:
@classmethod
def load(cls, kernel_name: str, source_code: str) -> ModuleType:
mod = PyCodeCache.load(source_code)
- return getattr(mod, kernel_name)
+ return cls._mod_to_kernel(mod, kernel_name)
+
+ @classmethod
+ def _mod_to_kernel(cls, mod, kernel_name):
+ kernel = getattr(mod, kernel_name)
+ kernel._reload_in_subproc = functools.partial(
+ _reload_triton_kernel_in_subproc,
+ mod._reload_in_subproc,
+ kernel_name,
+ )
+ return kernel
def _cuda_compiler() -> Optional[str]:
@@ -2652,6 +2713,7 @@ def caching_device_properties():
device_interface.Worker.get_device_properties()
+@functools.lru_cache(None)
def _set_triton_ptxas_path() -> None:
if os.environ.get("TRITON_PTXAS_PATH") is not None:
return
@@ -2666,54 +2728,50 @@ def _set_triton_ptxas_path() -> None:
warnings.warn(f"{ptxas_path} exists but is not an executable")
-def _worker_compile(
- kernel_name: str,
- source_code: str,
+def _worker_compile_triton(
+ load_kernel: Callable[[], Any],
cc: int,
device: torch.device,
device_interface: Type[DeviceInterface],
-) -> None:
+):
+ _set_triton_ptxas_path()
device_interface.Worker.set_device(device.index)
- kernel = TritonCodeCache.load(kernel_name, source_code)
+ kernel = load_kernel()
kernel.precompile(warm_cache_only_with_cc=cc)
-def _load_kernel(kernel_name: str, source_code: str) -> ModuleType:
- _set_triton_ptxas_path()
- kernel = TritonCodeCache.load(kernel_name, source_code)
- kernel.precompile()
- return kernel
+class CodeCacheFuture:
+ def result(self):
+ raise NotImplementedError()
-class TritonFuture:
+class TritonFuture(CodeCacheFuture):
kernel: ModuleType
def __init__(
self,
- kernel_name: str,
- source_code: str,
- future: Future[Any],
+ kernel: Any,
+ future: Optional[Future[Any]],
) -> None:
- self.kernel_name = kernel_name
- self.source_code = source_code
+ self.kernel = kernel
self.future = future
# @dynamo_utils.dynamo_timed
def result(self) -> ModuleType:
- t0 = time()
- if hasattr(self, "kernel"):
- return self.kernel
- # If the worker failed this will throw an exception.
- self.future.result()
- kernel = self.kernel = _load_kernel(self.kernel_name, self.source_code)
- latency = time() - t0
- if latency > 50:
- developer_warning(
- f"Detected long compilation time of {latency} seconds for kernel name {self.kernel_name}"
- )
- developer_warning(self.source_code)
- del self.kernel_name, self.source_code, self.future
- return kernel
+ if self.future is not None:
+ # If the worker failed this will throw an exception.
+ self.future.result()
+ self.future = None
+ self.kernel.precompile()
+ return self.kernel
+
+
+class LambdaFuture(CodeCacheFuture):
+ def __init__(self, result_fn):
+ self.result_fn = result_fn
+
+ def result(self):
+ return self.result_fn()
# If this process dies abnormally (e.g. segfault)
@@ -2747,10 +2805,21 @@ _pool_set: Set[ProcessPoolExecutor] = set()
def shutdown_compile_workers() -> None:
"""Shut down all outstanding compile-worker pools."""
- global _pool_set
for pool in _pool_set:
pool.shutdown()
+ after_fork()
+
+
+def after_fork():
+ """Reset pools to initial state without shutting them down"""
_pool_set.clear()
+ AsyncCompile.process_pool.cache_clear()
+
+
+try:
+ os.register_at_fork(after_in_child=after_fork)
+except AttributeError:
+ pass # register_at_fork does not exists on windows
class AsyncCompile:
@@ -2825,21 +2894,26 @@ class AsyncCompile:
return task()
return cls.pool().submit(task)
- def triton(
- self, kernel_name: str, source_code: str, device_str: str = "cuda"
- ) -> Union[TritonFuture, ModuleType]:
+ def triton(self, kernel_name: str, source_code: str, device_str: str = "cuda"):
_compile_start()
+ _set_triton_ptxas_path()
+ kernel = TritonCodeCache.load(kernel_name, source_code)
if config.compile_threads > 1:
device_interface = get_interface_for_device(device_str)
device = torch.device(device_str, device_interface.current_device())
cc = device_interface.get_compute_capability(device)
future = self.process_pool().submit(
- _worker_compile, kernel_name, source_code, cc, device, device_interface
+ _worker_compile_triton,
+ kernel._reload_in_subproc,
+ cc,
+ device,
+ device_interface,
)
- return TritonFuture(kernel_name, source_code, future)
+ return TritonFuture(kernel, future)
else:
- return _load_kernel(kernel_name, source_code)
+ kernel.precompile()
+ return kernel
def multi_kernel(self, *args, **kwargs) -> Any:
from torch._inductor.codegen.multi_kernel import MultiKernelCall
@@ -2847,18 +2921,21 @@ class AsyncCompile:
# no need to call this in parallel since the sub-kernels are already parallel tasks
return MultiKernelCall(*args, **kwargs)
- def cpp(self, source_code: str) -> ModuleType:
- def task():
+ def cpp(self, source_code: str):
+ if config.compile_threads <= 1:
return CppCodeCache.load(source_code).kernel
+ else:
+ get_result = CppCodeCache.load_async(source_code, submit_fn=self.submit)
+ return LambdaFuture(lambda: get_result().kernel)
- return self.submit(task)
-
- def cpp_pybinding(self, argtypes: List[str], source_code: str) -> ModuleType:
- return self.submit(
- functools.partial(
- CppPythonBindingsCodeCache.load_pybinding, argtypes, source_code
+ def cpp_pybinding(self, argtypes: List[str], source_code: str):
+ if config.compile_threads <= 1:
+ return CppPythonBindingsCodeCache.load_pybinding(argtypes, source_code)
+ else:
+ get_result = CppPythonBindingsCodeCache.load_pybinding_async(
+ argtypes, source_code, submit_fn=self.submit
)
- )
+ return LambdaFuture(get_result)
def cuda(self, source_code, dst_file_ext):
def task():
@@ -2871,7 +2948,7 @@ class AsyncCompile:
[
value
for key, value in scope.items()
- if isinstance(value, (Future, TritonFuture))
+ if isinstance(value, (Future, CodeCacheFuture))
]
)
pbar = tqdm(
@@ -2884,18 +2961,18 @@ class AsyncCompile:
for key, result in scope.items():
if config.verbose_progress and not isinstance(pbar, _Faketqdm):
pbar.set_postfix_str(key)
- if isinstance(result, (Future, TritonFuture)):
+ if isinstance(result, (Future, CodeCacheFuture)):
scope[key] = result.result()
pbar.update(1)
_compile_end()
-if os.environ.get("TORCH_TNT_IN_USE", "0") == "1":
- # When TorchTNT is used, calling warm_pool() here will cause the
- # compile workers created not being able to be shut down inside
- # shutdown_compile_workers(). This may cause significant QPS drop.
- log.info("Do not call AsyncCompile.warm_pool() because TorchTNT is in use.")
+if (
+ os.environ.get("TORCH_TNT_IN_USE", "0") == "1"
+ or os.environ.get("TORCH_WARM_POOL", "1") != "1"
+):
+ pass
elif sys.version_info >= (3, 12):
log.info("AsyncCompile.warm_pool() is broken on 3.12+.")
else: | 2.41.0 |
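A simplified sketch of the pattern this commit moves `CppCodeCache` toward — the parent process writes the source and submits only the compile step to a worker, with library loading deferred behind a closure (helper names here are made up, and the dlopen step is stubbed out):

```
import subprocess
from concurrent.futures import Future, ProcessPoolExecutor

def _compile_worker(cmd: list) -> None:
    # runs in the worker process; the source file was already written by the parent
    subprocess.check_call(cmd)

def load_async(pool: ProcessPoolExecutor, cmd: list, output_path: str):
    future: Future = pool.submit(_compile_worker, cmd)
    lib = None

    def load_fn():
        # lazily wait for the compile, then cache the loaded artifact
        nonlocal lib
        if lib is None:
            future.result()      # re-raises any compile error from the worker
            lib = output_path    # stand-in for ctypes.CDLL(output_path)
        return lib

    return load_fn
```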
c451798cc5a7882e95b01600aa643b042b11b1e | Wed, 10 Apr 2024 12:50:21 -0700 | [PATCH 0025/1000] [inductor] Disable channels_last heuristic when channels==1 (#123758) | Pull Request resolved: https://github.com/pytorch/pytorch/pull/123758 Approved by: https://github.com/shunting314 | diff --git a/test/inductor/test_cpu_repro.py b/test/inductor/test_cpu_repro.py
index 9cc0e9b93a..80a0fed789 100644
--- a/test/inductor/test_cpu_repro.py
+++ b/test/inductor/test_cpu_repro.py
@@ -1630,6 +1630,19 @@ class CPUReproTests(TestCase):
self.common(fn, (value, mask))
assert metrics.generated_cpp_vec_kernel_count >= 1
+ def test_channels_last_view_as_complex(self):
+ # https://github.com/pytorch/pytorch/issues/122448#issuecomment-2046169554
+
+ def reduce_example(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
+ """Applies the rotary embedding to the query and key tensors."""
+ x_out = torch.view_as_complex(torch.stack([x.float(), y.float()], dim=-1))
+ return x_out
+
+ args = [torch.randn(1, 1, 1, 128), torch.randn(1, 1, 1, 128)]
+ expected = reduce_example(*args)
+ actual = torch.compile(reduce_example, fullgraph=True)(*args)
+ self.assertEqual(expected, actual)
+
def test_load_same_bool_tensor_twice(self):
@torch._dynamo.optimize("inductor")
def fn(a, b):
diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py
index 1233753dc9..e783009a45 100644
--- a/torch/_inductor/ir.py
+++ b/torch/_inductor/ir.py
@@ -2518,7 +2518,7 @@ class Layout(IRNode):
def is_channels_last_contiguous(self):
ndim = len(self.size)
- if ndim not in [4, 5]:
+ if ndim not in [4, 5] or self.size[1] == 1:
return False
for left, right, size in zip(
self.stride, make_channels_last_strides_for(self.size), self.size # type: ignore[arg-type] | 2.41.0 |
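A quick illustration of why the `size[1] == 1` case is excluded above: a size-1 channel dim places no constraint on strides, so the same contiguous buffer also satisfies a channels-last stride check and a stride-based layout heuristic cannot tell the two formats apart (shape taken from the regression test; exact behavior may vary by PyTorch version):

```
import torch

x = torch.randn(1, 1, 1, 128)
print(x.is_contiguous())                                   # True
print(x.is_contiguous(memory_format=torch.channels_last))  # also True
```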