commitId | datetime | subject | comment | diff | gitVersion |
---|---|---|---|---|---|
16f53275378de95723b41dc23c0ec52ef54ae29 | Thu, 11 Apr 2024 06:39:54 +0000 | [PATCH 0001/1000] [AOTI] Serialize large weights (#123002) | By appending them to the end of the shared library and mmapping them afterwards. Disabled by default, but overridable by `config.aot_inductor.force_mmap_weights`. Implemented by adding a `USE_MMAP_SELF` define to `inductor/aoti_runtime/model.h`, which is defined when weights are appended to the binary. In that case, the shared library name is determined by calling `dladdr`, the file is mmapped, and the weights are finally checked against a random magic number embedded at the end of the weights as well as in the const section of the library in question. Added unit tests to validate that it works as expected. TODO: - Extend support to CUDA - munmap region if the same library is reused Pull Request resolved: https://github.com/pytorch/pytorch/pull/123002 Approved by: https://github.com/jansel, https://github.com/desertfire, https://github.com/mikekgfb | diff --git a/test/inductor/test_aot_inductor.py b/test/inductor/test_aot_inductor.py
index ea21e5f140..5de6d91a0b 100644
--- a/test/inductor/test_aot_inductor.py
+++ b/test/inductor/test_aot_inductor.py
@@ -269,6 +269,22 @@ class AOTInductorTestsTemplate:
)
self.check_model(Model(), example_inputs)
+ def test_large_mmaped_weights(self):
+ class Model(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.linear = torch.nn.Linear(512, 250112)
+
+ def forward(self, x, y):
+ return x + self.linear(y)
+
+ example_inputs = (
+ torch.randn(1, 250112, device=self.device),
+ torch.randn(1, 512, device=self.device),
+ )
+ with config.patch({"aot_inductor.force_mmap_weights": True}):
+ self.check_model(Model(), example_inputs)
+
def test_with_offset(self):
class Model(torch.nn.Module):
def __init__(self, device):
@@ -2727,6 +2743,7 @@ if TEST_WITH_ROCM:
"test_bmm_multiple_dynamic": fail_cuda(is_skip=True),
"test_convolution": fail_cuda(is_skip=True),
"test_large": fail_cuda(is_skip=True),
+ "test_large_mmaped_weights": fail_cuda(is_skip=True),
"test_missing_cubin": fail_cuda(is_skip=True),
"test_multi_device": fail_cuda(is_skip=True),
"test_poi_multiple_dynamic": fail_cuda(is_skip=True),
@@ -2762,6 +2779,7 @@ if not IS_FBCODE:
"test_convolution": fail_minimal_arrayref_interface(is_skip=True),
"test_empty_graph": fail_minimal_arrayref_interface(is_skip=True),
"test_large": fail_minimal_arrayref_interface(is_skip=True),
+ "test_large_mmaped_weights": fail_minimal_arrayref_interface(is_skip=True),
"test_missing_output": fail_minimal_arrayref_interface(is_skip=True),
"test_model_modified_weights": fail_minimal_arrayref_interface(
is_skip=True
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index cc43a50b20..98cf75fc23 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -19,6 +19,7 @@ import re
import shlex
import shutil
import signal
+import struct
import subprocess
import sys
import sysconfig
@@ -38,6 +39,7 @@ from types import ModuleType
from typing import (
Any,
Callable,
+ cast,
Dict,
List,
Optional,
@@ -1545,6 +1547,7 @@ def cpp_compile_command(
aot_mode: bool = False,
compile_only: bool = False,
use_absolute_path: bool = False,
+ use_mmap_weights: bool = False,
) -> str:
ipaths, lpaths, libs, macros, build_arch_flags = get_include_and_linking_paths(
include_pytorch, vec_isa, cuda, aot_mode
@@ -1577,6 +1580,9 @@ def cpp_compile_command(
if compile_only:
libs, lpaths = "", ""
inp_name_str = " ".join(inp_name)
+ if use_mmap_weights:
+ macros += " -D USE_MMAP_SELF"
+
return re.sub(
r"[ \n]+",
" ",
@@ -1655,7 +1661,11 @@ class AotCodeCompiler:
picked_vec_isa = pick_vec_isa()
cpp_command = repr(
cpp_compile_command(
- "i", "o", vec_isa=picked_vec_isa, cuda=cuda, aot_mode=graph.aot_mode
+ "i",
+ "o",
+ vec_isa=picked_vec_isa,
+ cuda=cuda,
+ aot_mode=graph.aot_mode,
)
)
fbcode_aot_cpu_re = False
@@ -1794,6 +1804,17 @@ class AotCodeCompiler:
)
output_o = os.path.splitext(input_path)[0] + ".o"
+ consts_size = sum(
+ tensor.untyped_storage().nbytes()
+ for (name, tensor) in graph.constants.items()
+ if name not in graph.folded_constants
+ )
+ # TODO: Fix mmap weights with cuda
+ use_mmap_weights = (
+ not cuda and not config.is_fbcode() and consts_size > 2_000_000_000
+ )
+ if config.aot_inductor.force_mmap_weights and not cuda:
+ use_mmap_weights = True
compile_cmd = cpp_compile_command(
input=input_path,
output=output_o,
@@ -1802,6 +1823,7 @@ class AotCodeCompiler:
aot_mode=graph.aot_mode,
compile_only=True,
use_absolute_path=use_absolute_path,
+ use_mmap_weights=use_mmap_weights,
)
log.debug("aot compilation command: %s", compile_cmd)
if fbcode_aot_cpu_re:
@@ -1826,11 +1848,19 @@ class AotCodeCompiler:
return bytes(raw_array.contents)
- aot_constants = b"".join(
+ serialized_weights = b"".join(
_to_bytes(graph.get_original_value_of_constant(name))
for name in graph.constants.keys()
if name not in graph.folded_constants
)
+ if not use_mmap_weights:
+ aot_constants = serialized_weights
+ magic_number = 0
+ else:
+ magic_number = cast(
+ int, torch.randint(0, torch.iinfo(torch.int64).max, (1,)).item()
+ )
+ aot_constants = struct.pack("qq", consts_size + 8, magic_number)
consts_o = {
"linux": _compile_consts_linux,
"darwin": _compile_consts_darwin,
@@ -1851,6 +1881,14 @@ class AotCodeCompiler:
else:
run_command_and_check(link_cmd)
+ if use_mmap_weights:
+ with open(output_so, "a+b") as f_so:
+ so_size = f_so.tell()
+ # Page align the weights
+ f_so.write(b" " * (16384 - so_size % 16384))
+ f_so.write(serialized_weights)
+ f_so.write(struct.pack("q", magic_number))
+
# Append cmds to the end of codegen-ed wrapper file
with open(input_path, "a") as f:
f.write("\n")
diff --git a/torch/_inductor/config.py b/torch/_inductor/config.py
index c52b3cbd9b..26015bbc03 100644
--- a/torch/_inductor/config.py
+++ b/torch/_inductor/config.py
@@ -699,6 +699,10 @@ class aot_inductor:
# flag to decide whether to create a submodule for constant graph.
use_runtime_constant_folding: bool = False
+ # flag to force weight to be appened to the shared library and mmaped by the runtime
+ # rather than embedded into the data section. Needed to support 1B+ parameter models
+ force_mmap_weights: bool = False
+
class cuda:
# CUDA arch to use for CUDA template kernel compilation.
diff --git a/torch/csrc/inductor/aoti_runtime/model.h b/torch/csrc/inductor/aoti_runtime/model.h
index ad0970ebae..f03bf6d0fa 100644
--- a/torch/csrc/inductor/aoti_runtime/model.h
+++ b/torch/csrc/inductor/aoti_runtime/model.h
@@ -1,7 +1,12 @@
#pragma once
+#include <dlfcn.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <unistd.h>
#include <optional>
#include <regex>
+#include <stdexcept>
#include <unordered_map>
// WARNING: Be careful when adding new includes here. This header will be used
@@ -268,7 +273,43 @@ class AOTInductorModelBase {
cudaMemcpyHostToDevice));
}
return internal_ptr;
-#else // !USE_CUDA
+#elif USE_MMAP_SELF
+ // get pointer to constant which is packed in model during compile time.
+ AOTI_RUNTIME_CHECK(!skip_copy, "pure cpu mode doesn't support skip copy");
+ if (!self_mmap) {
+ Dl_info dl_info;
+ // get pointer to constant which are appended to the binary
+ AOTI_RUNTIME_CHECK(
+ dladdr(__func__, &dl_info), "Can't find shared library name");
+ int fd = open(dl_info.dli_fname, O_RDONLY);
+ AOTI_RUNTIME_CHECK(fd >= 0, "Shared library file cannot be opened");
+ auto fsize = lseek(fd, 0, SEEK_END);
+ auto weights_size =
+ reinterpret_cast<const uint64_t*>(_binary_constants_bin_start)[0];
+ auto magic_number =
+ reinterpret_cast<const uint64_t*>(_binary_constants_bin_start)[1];
+ auto weights_offset = fsize - weights_size;
+ AOTI_RUNTIME_CHECK(
+ (weights_offset & 0x3fff) == 0,
+ "weights_offset must be aligned to 16K boundary");
+ auto ptr = mmap(
+ NULL,
+ weights_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE,
+ fd,
+ weights_offset);
+ close(fd);
+ AOTI_RUNTIME_CHECK(ptr != MAP_FAILED, "mmap() failed");
+ self_mmap = static_cast<uint8_t*>(ptr);
+ AOTI_RUNTIME_CHECK(
+ reinterpret_cast<uint64_t*>(
+ self_mmap + weights_size - sizeof(uint64_t))[0] == magic_number,
+ "Weigths data seems corrupt");
+ }
+ return self_mmap + bytes_read;
+
+#else // !USE_CUDA&& !USE_MMAP_SELF
// get pointer to constant which is packed in model during compile time.
AOTI_RUNTIME_CHECK(!skip_copy, "pure cpu mode doesn't support skip copy");
return const_cast<uint8_t*>(_binary_constants_bin_start) + bytes_read;
@@ -457,6 +498,9 @@ class AOTInductorModelBase {
// Holds the blob storage for constants' at::Tensor for CUDA.
CUDAPtr constant_blob_;
#endif // USE_CUDA
+#ifdef USE_MMAP_SELF
+ uint8_t* self_mmap = NULL;
+#endif
// A directory with CUDA binary files, e.g. compiled kernels, etc.
const std::optional<std::string> cubin_dir_; | 2.41.0 |
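A minimal sketch of the append-and-mmap scheme the commit message above describes, written in plain Python rather than the AOTI C++ runtime. The file name, payload, and magic value are made-up stand-ins, and the `(consts_size + 8, magic)` header that the real PR compiles into the library's const section is simply kept in local variables here.

```python
import mmap
import os
import struct

PAGE = 16384                      # the PR pads the .so to a 16K boundary
lib_path = "model.so"             # stand-in for the compiled shared library
weights = os.urandom(1024)        # stand-in for the serialized weights
magic = 0x1234ABCD5678EF90        # stand-in for the random magic number

# Writer side (roughly what codecache.py does after linking): append padding,
# the weights, and a trailing copy of the magic number to the binary.
consts_size = len(weights)
with open(lib_path, "a+b") as f:
    so_size = f.tell()
    f.write(b" " * (PAGE - so_size % PAGE))       # page-align the weights
    f.write(weights + struct.pack("q", magic))

# Reader side (roughly what model.h does via dladdr()/open()/mmap()): compute
# the offset of the appended region from the file size and validate the magic.
weights_size = consts_size + 8                    # payload + trailing magic
offset = os.path.getsize(lib_path) - weights_size
assert offset % PAGE == 0, "weights_offset must be aligned to 16K boundary"
with open(lib_path, "rb") as f:
    mm = mmap.mmap(f.fileno(), weights_size, offset=offset, access=mmap.ACCESS_READ)
(stored_magic,) = struct.unpack("q", mm[weights_size - 8:weights_size])
assert stored_magic == magic, "weights data seems corrupt"
payload = mm[:consts_size]        # what the runtime would hand out as constant storage
```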
aad72b0d3f2b03ae6d268b0c78a3cf349c0ae9f | Wed, 10 Apr 2024 18:05:40 -0700 | [PATCH 0002/1000] Support all unsigned int sizes on unique (#123643) | Signed-off-by: Edward Z. Yang <[email protected]> Pull Request resolved: https://github.com/pytorch/pytorch/pull/123643 Approved by: https://github.com/albanD, https://github.com/kit1980 | diff --git a/aten/src/ATen/cuda/cub-RadixSortKeys.cu b/aten/src/ATen/cuda/cub-RadixSortKeys.cu
index cf88c8aa0c..74e82ae55c 100644
--- a/aten/src/ATen/cuda/cub-RadixSortKeys.cu
+++ b/aten/src/ATen/cuda/cub-RadixSortKeys.cu
@@ -51,5 +51,8 @@ void radix_sort_keys(
int64_t end_bit);
AT_FORALL_SCALAR_TYPES_AND2(Bool, Half, AT_INSTATIATE_CUB_TEMPLATES)
+AT_INSTATIATE_CUB_TEMPLATES(uint16_t, UInt16)
+AT_INSTATIATE_CUB_TEMPLATES(uint32_t, UInt32)
+AT_INSTATIATE_CUB_TEMPLATES(uint64_t, UInt64)
} // namespace at::cuda::cub
diff --git a/aten/src/ATen/cuda/cub-RadixSortPairs.cu b/aten/src/ATen/cuda/cub-RadixSortPairs.cu
index bd20069cf6..cc7c969300 100644
--- a/aten/src/ATen/cuda/cub-RadixSortPairs.cu
+++ b/aten/src/ATen/cuda/cub-RadixSortPairs.cu
@@ -77,6 +77,9 @@ AT_INSTANTIATE_SORT_PAIRS(int64_t, 4)
AT_INSTANTIATE_SORT_PAIRS(scalar_t, 8)
AT_FORALL_SCALAR_TYPES_AND2(Bool, Half, AT_INSTANTIATE_SORT_PAIRS_8)
+AT_INSTANTIATE_SORT_PAIRS(uint16_t, 8)
+AT_INSTANTIATE_SORT_PAIRS(uint32_t, 8)
+AT_INSTANTIATE_SORT_PAIRS(uint64_t, 8)
// BFloat16 Radix sort is supported from ROCm 4.5 onwards
#if !AT_ROCM_ENABLED() || (AT_ROCM_ENABLED() && ROCM_VERSION >= 40500)
diff --git a/aten/src/ATen/native/ReduceOps.cpp b/aten/src/ATen/native/ReduceOps.cpp
index 24247c0b8e..d29b177c13 100644
--- a/aten/src/ATen/native/ReduceOps.cpp
+++ b/aten/src/ATen/native/ReduceOps.cpp
@@ -4,6 +4,7 @@
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
+#include <ATen/Dispatch_v2.h>
#include <ATen/Parallel.h>
#include <ATen/WrapDimUtils.h>
#include <ATen/WrapDimUtilsMulti.h>
@@ -2255,7 +2256,7 @@ bool cpu_equal(const Tensor& self, const Tensor& other) {
.promote_inputs_to_common_dtype(true)
.build();
- AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kBFloat16, kHalf, iter.input_dtype(), "equal_cpu", [&] {
+ AT_DISPATCH_V2(iter.input_dtype(), "equal_cpu", AT_WRAP([&] {
iter.for_each([&](char** data, const int64_t *strides, int64_t dim_size) {
if (!result) {
return;
@@ -2271,7 +2272,7 @@ bool cpu_equal(const Tensor& self, const Tensor& other) {
other_data += strides[1];
}
});
- });
+ }), kBool, kBFloat16, kHalf, AT_EXPAND(AT_ALL_TYPES_AND_COMPLEX), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));
return result.load();
}
diff --git a/aten/src/ATen/native/Unique.cpp b/aten/src/ATen/native/Unique.cpp
index 79306f3eee..801af5d5e7 100644
--- a/aten/src/ATen/native/Unique.cpp
+++ b/aten/src/ATen/native/Unique.cpp
@@ -2,7 +2,7 @@
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
-#include <ATen/Dispatch.h>
+#include <ATen/Dispatch_v2.h>
#include <ATen/Parallel.h>
#include <ATen/native/TensorIterator.h>
#include <c10/util/irange.h>
@@ -446,13 +446,13 @@ _unique_cpu(const Tensor& self, const bool sorted, const bool return_inverse) {
self, return_inverse, /* return_counts */false);
return std::make_tuple(output, inverse);
}
- return AT_DISPATCH_ALL_TYPES_AND2(kBFloat16, kHalf, self.scalar_type(), "unique", [&] {
+ return AT_DISPATCH_V2(self.scalar_type(), "unique", [&] AT_WRAP({
// The current CPU implementation of unique always sort due to
// this is faster than hash table
auto [output, inverse, _] = unique_cpu_sorted_template<scalar_t>(
self, return_inverse, /* return_counts */false, IsUnique<scalar_t, /* equal_nan */false>());
return std::make_tuple(output, inverse);
- });
+ }), AT_EXPAND(AT_ALL_TYPES), kBFloat16, kHalf, AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));
}
std::tuple<Tensor, Tensor, Tensor>
@@ -460,35 +460,35 @@ _unique2_cpu(const Tensor& self, const bool sorted, const bool return_inverse, c
if (self.scalar_type() == kBool) {
return unique_cpu_bool_template(self, return_inverse, return_counts);
}
- return AT_DISPATCH_ALL_TYPES_AND2(kBFloat16, kHalf, self.scalar_type(), "unique", [&] {
+ return AT_DISPATCH_V2(self.scalar_type(), "unique", AT_WRAP([&] {
// The current CPU implementation of unique always sort due to
// this is faster than hash table
return unique_cpu_sorted_template<scalar_t>(
self, return_inverse, return_counts, IsUnique<scalar_t, /* equal_nan */ false>());
- });
+ }), AT_EXPAND(AT_ALL_TYPES), kBFloat16, kHalf, AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));
}
std::tuple<Tensor, Tensor, Tensor>
unique_dim_cpu(const Tensor& self, const int64_t dim, const bool sorted, const bool return_inverse, const bool return_counts) {
- return AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kBool, kHalf, self.scalar_type(), "unique_dim", [&] {
+ return AT_DISPATCH_V2(self.scalar_type(), "unique_dim", AT_WRAP([&] {
// The current implementation using `dim` always sorts due to unhashable tensors
return _unique_dim_cpu_template<scalar_t>(self, dim, false, return_inverse, return_counts);
- });
+ }), AT_EXPAND(AT_ALL_TYPES), kBFloat16, kBool, kHalf, AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));
}
std::tuple<Tensor, Tensor, Tensor>
unique_dim_consecutive_cpu(const Tensor& self, const int64_t dim, const bool return_inverse, const bool return_counts) {
- return AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kBool, kHalf, self.scalar_type(), "unique_dim", [&] {
+ return AT_DISPATCH_V2(self.scalar_type(), "unique_dim", AT_WRAP([&] {
return _unique_dim_cpu_template<scalar_t>(self, dim, true, return_inverse, return_counts);
- });
+ }), AT_EXPAND(AT_ALL_TYPES), kBFloat16, kBool, kHalf, AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));
}
std::tuple<Tensor, Tensor, Tensor>
unique_consecutive_cpu(const Tensor& self, const bool return_inverse, const bool return_counts, c10::optional<int64_t> dim) {
if (!dim.has_value() || (dim.value() == 0 && self.dim() == 1)) {
- return AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kBool, kHalf, self.scalar_type(), "unique", [&] {
+ return AT_DISPATCH_V2(self.scalar_type(), "unique", AT_WRAP([&] {
return unique_consecutive_cpu_template<scalar_t>(self, return_inverse, return_counts);
- });
+ }), AT_EXPAND(AT_ALL_TYPES), kBFloat16, kBool, kHalf, AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));
}
return unique_dim_consecutive_cpu(self, dim.value(), return_inverse, return_counts);
}
diff --git a/aten/src/ATen/native/cpu/SortingKernel.cpp b/aten/src/ATen/native/cpu/SortingKernel.cpp
index 3349cd3be1..22ba015215 100644
--- a/aten/src/ATen/native/cpu/SortingKernel.cpp
+++ b/aten/src/ATen/native/cpu/SortingKernel.cpp
@@ -5,6 +5,7 @@
#include <ATen/native/Sorting.h>
#include <ATen/core/TensorBase.h>
#include <ATen/Dispatch.h>
+#include <ATen/Dispatch_v2.h>
#include <ATen/Parallel.h>
#include <ATen/NumericUtils.h>
#include <ATen/TensorIterator.h>
@@ -42,9 +43,8 @@ void _dim_apply(
auto indices_dim_stride = indices.stride(dim);
auto dim_size = values.size(dim);
- AT_DISPATCH_ALL_TYPES_AND3(
- ScalarType::Bool, ScalarType::Half, ScalarType::BFloat16, iter.dtype(),
- "sorting_kernel_method_name", [&] {
+ AT_DISPATCH_V2(
+ iter.dtype(), "sorting_kernel_method_name", AT_WRAP([&] {
auto loop = [&](char** data, const int64_t* strides, int64_t n) {
auto* values_data_bytes = data[0];
auto* indices_data_bytes = data[1];
@@ -69,7 +69,7 @@ void _dim_apply(
int64_t grain_size = internal::GRAIN_SIZE / std::max(int64_t{1}, dim_size);
iter.for_each(loop, /*grain_size=*/grain_size);
- }
+ }), kBool, kHalf, kBFloat16, AT_EXPAND(AT_ALL_TYPES), AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES)
);
}
diff --git a/aten/src/ATen/native/cuda/Unique.cu b/aten/src/ATen/native/cuda/Unique.cu
index 30b4640be6..e2654be013 100644
--- a/aten/src/ATen/native/cuda/Unique.cu
+++ b/aten/src/ATen/native/cuda/Unique.cu
@@ -1,6 +1,6 @@
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
-#include <ATen/Dispatch.h>
+#include <ATen/Dispatch_v2.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/ThrustAllocator.h>
@@ -186,45 +186,45 @@ std::tuple<Tensor, Tensor, Tensor> unique_dim_cuda_template(
std::tuple<Tensor, Tensor>
_unique_cuda(const Tensor& self, const bool sorted, const bool return_inverse) {
- return AT_DISPATCH_ALL_TYPES_AND2(kBool, kHalf, self.scalar_type(), "unique", [&] {
+ return AT_DISPATCH_V2(self.scalar_type(), "unique", AT_WRAP([&] {
// The current CUDA implementation of unique always sort due to the
// lack of hashtable implementation in thrust
auto [output, inverse, _] = internal::unique_cuda_template<scalar_t>(self, false, return_inverse, false);
return std::make_tuple(output, inverse);
- });
+ }), AT_EXPAND(AT_ALL_TYPES), kBool, kHalf, AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));
}
std::tuple<Tensor, Tensor, Tensor>
_unique2_cuda(const Tensor& self, const bool sorted, const bool return_inverse, const bool return_counts) {
- return AT_DISPATCH_ALL_TYPES_AND2(kBool, kHalf, self.scalar_type(), "unique", [&] {
+ return AT_DISPATCH_V2(self.scalar_type(), "unique", AT_WRAP([&] {
// The current CUDA implementation of unique always sort due to the
// lack of hashtable implementation in thrust
return internal::unique_cuda_template<scalar_t>(self, false, return_inverse, return_counts);
- });
+ }), AT_EXPAND(AT_ALL_TYPES), kBool, kHalf, AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));
}
std::tuple<Tensor, Tensor, Tensor>
unique_dim_cuda(const Tensor& self, const int64_t dim, const bool sorted, const bool return_inverse, const bool return_counts) {
- return AT_DISPATCH_ALL_TYPES_AND2(kBool, kHalf, self.scalar_type(), "unique_dim", [&] {
+ return AT_DISPATCH_V2(self.scalar_type(), "unique_dim", AT_WRAP([&] {
return unique_dim_cuda_template<scalar_t>(self, dim, false, return_inverse, return_counts);
- });
+ }), AT_EXPAND(AT_ALL_TYPES), kBool, kHalf, AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));
}
std::tuple<Tensor, Tensor, Tensor>
unique_dim_consecutive_cuda(const Tensor& self, const int64_t dim, const bool return_inverse, const bool return_counts) {
- return AT_DISPATCH_ALL_TYPES_AND2(kBool, kHalf, self.scalar_type(), "unique_dim", [&] {
+ return AT_DISPATCH_V2(self.scalar_type(), "unique_dim", AT_WRAP([&] {
return unique_dim_cuda_template<scalar_t>(self, dim, true, return_inverse, return_counts);
- });
+ }), AT_EXPAND(AT_ALL_TYPES), kBool, kHalf, AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));
}
std::tuple<Tensor, Tensor, Tensor>
unique_consecutive_cuda(const Tensor& self, const bool return_inverse, const bool return_counts, c10::optional<int64_t> dim) {
if (!dim.has_value()) {
- return AT_DISPATCH_ALL_TYPES_AND2(kBool, kHalf, self.scalar_type(), "unique", [&] {
+ return AT_DISPATCH_V2(self.scalar_type(), "unique", AT_WRAP([&] {
// The current CUDA implementation of unique always sort due to the
// lack of hashtable implementation in thrust
return internal::unique_cuda_template<scalar_t>(self, true, return_inverse, return_counts);
- });
+ }), AT_EXPAND(AT_ALL_TYPES), kBool, kHalf, AT_EXPAND(AT_BAREBONES_UNSIGNED_TYPES));
}
return unique_dim_consecutive_cuda(self, dim.value(), return_inverse, return_counts);
}
diff --git a/aten/src/ATen/native/cuda/UniqueCub.cu b/aten/src/ATen/native/cuda/UniqueCub.cu
index f9e71bde1a..bbd8673bcf 100644
--- a/aten/src/ATen/native/cuda/UniqueCub.cu
+++ b/aten/src/ATen/native/cuda/UniqueCub.cu
@@ -335,6 +335,9 @@ INSTANTIATE_UNIQUE_CUDA_TEMPLATE(float);
INSTANTIATE_UNIQUE_CUDA_TEMPLATE(int32_t);
INSTANTIATE_UNIQUE_CUDA_TEMPLATE(int64_t);
INSTANTIATE_UNIQUE_CUDA_TEMPLATE(int16_t);
+INSTANTIATE_UNIQUE_CUDA_TEMPLATE(uint32_t);
+INSTANTIATE_UNIQUE_CUDA_TEMPLATE(uint64_t);
+INSTANTIATE_UNIQUE_CUDA_TEMPLATE(uint16_t);
INSTANTIATE_UNIQUE_CUDA_TEMPLATE(bool);
INSTANTIATE_UNIQUE_CUDA_TEMPLATE(at::Half);
diff --git a/c10/core/ScalarType.cpp b/c10/core/ScalarType.cpp
index a942ae252d..f9704c8157 100644
--- a/c10/core/ScalarType.cpp
+++ b/c10/core/ScalarType.cpp
@@ -84,6 +84,13 @@ ScalarType promoteTypes(ScalarType a, ScalarType b) {
// - We must not promote uint64 to int64 because this will overflow.
//
// It'll be a bit of work to fix it, so we're punting on it for now.
+ // However, float promotion is fine, so we handle that.
+ if (isFloatingType(a)) {
+ return a;
+ }
+ if (isFloatingType(b)) {
+ return b;
+ }
TORCH_CHECK(
false,
"Promotion for uint16, uint32, uint64 types is not supported, attempted to promote ",
diff --git a/test/test_meta.py b/test/test_meta.py
index b081ce173d..deb421adee 100644
--- a/test/test_meta.py
+++ b/test/test_meta.py
@@ -64,6 +64,9 @@ i32 = torch.int32
i64 = torch.int64
b8 = torch.bool
u8 = torch.uint8
+u16 = torch.uint16
+u32 = torch.uint32
+u64 = torch.uint64
foreach_op_db = (
foreach_unary_op_db +
@@ -659,8 +662,8 @@ meta_function_expected_failures = {
torch.Tensor.nonzero : {f64, i32, c128, i64, i16, c32, f16, u8, c64, bf16, b8, i8, f32},
torch.Tensor.item : {f64, i32, c128, i64, i16, f16, u8, c32, c64, bf16, b8, i8, f32},
torch.bincount : {i32, i64, u8, i16, i8},
- torch.functional.unique : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32},
- torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32},
+ torch.functional.unique : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
+ torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64},
torch.histc : {f64, f16, bf16, f32},
torch.histogram : {f64, f32},
torch.histogramdd : {f64, f32},
@@ -832,7 +835,7 @@ meta_dispatch_expected_failures = {
aten._histogramdd_from_bin_cts.default : {f32, f64},
aten._histogramdd_from_bin_tensors.default : {f32, f64},
aten._local_scalar_dense.default : {c32, c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
- aten._unique2.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8},
+ aten._unique2.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.bincount.default : {i64, i8, i32, i16, u8},
aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8},
aten.histc.default : {bf16, f32, f64},
@@ -840,8 +843,8 @@ meta_dispatch_expected_failures = {
aten.histogram.bin_ct : {f32, f64},
aten.histogram.bins_tensor : {f32, f64},
aten.kthvalue.default : {i8, f64, i64, f16, bf16, f32, i32, i16, u8},
- aten.unique_consecutive.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8},
- aten.unique_dim.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8},
+ aten.unique_consecutive.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
+ aten.unique_dim.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64},
aten.upsample_nearest3d.vec : {bf16, f32, f64, u8},
}
diff --git a/test/test_testing.py b/test/test_testing.py
index 5c81197f9f..26722e2bfb 100644
--- a/test/test_testing.py
+++ b/test/test_testing.py
@@ -2055,7 +2055,7 @@ class TestTestParametrizationDeviceType(TestCase):
for test_func, name in _get_test_funcs_for_test_class(device_cls):
should_apply = (name == 'test_op_param_test_op_x_2_cpu_float64' or
('test_other' in name and 'y_5' in name) or
- ('test_three' in name and name.endswith('int16')))
+ ('test_three' in name and name.endswith('_int16')))
self.assertEqual(hasattr(test_func, '_decorator_applied'), should_apply)
def test_modules_decorator_applies_module_and_param_specific_decorators(self, device):
diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py
index 7a8a529180..84798ebac5 100644
--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -16793,8 +16793,8 @@ op_db: List[OpInfo] = [
skips=(
)),
OpInfo('unique',
- dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
- dtypesIfCUDA=all_types_and(torch.bool, torch.float16),
+ dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16, torch.uint16, torch.uint32, torch.uint64),
+ dtypesIfCUDA=all_types_and(torch.bool, torch.float16, torch.uint16, torch.uint32, torch.uint64),
sample_inputs_func=sample_inputs_unique,
supports_out=False,
supports_autograd=False, | 2.41.0 |
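Most of the diff above is a mechanical migration to `AT_DISPATCH_V2` so the barebones unsigned dtypes get instantiated too; the user-visible effect is that `torch.unique` and its variants now accept uint16/uint32/uint64 tensors. A small illustrative check (values chosen arbitrarily, and it of course only passes on a build that includes this PR):

```python
import torch

# unique now dispatches on the barebones unsigned dtypes as well.
for dtype in (torch.uint16, torch.uint32, torch.uint64):
    x = torch.tensor([3, 1, 3, 2, 1], dtype=dtype)
    values, counts = torch.unique(x, return_counts=True)
    print(dtype, values.tolist(), counts.tolist())   # [1, 2, 3] [2, 1, 2]
```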
2f687f32c3abddc0999733e26761a1f608029f3 | Thu, 11 Apr 2024 06:53:10 +0000 | [PATCH 0003/1000] Option to include stride and device annotation in gm.print_readable() (#123690) | Summary: Sample output for gm.print_readable(include_stride=True, include_device=True) ``` getitem_21: "i32[1200][1]cuda:0" = auto_functionalized_4[1] copy_2: "f32[2, 60][60, 1]cuda:1" = .... ``` Test Plan: CI Differential Revision: D55949129 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123690 Approved by: https://github.com/Chillee | diff --git a/test/expect/TestFXAPIBackwardCompatibility.test_function_back_compat-fx_backcompat_function_signatures.expect b/test/expect/TestFXAPIBackwardCompatibility.test_function_back_compat-fx_backcompat_function_signatures.expect
index d6630cff36..2996edd485 100644
--- a/test/expect/TestFXAPIBackwardCompatibility.test_function_back_compat-fx_backcompat_function_signatures.expect
+++ b/test/expect/TestFXAPIBackwardCompatibility.test_function_back_compat-fx_backcompat_function_signatures.expect
@@ -22,7 +22,7 @@ torch.fx.graph.Graph.node_copy(self, node: torch.fx.node.Node, arg_transform: Ca
torch.fx.graph.Graph.output(self, result: 'Argument', type_expr: Optional[Any] = None)
torch.fx.graph.Graph.placeholder(self, name: str, type_expr: Optional[Any] = None, default_value: Any) -> torch.fx.node.Node
torch.fx.graph.Graph.print_tabular(self)
-torch.fx.graph.Graph.python_code(self, root_module: str, verbose: bool = False) -> torch.fx.graph.PythonCode
+torch.fx.graph.Graph.python_code(self, root_module: str, verbose: bool = False, include_stride: bool = False, include_device: bool = False) -> torch.fx.graph.PythonCode
torch.fx.graph_module.GraphModule.__init__(self, root: Union[torch.nn.modules.module.Module, Dict[str, Any]], graph: torch.fx.graph.Graph, class_name: str = 'GraphModule')
torch.fx.graph_module.GraphModule.add_submodule(self, target: str, m: torch.nn.modules.module.Module) -> bool
torch.fx.graph_module.GraphModule.delete_all_unused_submodules(self) -> None
diff --git a/torch/fx/graph.py b/torch/fx/graph.py
index 50f94bfca8..7ff8f94dbf 100644
--- a/torch/fx/graph.py
+++ b/torch/fx/graph.py
@@ -4,8 +4,9 @@ import torch.utils._pytree as pytree
from . import _pytree as fx_pytree
from ._compatibility import compatibility
+import os
import contextlib
-from typing import TYPE_CHECKING, Callable, Any, List, Dict, NamedTuple, Optional, Tuple, Set, FrozenSet, Type
+from typing import TYPE_CHECKING, Callable, Any, List, Dict, NamedTuple, Optional, Tuple, Set, FrozenSet, Type, Iterable
from dataclasses import dataclass
from contextlib import contextmanager
import copy
@@ -378,7 +379,8 @@ class CodeGen:
return []
def _gen_python_code(
- self, nodes, root_module: str, namespace: _Namespace, *, verbose: bool = False,
+ self, nodes, root_module: str, namespace: _Namespace, *,
+ verbose: bool = False, include_stride: bool = False, include_device: bool = False
) -> PythonCode:
free_vars: List[str] = []
body: List[str] = []
@@ -387,6 +389,8 @@ class CodeGen:
# Wrap string in list to pass by reference
maybe_return_annotation : List[str] = ['']
+ include_stride = include_stride or (os.environ.get("FX_GRAPH_SHOW_STRIDE", "0") == "1")
+ include_device = include_device or (os.environ.get("FX_GRAPH_SHOW_DEVICE", "0") == "1")
def add_global(name_hint: str, obj: Any):
"""Add an obj to be tracked as a global.
@@ -530,7 +534,7 @@ class CodeGen:
prev_stacktrace = ""
body.append('\n# No stacktrace found for following nodes\n')
- def stringify_shape(shape : torch.Size) -> str:
+ def stringify_shape(shape : Iterable) -> str:
return f"[{', '.join(str(x) for x in shape)}]"
def emit_node(node : Node):
@@ -543,10 +547,13 @@ class CodeGen:
from torch.fx.passes.shape_prop import TensorMetadata
meta_val = node.meta.get('val', node.meta.get('tensor_meta', None))
-
# use string as annotation, to make it valid python code
if isinstance(meta_val, FakeTensor):
- maybe_type_annotation = f': "{dtype_abbrs[meta_val.dtype]}{stringify_shape(meta_val.shape)}"'
+ stride_annotation = f"{stringify_shape(meta_val.stride())}" if include_stride else ""
+ device_annotation = f"{meta_val.device}" if include_device else ""
+ maybe_type_annotation = \
+ f': "{dtype_abbrs[meta_val.dtype]}{stringify_shape(meta_val.shape)}' \
+ f'{stride_annotation}{device_annotation}"'
elif isinstance(meta_val, py_sym_types):
maybe_type_annotation = f': "Sym({meta_val})"'
elif isinstance(meta_val, TensorMetadata):
@@ -1346,7 +1353,10 @@ class Graph:
return op
@compatibility(is_backward_compatible=True)
- def python_code(self, root_module: str, *, verbose: bool = False) -> PythonCode:
+ def python_code(
+ self, root_module: str, *,
+ verbose: bool = False, include_stride: bool = False, include_device: bool = False
+ ) -> PythonCode:
"""
Turn this ``Graph`` into valid Python code.
@@ -1405,10 +1415,19 @@ class Graph:
node._repr_fn = orig_repr_fns[node]
with override_node_repr(self):
- return self._python_code(root_module, namespace, verbose=verbose)
+ return self._python_code(
+ root_module, namespace,
+ verbose=verbose, include_stride=include_stride, include_device=include_device
+ )
- def _python_code(self, root_module: str, namespace: _Namespace, *, verbose: bool = False) -> PythonCode:
- return self._codegen._gen_python_code(self.nodes, root_module, namespace, verbose=verbose)
+ def _python_code(
+ self, root_module: str, namespace: _Namespace, *,
+ verbose: bool = False, include_stride: bool = False, include_device: bool = False
+ ) -> PythonCode:
+ return self._codegen._gen_python_code(
+ self.nodes, root_module, namespace,
+ verbose=verbose, include_stride=include_stride, include_device=include_device
+ )
def __str__(self) -> str:
diff --git a/torch/fx/graph_module.py b/torch/fx/graph_module.py
index 1a1e7087dc..9569a0d01b 100644
--- a/torch/fx/graph_module.py
+++ b/torch/fx/graph_module.py
@@ -818,11 +818,13 @@ class {module_name}(torch.nn.Module):
return res
@compatibility(is_backward_compatible=False)
- def print_readable(self, print_output=True):
+ def print_readable(self, print_output=True, include_stride=False, include_device=False):
"""
Return the Python code generated for current GraphModule and its children GraphModules
"""
- verbose_python_code = self._graph.python_code(root_module="self", verbose=True)
+ verbose_python_code = self._graph.python_code(
+ root_module="self", verbose=True, include_stride=include_stride, include_device=include_device
+ )
module_code = verbose_python_code.src
module_code = module_code.lstrip("\n")
module_code = f"class {self._get_name()}(torch.nn.Module):\n" + module_code | 2.41.0 |
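A usage sketch for the new flags, assuming any GraphModule whose nodes carry FakeTensor `val` metadata; the traced function below is a made-up example, not from the PR.

```python
import torch
from torch.fx.experimental.proxy_tensor import make_fx

def f(x):
    return x.relu() + 1

# make_fx in fake mode records FakeTensor metadata on each node, which is what
# the stride/device annotations are rendered from.
gm = make_fx(f, tracing_mode="fake")(torch.randn(2, 60))

# New keyword arguments added by this PR; annotations now look like
# 'f32[2, 60][60, 1]cpu' instead of just 'f32[2, 60]'.
gm.print_readable(include_stride=True, include_device=True)

# The diff also reads two environment variables as a global switch:
#   FX_GRAPH_SHOW_STRIDE=1 / FX_GRAPH_SHOW_DEVICE=1
```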
8d2504eece2ba5e464a42b253ea07f70e9ba5b6 | Tue, 9 Apr 2024 12:11:09 -0700 | [PATCH 0004/1000] [aot] always pass inputs to runtime_wrapper as list and add type annotations (#123630) | `runtime_wrapper` unpacking the arguments as a Tuple[arg] will prevent them from being freed within its scope. This is problematic if inductors wants to free those inputs, which could be activations in the compiled backwards case. This PR only changes the signature to pass as list, but does not clear it, keeping same refcount as before. Also adding some mypy annotations. Ideally, instead of `Any`, I would want a type to describe single arg which seems to be usually Tensor or int. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123630 Approved by: https://github.com/jansel, https://github.com/bdhirsh | diff --git a/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py b/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py
index dda3144b24..5c9c3424d3 100644
--- a/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py
+++ b/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py
@@ -162,7 +162,7 @@ def aot_dispatch_base(
# Create a wrapper to set up the rng functionalize bits
@wraps(compiled_fw)
- def rng_functionalization_wrapper(args):
+ def rng_functionalization_wrapper(args: List[Any]):
# see note: [Returning Fake Tensors on First AOT Autograd Call]
nonlocal fakified_out
if fakified_out is not None:
@@ -993,7 +993,7 @@ Got grad_output types: {str(grad_output_types)}"""
]
@wraps(compiled_function)
- def debug_compiled_function(*args):
+ def debug_compiled_function(args: List[Any]):
# TODO: Check aliasing relationships
# TODO: Check strides for metadata mutation
# (NB: ideally, this logic is factored out of this function and
@@ -1013,6 +1013,6 @@ Got grad_output types: {str(grad_output_types)}"""
f"{describe_input(i, aot_config)} would not require grad",
)
- return compiled_function(*args)
+ return compiled_function(args)
return debug_compiled_function
diff --git a/torch/_functorch/_aot_autograd/runtime_wrappers.py b/torch/_functorch/_aot_autograd/runtime_wrappers.py
index 6c2a8beddc..1ef2df56a2 100644
--- a/torch/_functorch/_aot_autograd/runtime_wrappers.py
+++ b/torch/_functorch/_aot_autograd/runtime_wrappers.py
@@ -72,13 +72,14 @@ def create_runtime_wrapper(
if not hasattr(compiled_fn, "_boxed_call"):
compiled_fn = make_boxed_func(compiled_fn)
- def runtime_wrapper(*args):
+ def runtime_wrapper(args: List[Any]):
num_tokens = len(runtime_metadata.tokens)
if config.unlift_effect_tokens:
assert num_tokens == 0
elif num_tokens > 0:
# Pass in effect tokens (See Note [Side-Effectful Tokens in AOTAutograd])
- args = ([None] * num_tokens, *args)
+ # NOTE: this keeps an extra reference to the old args until the end of this function
+ args = [[None] * num_tokens, *args]
if trace_joint:
args_ = list(args)
@@ -572,11 +573,8 @@ fw_metadata={str(fw_metadata)}
wrapped_flat_fn, deduped_flat_args, aot_config, fw_metadata=updated_fw_metadata
)
- if not hasattr(compiled_fn, "_boxed_call"):
- compiled_fn = make_boxed_func(compiled_fn)
-
@wraps(compiled_fn)
- def wrapped_compiled_fn(args):
+ def wrapped_compiled_fn(args: List[Any]):
deduped_args = remove_dupe_args(args)
args.clear()
return compiled_fn(deduped_args)
@@ -742,9 +740,6 @@ fw_metadata={str(fw_metadata)}
fw_metadata=fw_metadata_updated,
)
- if not hasattr(compiled_fn, "_boxed_call"):
- compiled_fn = make_boxed_func(compiled_fn)
-
@wraps(compiled_fn)
def wrapped_compiled_fn(args):
args_with_synthetic_bases, synthetic_base_info = merge_view_inputs(
diff --git a/torch/_functorch/_aot_autograd/utils.py b/torch/_functorch/_aot_autograd/utils.py
index 0e4989860b..97512f6836 100644
--- a/torch/_functorch/_aot_autograd/utils.py
+++ b/torch/_functorch/_aot_autograd/utils.py
@@ -7,7 +7,7 @@ import operator
import warnings
from contextlib import nullcontext
from functools import wraps
-from typing import Any, Callable, List, Optional, Tuple
+from typing import Any, Callable, List, Optional, Tuple, Union
import torch
import torch.utils._pytree as pytree
@@ -103,7 +103,9 @@ def make_boxed_compiler(compiler):
return f
-def call_func_at_runtime_with_args(f, args, steal_args=False, disable_amp=False):
+def call_func_at_runtime_with_args(
+ f, args: Union[Tuple[Any], List[Any]], steal_args=False, disable_amp=False
+):
if not steal_args:
args = list(args)
assert isinstance(args, list)
diff --git a/torch/_functorch/aot_autograd.py b/torch/_functorch/aot_autograd.py
index 3a06db7d1f..8a421573f0 100644
--- a/torch/_functorch/aot_autograd.py
+++ b/torch/_functorch/aot_autograd.py
@@ -648,9 +648,6 @@ or otherwise set torch._functorch.config.functionalize_rng_ops = False.""")
assert isinstance(compiled_fn, torch.fx.GraphModule)
return compiled_fn, fw_metadata
- if not hasattr(compiled_fn, "_boxed_call"):
- compiled_fn = make_boxed_func(compiled_fn)
-
return compiled_fn
@@ -925,7 +922,7 @@ def aot_module_simplified(
# the boxed calling convention, but aot_module_simplified somehow
# historically returned a function that was not the boxed calling
# convention. This should get fixed...
- def forward(*runtime_args):
+ def forward(*runtime_args: Tuple[Any]):
full_args = []
full_args.extend(params_flat)
full_args.extend(runtime_args) | 2.41.0 |
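The motivation above comes down to plain CPython reference counting: arguments captured by `def f(*args)` sit in a tuple the function cannot empty, so they stay alive for the whole call, whereas the boxed `def f(args: List[...])` convention lets the callee `args.clear()` and free activations early. A stripped-down illustration (not AOTAutograd code, just the behaviour it relies on):

```python
import weakref
import torch

def boxed_fn(args):
    w = weakref.ref(args[0])
    args.clear()                                            # the list was the only owner
    print("alive inside boxed call:", w() is not None)      # False: freed mid-call

def unboxed_fn(*args):
    w = weakref.ref(args[0])
    # `args` is a tuple local to this call; it cannot be emptied, so the
    # tensor stays alive until the function returns.
    print("alive inside unboxed call:", w() is not None)    # True

boxed_fn([torch.randn(8)])
unboxed_fn(torch.randn(8))
```

This is also why the PR only changes the signature here and leaves the actual clearing to the follow-up refactor.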
510afb8857e6565612862496a6478733fe7b8db | Wed, 10 Apr 2024 17:53:07 -0700 | [PATCH 0005/1000] [aot] refactor runtime_wrapper's epilogue args access (#123674) | I want runtime_wrapper args to be stealable by call_func_at_runtime_with_args, since the args may contain activations which we don't want to hold alive in this scope. The args to runtime_wrapper **should always be** from a list created within aot_autograd, so it **should always be** safe to steal them: https://github.com/pytorch/pytorch/blob/a4a49f77b8c45ea459263c2242ab391b3d0577f2/torch/_functorch/aot_autograd.py#L928-L932 There are some accesses after we execute the compiled_fn, but those index accesses are already inferred at compile time. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123674 Approved by: https://github.com/jansel, https://github.com/bdhirsh ghstack dependencies: #123630 | diff --git a/torch/_functorch/_aot_autograd/runtime_wrappers.py b/torch/_functorch/_aot_autograd/runtime_wrappers.py
index 1ef2df56a2..3d11c01fe9 100644
--- a/torch/_functorch/_aot_autograd/runtime_wrappers.py
+++ b/torch/_functorch/_aot_autograd/runtime_wrappers.py
@@ -72,8 +72,29 @@ def create_runtime_wrapper(
if not hasattr(compiled_fn, "_boxed_call"):
compiled_fn = make_boxed_func(compiled_fn)
+ # Note [Inputs needed in runtime epilogue after list clearing]
+ # In Python functions, you can't free the input arguments of a function within the scope of that function. A workaround is to
+ # wrap the input arguments in a list, and clear the list from within the function.
+ # Here, this is implemented as `call_func_at_runtime_with_args(..., steal_args=True)`.
+ #
+ # This is needed for Compiled Autograd since some of the inputs (activations) should be freed early.
+ # However, we cannot blindly clear the entire list, because AOTAutograd may need access to some of the graph inputs
+ # **after** the compiled function has finished running. There are two main cases:
+ # (1) Input mutations: If there are an input mutations that we must run outside of the graph, we need access to the input.
+ # (2) Output aliasing: Outputs that aliases graph inputs generally must be regenerated outside of the `autograd.Function`,
+ # and doing so requires us accessing the corresponding input after the compiled artifact has run.
+ epilogue_args_idx = []
+ epilogue_args_idx.extend(runtime_metadata.mutated_inp_runtime_indices)
+ num_tokens = len(runtime_metadata.tokens)
+ for info in runtime_metadata.output_info:
+ if (
+ info.output_type == OutputType.alias_of_input
+ or info.output_type == OutputType.is_input
+ ):
+ assert isinstance(info.base_idx, int)
+ epilogue_args_idx.append(info.base_idx + num_tokens)
+
def runtime_wrapper(args: List[Any]):
- num_tokens = len(runtime_metadata.tokens)
if config.unlift_effect_tokens:
assert num_tokens == 0
elif num_tokens > 0:
@@ -81,6 +102,9 @@ def create_runtime_wrapper(
# NOTE: this keeps an extra reference to the old args until the end of this function
args = [[None] * num_tokens, *args]
+ # stash a ref to each input tensor we plan to use after the compiled function
+ orig_inputs = {i: args[i] for i in epilogue_args_idx}
+
if trace_joint:
args_ = list(args)
# See Note [Detaching inputs that never need gradients]
@@ -89,9 +113,7 @@ def create_runtime_wrapper(
args_[idx] = args_[idx].detach()
with torch.autograd._force_original_view_tracking(True):
all_outs = call_func_at_runtime_with_args(
- compiled_fn,
- args_,
- disable_amp=disable_amp,
+ compiled_fn, args_, disable_amp=disable_amp, steal_args=True
)
else:
# When we have an inference graph, we run with torch.no_grad.
@@ -101,16 +123,13 @@ def create_runtime_wrapper(
if torch.is_grad_enabled():
with torch.no_grad():
all_outs = call_func_at_runtime_with_args(
- compiled_fn,
- args,
- disable_amp=disable_amp,
+ compiled_fn, args, disable_amp=disable_amp, steal_args=True
)
else:
all_outs = call_func_at_runtime_with_args(
- compiled_fn,
- args,
- disable_amp=disable_amp,
+ compiled_fn, args, disable_amp=disable_amp, steal_args=True
)
+ del args
num_mutated_runtime_inps = runtime_metadata.num_mutated_inp_runtime_indices
num_intermediate_bases = runtime_metadata.num_intermediate_bases
@@ -144,7 +163,7 @@ def create_runtime_wrapper(
meta = runtime_metadata.input_info[inpt_idx]
if not meta.mutates_data and not meta.mutates_metadata:
continue
- original_inpt = args[inpt_idx]
+ original_inpt = orig_inputs[inpt_idx]
updated_inpt = updated_inputs[i]
if meta.mutates_storage_metadata:
# mutates_storage_metadata means our input saw a x.set_(y) call.
@@ -237,14 +256,14 @@ def create_runtime_wrapper(
o_grad = runtime_metadata.output_info[i].requires_grad
if info.output_type == OutputType.alias_of_input:
- aliased_base_tensor = args[info.base_idx + num_tokens] # type: ignore[index]
+ aliased_base_tensor = orig_inputs[info.base_idx + num_tokens] # type: ignore[index]
regenerated_out = gen_alias_from_base(
aliased_base_tensor, o_, o_grad
)
fw_outs_including_aliases.append(regenerated_out)
continue
elif info.output_type == OutputType.is_input:
- aliased_base_tensor = args[info.base_idx + num_tokens] # type: ignore[index]
+ aliased_base_tensor = orig_inputs[info.base_idx + num_tokens] # type: ignore[index]
regenerated_out = aliased_base_tensor
fw_outs_including_aliases.append(regenerated_out)
continue | 2.41.0 |
00282fecfcb53790aebfb24cc48a8703577778e | Wed, 10 Apr 2024 18:33:29 -0700 | [PATCH 0006/1000] [c10d] make monitorThread sleep when we try to dump (#123788) | Summary: We seperated the FR dump logic from the desync debug logic, so we no longer set collectiveDebugInfoMode_ to true when we just need FR dump. That's why monitor thread did not sleep and try to kill the process without waiting for the dump. The fix is simple, we should sleep whenever shouldDump_ is true Test Plan: Existing unit tests Pull Request resolved: https://github.com/pytorch/pytorch/pull/123788 Approved by: https://github.com/wconstab | diff --git a/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp b/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp
index d9f9e6e574..def79cde2b 100644
--- a/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp
+++ b/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp
@@ -1268,6 +1268,7 @@ void ProcessGroupNCCL::heartbeatMonitor() {
lastTimePollStore = currentTime;
if (globalStore_->check({std::string(EXCEPTION_DUMP)})) {
int timeOutRank = -1;
+ shouldDump_.store(true);
try {
auto vec = globalStore_->get(std::string(EXCEPTION_DUMP));
TORCH_CHECK_WITH(
@@ -1312,6 +1313,7 @@ void ProcessGroupNCCL::heartbeatMonitor() {
if (heartbeat != heartBeatCounter) {
heartBeatCounter = heartbeat;
} else {
+ shouldDump_.store(true);
// No heartbeat increase detected and timeout.
errorMsg = c10::str(
logPrefix(),
@@ -1388,7 +1390,8 @@ void ProcessGroupNCCL::heartbeatMonitor() {
// Case two: desync might be slow or get stuck. Or we get stuck in
// destructors, we will sleep for some time before calling std::abort() to
// kill the whole process.
- if ((terminateProcessGroup_.load() || collectiveDebugInfoMode_.load()) &&
+ if ((terminateProcessGroup_.load() || collectiveDebugInfoMode_.load() ||
+ shouldDump_.load()) &&
!terminateHeartbeatMonitorThread_.load()) {
// Leave another two mins for desync report generation or process group
// destroy. | 2.41.0 |
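The fix boils down to: whenever a flight-recorder dump has been requested (`shouldDump_`), the monitor must sleep long enough for the dump to finish before aborting, even if `collectiveDebugInfoMode_` was never set. A thread-and-flag sketch of that control flow in Python; names and timings are illustrative, not the ProcessGroupNCCL implementation:

```python
import threading
import time

should_dump = threading.Event()        # plays the role of shouldDump_
debug_info_mode = threading.Event()    # plays the role of collectiveDebugInfoMode_
terminate_monitor = threading.Event()

def heartbeat_monitor(dump_grace_sec: float) -> None:
    # ... heartbeat went stale or EXCEPTION_DUMP was seen in the store:
    # request a flight-recorder dump ...
    should_dump.set()
    # Before the fix the sleep was gated only on debug_info_mode/terminate,
    # so a timeout-triggered dump could be killed mid-write; now the monitor
    # also waits whenever a dump was requested.
    if (debug_info_mode.is_set() or should_dump.is_set()) and not terminate_monitor.is_set():
        time.sleep(dump_grace_sec)     # give the dump a chance to complete
    # ... then abort the process ...

t = threading.Thread(target=heartbeat_monitor, args=(0.1,))
t.start()
t.join()
```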
ac99d539be35e806d8d719fa69ceddaf63c6373 | Thu, 11 Apr 2024 08:56:02 +0000 | [PATCH 0007/1000] Only initialize state if needed in SGD (#123757) | Fixes [T184381726](https://www.internalfb.com/intern/tasks/?t=184381726) Pull Request resolved: https://github.com/pytorch/pytorch/pull/123757 Approved by: https://github.com/janeyx99 | diff --git a/test/inductor/test_compiled_optimizers.py b/test/inductor/test_compiled_optimizers.py
index 7c93f326f4..d076f27b17 100644
--- a/test/inductor/test_compiled_optimizers.py
+++ b/test/inductor/test_compiled_optimizers.py
@@ -310,7 +310,8 @@ def make_recompile_test(optim_cls, closure=None, kernel_count=2, **kwargs):
# perturb state to force recompile
# Adagrad doesn't reinitialize state on each step
- if optim_cls is Adagrad:
+ # SGD has an empty state
+ if optim_cls in (Adagrad, SGD):
opt_compiled.param_groups[0]["lr"] = 0.02
elif optim_cls is Adam: # ensure we are guarding on the data_ptr of states
state_tensor = opt_compiled.state[
diff --git a/test/test_optim.py b/test/test_optim.py
index 49c4e86464..d11fe8d42f 100644
--- a/test/test_optim.py
+++ b/test/test_optim.py
@@ -1582,6 +1582,36 @@ class TestOptimRenewed(TestCase):
optim.step()
self.assertTrue(mocked_foreach_impl.called)
+ @optims(optim_db, dtypes=[torch.float32])
+ def test_non_empty_state(self, device, dtype, optim_info):
+ # There are internal tests that check that the state is not empty
+ optim_cls = optim_info.optim_cls
+ model = torch.nn.Linear(5, 5)
+ model.to(dtype=dtype, device=device)
+ inpt = torch.rand(2, 5, dtype=dtype, device=device)
+
+ for optim_input in optim_info.optim_inputs_func(device=device):
+ optim = optim_cls(model.parameters(), **optim_input.kwargs)
+ optim.zero_grad()
+ output = model(inpt)
+ loss = output.sum()
+ loss.backward()
+
+ if optim_info.only_supports_sparse_grads:
+ for param in model.parameters():
+ if param.grad is not None:
+ param.grad = param.grad.to_sparse()
+
+ if optim_info.step_requires_closure:
+ optim.step(lambda: 1.0)
+ else:
+ optim.step()
+
+ for state in optim.state.values():
+ self.assertGreater(len(state), 0)
+
+
+
instantiate_device_type_tests(TestOptimRenewed, globals(), allow_mps=True)
diff --git a/torch/optim/sgd.py b/torch/optim/sgd.py
index ca9985dc9d..7002d98502 100644
--- a/torch/optim/sgd.py
+++ b/torch/optim/sgd.py
@@ -52,8 +52,8 @@ class SGD(Optimizer):
if p.grad.is_sparse:
has_sparse_grad = True
- state = self.state[p]
if group["momentum"] != 0:
+ state = self.state[p]
momentum_buffer_list.append(state.get('momentum_buffer'))
return has_sparse_grad | 2.41.0 |
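The reason moving `state = self.state[p]` under the momentum check matters is that an optimizer's `state` behaves like a `defaultdict`: merely indexing it inserts an empty entry, so momentum-less SGD ended up with spurious empty per-parameter state. A plain-Python illustration of that footgun; the `defaultdict` below stands in for `optimizer.state`, which I'm assuming has the same insert-on-access behaviour:

```python
from collections import defaultdict

state = defaultdict(dict)     # stand-in for torch.optim.Optimizer.state
param = "p0"
momentum = 0

# Old code path: index first, check momentum later.
_ = state[param]              # side effect: inserts an empty dict for p0
print(len(state))             # 1  -> the empty per-param entries internal checks reject

state.clear()

# Fixed code path: only touch state[param] when momentum is actually used.
if momentum != 0:
    buf = state[param].get("momentum_buffer")
print(len(state))             # 0
```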
b7741546b1ee53e5aa3768616c50eab72372a3a | Thu, 11 Apr 2024 09:02:31 +0000 | [PATCH 0008/1000] Fixed arange decomp for float dtype (#121013) | ## Description: - [x] Fixed arange decomp for float dtype - [x] Added a test ## Current state Arange graph and C++ generated code are not optimal when arange is created directly using float32 dtype: ```python import torch def func(x): s = x.shape[-1] a = torch.arange(s, dtype=torch.float32) return s + a c_func = torch.compile(func) out = c_func(torch.rand(10)) ``` Graph on `main`: ``` ===== Forward graph 0 ===== /pytorch/torch/fx/_lazy_graph_module.py class <lambda>(torch.nn.Module): def forward(self): # File: check_arange_decomp.py:8 in func, code: a = torch.arange(s, dtype=torch.float32) iota: "i64[10]" = torch.ops.prims.iota.default(10, start = 0, step = 1, dtype = torch.int64, device = device(type='cpu'), requires_grad = False) convert_element_type: "f64[10]" = torch.ops.prims.convert_element_type.default(iota, torch.float64); iota = None mul: "f64[10]" = torch.ops.aten.mul.Tensor(convert_element_type, 1); convert_element_type = None add: "f64[10]" = torch.ops.aten.add.Tensor(mul, 0); mul = None convert_element_type_1: "f32[10]" = torch.ops.prims.convert_element_type.default(add, torch.float32); add = None # File: check_arange_decomp.py:9 in func, code: return s + a add_1: "f32[10]" = torch.ops.aten.add.Tensor(convert_element_type_1, 10); convert_element_type_1 = None return (add_1,) ===== AFTER POST GRAD ===== /pytorch/torch/fx/_lazy_graph_module.py class <lambda>(torch.nn.Module): def forward(self): # File: check_arange_decomp.py:15 in func, code: a = torch.arange(s, dtype=torch.float32) iota: "i64[10]" = torch.ops.prims.iota.default(10, start = 0, step = 1, dtype = torch.int64, device = device(type='cpu'), requires_grad = False) convert_element_type: "f64[10]" = torch.ops.prims.convert_element_type.default(iota, torch.float64); iota = None mul: "f64[10]" = torch.ops.aten.mul.Tensor(convert_element_type, 1); convert_element_type = None add: "f64[10]" = torch.ops.aten.add.Tensor(mul, 0); mul = None convert_element_type_1: "f32[10]" = torch.ops.prims.convert_element_type.default(add, torch.float32); add = None # File: check_arange_decomp.py:16 in func, code: return s + a add_1: "f32[10]" = torch.ops.aten.add.Tensor(convert_element_type_1, 10); convert_element_type_1 = None return (add_1,) ``` and C++ ```c++ extern "C" void kernel(float* out_ptr0) { { #pragma GCC ivdep for(long x0=static_cast<long>(0L); x0<static_cast<long>(10L); x0+=static_cast<long>(1L)) { auto tmp0 = c10::convert<long>(x0); auto tmp1 = c10::convert<double>(tmp0); // <---- useless ops auto tmp2 = static_cast<double>(1.0); // <---- auto tmp3 = decltype(tmp1)(tmp1 * tmp2); // <---- auto tmp4 = static_cast<double>(0.0); // <---- auto tmp5 = decltype(tmp3)(tmp3 + tmp4); // <---- auto tmp6 = c10::convert<float>(tmp5); auto tmp7 = static_cast<float>(10.0); auto tmp8 = decltype(tmp6)(tmp6 + tmp7); out_ptr0[static_cast<long>(x0)] = tmp8; } } } ``` However, if we manually create arange on i64 and then put to float32, generated graph and C++ code are more natural and benefit of a speed-up. 
```python import torch def func(x): s = x.shape[-1] a = torch.arange(s).to(dtype=torch.float32) return s + a c_func = torch.compile(func) out = c_func(torch.rand(10)) ``` Graph on `main`: ``` ===== Forward graph 0 ===== /pytorch/torch/fx/_lazy_graph_module.py class <lambda>(torch.nn.Module): def forward(self): # File: check_arange_decomp.py:14 in func, code: a = torch.arange(s).to(dtype=torch.float32) iota: "i64[10]" = torch.ops.prims.iota.default(10, start = 0, step = 1, dtype = torch.int64, device = device(type='cpu'), requires_grad = False) convert_element_type: "f32[10]" = torch.ops.prims.convert_element_type.default(iota, torch.float32); iota = None # File: check_arange_decomp.py:15 in func, code: return s + a add: "f32[10]" = torch.ops.aten.add.Tensor(convert_element_type, 10); convert_element_type = None return (add,) ===== AFTER POST GRAD ===== /pytorch/torch/fx/_lazy_graph_module.py class <lambda>(torch.nn.Module): def forward(self): # File: check_arange_decomp.py:21 in func, code: a = torch.arange(s).to(dtype=torch.float32) iota: "i64[10]" = torch.ops.prims.iota.default(10, start = 0, step = 1, dtype = torch.int64, device = device(type='cpu'), requires_grad = False) convert_element_type: "f32[10]" = torch.ops.prims.convert_element_type.default(iota, torch.float32); iota = None # File: check_arange_decomp.py:22 in func, code: return s + a add: "f32[10]" = torch.ops.aten.add.Tensor(convert_element_type, 10); convert_element_type = None return (add,) ``` C++ on `main` ```c++ extern "C" void kernel(float* out_ptr0) { { #pragma GCC ivdep for(long x0=static_cast<long>(0L); x0<static_cast<long>(10L); x0+=static_cast<long>(1L)) { auto tmp0 = c10::convert<long>(x0); auto tmp1 = c10::convert<float>(tmp0); auto tmp2 = static_cast<float>(10.0); auto tmp3 = decltype(tmp1)(tmp1 + tmp2); out_ptr0[static_cast<long>(x0)] = tmp3; } } } ``` For example, the speed-up seen on upsample_nearest2d on cpu: ``` [----------------------------------------------------------------------------------------------------------------------------------------------- Interpolate, cpu ----------------------------------------------------------------------------------------------------------------------------------------------] | Eager (2.3.0a0+gitb4324ed) PR | Compiled (2.3.0a0+gitb4324ed) PR | Compiled (2.3.0a0+git0d1e705) Nightly | speed-up PR vs Nightly | Eager (2.3.0a0+git0d1e705) Nightly 1 threads: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ Input (1, 3, 500, 400), torch.uint8, torch.contiguous_format | mode: nearest, align_corners: None, osize: (256, 256) | 287.988 (+-10.399) | 200.034 (+-8.630) | 285.143 (+-8.412) | 1.425 (+-0.000) | 287.991 (+-11.302) Input (1, 3, 500, 400), torch.uint8, torch.channels_last | mode: nearest, align_corners: None, osize: (256, 256) | 697.206 (+-27.033) | 171.650 (+-7.381) | 193.280 (+-5.840) | 1.126 (+-0.000) | 701.642 (+-26.461) Input (1, 3, 500, 400), torch.float32, torch.contiguous_format | mode: nearest, align_corners: None, osize: (256, 256) | 149.149 (+-6.045) | 222.780 (+-6.852) | 299.968 (+-12.354) | 1.346 (+-0.000) | 145.055 (+-7.232) Input (1, 3, 500, 400), torch.float32, torch.channels_last | mode: nearest, align_corners: None, osize: (256, 256) | 596.741 (+-27.970) | 205.923 (+-8.648) | 233.912 (+-7.742) | 1.136 (+-0.000) | 
598.000 (+-25.630) Input (4, 3, 500, 400), torch.uint8, torch.contiguous_format | mode: nearest, align_corners: None, osize: (256, 256) | 1095.734 (+-51.658) | 700.850 (+-24.852) | 1044.255 (+-38.216) | 1.490 (+-0.000) | 1097.977 (+-35.521) Input (4, 3, 500, 400), torch.uint8, torch.channels_last | mode: nearest, align_corners: None, osize: (256, 256) | 2741.813 (+-122.917) | 583.073 (+-16.998) | 665.029 (+-36.331) | 1.141 (+-0.000) | 2722.388 (+-116.263) Input (4, 3, 500, 400), torch.float32, torch.contiguous_format | mode: nearest, align_corners: None, osize: (256, 256) | 578.183 (+-37.266) | 833.295 (+-42.264) | 1131.341 (+-54.710) | 1.358 (+-0.000) | 584.953 (+-45.549) Input (4, 3, 500, 400), torch.float32, torch.channels_last | mode: nearest, align_corners: None, osize: (256, 256) | 2332.508 (+-103.556) | 840.194 (+-47.664) | 935.625 (+-47.467) | 1.114 (+-0.000) | 2334.314 (+-91.644) Input (1, 3, 1200, 1300), torch.uint8, torch.contiguous_format | mode: nearest, align_corners: None, osize: (200, 300) | 272.631 (+-11.348) | 195.988 (+-5.748) | 274.021 (+-9.475) | 1.398 (+-0.000) | 272.752 (+-12.716) Input (1, 3, 1200, 1300), torch.uint8, torch.channels_last | mode: nearest, align_corners: None, osize: (200, 300) | 640.409 (+-25.465) | 164.773 (+-7.372) | 185.018 (+-8.349) | 1.123 (+-0.000) | 639.390 (+-30.761) Input (1, 3, 1200, 1300), torch.float32, torch.contiguous_format | mode: nearest, align_corners: None, osize: (200, 300) | 158.602 (+-6.593) | 220.478 (+-6.809) | 286.376 (+-8.981) | 1.299 (+-0.000) | 158.557 (+-6.143) Input (1, 3, 1200, 1300), torch.float32, torch.channels_last | mode: nearest, align_corners: None, osize: (200, 300) | 548.903 (+-22.889) | 202.788 (+-9.158) | 227.404 (+-8.995) | 1.121 (+-0.000) | 554.096 (+-21.330) Input (4, 3, 1200, 1300), torch.uint8, torch.contiguous_format | mode: nearest, align_corners: None, osize: (200, 300) | 1036.061 (+-35.285) | 680.728 (+-30.925) | 986.254 (+-42.732) | 1.449 (+-0.000) | 1038.718 (+-43.070) Input (4, 3, 1200, 1300), torch.uint8, torch.channels_last | mode: nearest, align_corners: None, osize: (200, 300) | 2504.520 (+-125.805) | 550.067 (+-21.383) | 628.000 (+-27.589) | 1.142 (+-0.000) | 2523.134 (+-113.336) Input (4, 3, 1200, 1300), torch.float32, torch.contiguous_format | mode: nearest, align_corners: None, osize: (200, 300) | 1058.188 (+-57.853) | 1216.427 (+-76.160) | 1380.231 (+-98.939) | 1.135 (+-0.000) | 1057.031 (+-66.075) Input (4, 3, 1200, 1300), torch.float32, torch.channels_last | mode: nearest, align_corners: None, osize: (200, 300) | 2305.911 (+-116.864) | 1080.189 (+-79.934) | 1141.561 (+-67.959) | 1.057 (+-0.000) | 2306.606 (+-121.544) Input (1, 3, 300, 400), torch.uint8, torch.contiguous_format | mode: nearest, align_corners: None, osize: (600, 700) | 1689.489 (+-60.579) | 1077.401 (+-44.948) | 1634.264 (+-64.340) | 1.517 (+-0.000) | 1693.945 (+-67.998) Input (1, 3, 300, 400), torch.uint8, torch.channels_last | mode: nearest, align_corners: None, osize: (600, 700) | 4198.368 (+-179.096) | 886.656 (+-30.355) | 1028.568 (+-46.310) | 1.160 (+-0.000) | 4174.351 (+-141.020) Input (1, 3, 300, 400), torch.float32, torch.contiguous_format | mode: nearest, align_corners: None, osize: (600, 700) | 716.572 (+-51.954) | 1175.864 (+-52.191) | 1674.373 (+-51.815) | 1.424 (+-0.000) | 715.724 (+-41.104) Input (1, 3, 300, 400), torch.float32, torch.channels_last | mode: nearest, align_corners: None, osize: (600, 700) | 3604.989 (+-132.489) | 1096.933 (+-54.290) | 1270.347 (+-60.932) | 1.158 (+-0.000) | 3601.864 
(+-140.218) Input (4, 3, 300, 400), torch.uint8, torch.contiguous_format | mode: nearest, align_corners: None, osize: (600, 700) | 6721.610 (+-355.997) | 4203.213 (+-134.362) | 6423.763 (+-225.311) | 1.528 (+-0.000) | 6715.626 (+-288.233) Input (4, 3, 300, 400), torch.uint8, torch.channels_last | mode: nearest, align_corners: None, osize: (600, 700) | 16695.467 (+-709.620) | 3460.013 (+-149.456) | 4001.810 (+-218.093) | 1.157 (+-0.000) | 16621.138 (+-713.320) Input (4, 3, 300, 400), torch.float32, torch.contiguous_format | mode: nearest, align_corners: None, osize: (600, 700) | 3020.017 (+-147.314) | 4743.164 (+-135.850) | 6709.494 (+-281.025) | 1.415 (+-0.000) | 3015.602 (+-105.852) Input (4, 3, 300, 400), torch.float32, torch.channels_last | mode: nearest, align_corners: None, osize: (600, 700) | 14456.688 (+-752.839) | 5150.893 (+-201.571) | 5737.315 (+-138.011) | 1.114 (+-0.000) | 14464.472 (+-720.027) Times are in microseconds (us). ``` ## PR This PR improves arange decomp such that `arange(s, dtype=torch.float32)` removing extra dtype conversion to double: Code: ```python import torch def func(x): s = x.shape[-1] a = torch.arange(s, dtype=torch.float32) return s + a c_func = torch.compile(func) out = c_func(torch.rand(10)) ``` Graph on this PR: ``` ===== Forward graph 0 ===== /pytorch/torch/fx/_lazy_graph_module.py class <lambda>(torch.nn.Module): def forward(self): # File: check_arange_decomp.py:15 in func, code: a = torch.arange(s, dtype=torch.float32) iota: "i64[10]" = torch.ops.prims.iota.default(10, start = 0, step = 1, dtype = torch.int64, device = device(type='cpu'), requires_grad = False) mul: "i64[10]" = torch.ops.aten.mul.Tensor(iota, 1); iota = None add: "i64[10]" = torch.ops.aten.add.Tensor(mul, 0); mul = None convert_element_type: "f32[10]" = torch.ops.prims.convert_element_type.default(add, torch.float32); add = None # File: check_arange_decomp.py:16 in func, code: return s + a add_1: "f32[10]" = torch.ops.aten.add.Tensor(convert_element_type, 10); convert_element_type = None return (add_1,) ===== AFTER POST GRAD ===== /pytorch/torch/fx/_lazy_graph_module.py class <lambda>(torch.nn.Module): def forward(self): # File: check_arange_decomp.py:16 in func, code: a = torch.arange(s, dtype=torch.float32) iota: "i64[10]" = torch.ops.prims.iota.default(10, start = 0, step = 1, dtype = torch.int64, device = device(type='cpu'), requires_grad = False) mul: "i64[10]" = torch.ops.aten.mul.Tensor(iota, 1); iota = None add: "i64[10]" = torch.ops.aten.add.Tensor(mul, 0); mul = None convert_element_type: "f32[10]" = torch.ops.prims.convert_element_type.default(add, torch.float32); add = None # File: check_arange_decomp.py:17 in func, code: return s + a add_1: "f32[10]" = torch.ops.aten.add.Tensor(convert_element_type, 10); convert_element_type = None return (add_1,) ``` and C++ on this PR: ```c++ extern "C" void kernel(float* out_ptr0) { { #pragma GCC ivdep for(long x0=static_cast<long>(0L); x0<static_cast<long>(10L); x0+=static_cast<long>(1L)) { auto tmp0 = c10::convert<long>(x0); auto tmp1 = c10::convert<float>(tmp0); auto tmp2 = static_cast<float>(10.0); auto tmp3 = decltype(tmp1)(tmp1 + tmp2); out_ptr0[static_cast<long>(x0)] = tmp3; } } } ``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/121013 Approved by: https://github.com/peterbell10 | diff --git a/test/test_decomp.py b/test/test_decomp.py
index 4e482a92d5..39d0c2eef2 100644
--- a/test/test_decomp.py
+++ b/test/test_decomp.py
@@ -38,6 +38,7 @@ from torch._ops import DispatchKey
import itertools
import functools
from functools import partial
+import re
import unittest
aten = torch.ops.aten
@@ -630,6 +631,45 @@ class TestDecomp(TestCase):
res = torch._decomp.decompositions.native_batch_norm(input, weight, bias, mean, var, False, 1, 1e-05)
self.assertEqual(shape, res[0].shape)
+ def test_arange_graph(self, device):
+ from torch.fx.experimental.proxy_tensor import make_fx
+
+ def func(x, start):
+ le = x.shape[-1]
+ if start is None:
+ a = torch.arange(le, dtype=torch.float32, device=x.device)
+ else:
+ a = torch.arange(start, le, dtype=torch.float32, device=x.device)
+ return a
+
+ pattern = r", device = device\(.+\), requires_grad = False"
+
+ cfunc = make_fx(func, decomposition_table=decomposition_table)
+ fx_g = cfunc(torch.rand(10, device=device), None)
+ fx_g_code = fx_g.code.strip()
+ # Remove device and requires_grad
+ fx_g_code = re.sub(pattern, "", fx_g_code)
+ self.assertExpectedInline(fx_g_code, """\
+def forward(self, x_1, start_1):
+ iota = torch.ops.prims.iota.default(10, start = 0, step = 1, dtype = torch.int64)
+ mul = torch.ops.prims.mul.default(iota, 1); iota = None
+ add = torch.ops.prims.add.default(mul, 0); mul = None
+ convert_element_type = torch.ops.prims.convert_element_type.default(add, torch.float32); add = None
+ return convert_element_type""")
+
+ fx_g = cfunc(torch.rand(10, device=device), 1)
+ fx_g_code = fx_g.code.strip()
+ # Remove device and requires_grad
+ fx_g_code = re.sub(pattern, "", fx_g_code)
+ self.assertExpectedInline(fx_g_code, """\
+def forward(self, x_1, start_1):
+ iota = torch.ops.prims.iota.default(9, start = 0, step = 1, dtype = torch.int64)
+ mul = torch.ops.prims.mul.default(iota, 1); iota = None
+ add = torch.ops.prims.add.default(mul, 1); mul = None
+ convert_element_type = torch.ops.prims.convert_element_type.default(add, torch.float32); add = None
+ return convert_element_type""")
+
+
class DecompCrossRefMode(TorchDispatchMode):
def __init__(self, test_case, saved_precision, saved_rel_tol, dtype, run_all):
self.test_case = test_case
diff --git a/torch/_inductor/codegen/cpp.py b/torch/_inductor/codegen/cpp.py
index 31dbe27c22..0fe4b7261a 100644
--- a/torch/_inductor/codegen/cpp.py
+++ b/torch/_inductor/codegen/cpp.py
@@ -1830,8 +1830,8 @@ class CppKernel(Kernel):
if cse_var == var:
if is_to_lowp_dtype(expr):
m = re.search(r"tmp\d+", expr)
- assert m
- fp32_cse_var_name = m.group()
+ if m is not None:
+ fp32_cse_var_name = m.group()
if fp32_cse_var_name:
for cse_var in cache.values():
if cse_var.name == fp32_cse_var_name:
diff --git a/torch/_inductor/fx_passes/joint_graph.py b/torch/_inductor/fx_passes/joint_graph.py
index 3be10498f8..df89037067 100644
--- a/torch/_inductor/fx_passes/joint_graph.py
+++ b/torch/_inductor/fx_passes/joint_graph.py
@@ -247,7 +247,7 @@ def constant_fold_uniform_value(gm: torch.fx.GraphModule):
):
torch._check(runtime_size == compile_time_size)
- # zeros, and ones just get traced into full, so we insert those
+ # zeros and ones just get traced into full, so we insert those
new_node = graph.call_function(
aten.full.default,
args=(node_replacements_shapes[node], value),
diff --git a/torch/_refs/__init__.py b/torch/_refs/__init__.py
index 8e1d50a00c..eeb4ad00d3 100644
--- a/torch/_refs/__init__.py
+++ b/torch/_refs/__init__.py
@@ -4932,9 +4932,10 @@ def arange(
lambda: f"step must be finite but got {step}",
)
+ args = (start, end, step)
+ integer_args = builtins.all(isinstance(arg, IntLike) for arg in args)
+
if dtype is None:
- args = (start, end, step)
- integer_args = builtins.all(isinstance(arg, IntLike) for arg in args)
dtype = torch.int64 if integer_args else torch.get_default_dtype()
is_integer = utils.is_integer_dtype(dtype)
@@ -4962,7 +4963,6 @@ def arange(
requires_grad=requires_grad,
)
- computation_dtype = utils.get_acc_type(dtype, device)
index = prims.iota(
length,
start=0,
@@ -4971,6 +4971,10 @@ def arange(
device=device,
requires_grad=False,
)
+
+ computation_dtype = (
+ torch.long if integer_args else utils.get_acc_type(dtype, device)
+ )
index = _maybe_convert_to_dtype(index, computation_dtype)
result = start + step * index
result = _maybe_convert_to_dtype(result, dtype) | 2.41.0 |
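The `_refs.arange` hunk above is easier to follow as a standalone sketch: build an int64 iota, apply the `start + step * index` affine transform in a suitable computation dtype, and convert to the requested dtype only once at the end. The helper below is illustrative only — it is not the real decomposition, `torch.arange` stands in for `prims.iota`, and `torch.float64` stands in for `utils.get_acc_type`:

```python
import math

import torch


def arange_sketch(start, end, step, *, dtype=None):
    # Mirrors the dtype inference in the _refs.arange change above.
    integer_args = all(isinstance(a, int) for a in (start, end, step))
    if dtype is None:
        dtype = torch.int64 if integer_args else torch.get_default_dtype()

    length = math.ceil((end - start) / step)
    index = torch.arange(length, dtype=torch.int64)  # stand-in for prims.iota

    # The point of the PR: integer inputs now compute in int64 rather than
    # being promoted to an accumulation dtype (double) before the final cast.
    computation_dtype = torch.int64 if integer_args else torch.float64
    result = start + step * index.to(computation_dtype)
    return result.to(dtype)


print(arange_sketch(0, 10, 1, dtype=torch.float32))  # tensor([0., 1., ..., 9.])
```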
798f5bf0d58fb9655c4da9c0a8bc1ec8af31aea | Wed, 10 Apr 2024 23:23:28 -0700 | [PATCH 0009/1000] Add Quantization recipe filter per operator type for x86_inductor_quantizer (#122775) | **Summary** Default recipes are enabled in `X86InductorQuantizer`, and requests have come in to customize recipes based on these defaults: - Avoid annotation propagation and restrict annotation to `conv`/`linear` only. - Add `matmul` to the quantization recipes, noting that it's not a general recipe but is tailored to meet accuracy criteria for specific models. To meet these requests, this PR introduces the interfaces `set_function_type_qconfig` and `set_module_type_qconfig`: - `set_function_type_qconfig` accepts a functional input such as `torch.nn.functional.linear` or `torch.matmul`; `set_module_type_qconfig` accepts an nn.Module input such as `torch.nn.Conv2d`. - To disable the recipe for an operator, the user can simply exclude it from the list of operations via `quantizer.set_function_type_qconfig(op, None)`. - To modify or extend the default recipe for an operator, the user can customize it via `quantizer.set_function_type_qconfig(op, config)`. **Test Plan** ``` python -m pytest quantization/pt2e/test_x86inductor_quantizer.py -k test_filter_conv2d_recipe python -m pytest quantization/pt2e/test_x86inductor_quantizer.py -k test_filter_linear_recipe python -m pytest quantization/pt2e/test_x86inductor_quantizer.py -k test_filter_maxpool2d_recipe ``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/122775 Approved by: https://github.com/jgong5, https://github.com/jerryzh168 | diff --git a/test/quantization/pt2e/test_x86inductor_quantizer.py b/test/quantization/pt2e/test_x86inductor_quantizer.py
index 06e2e6c9f9..c9df319bfd 100644
--- a/test/quantization/pt2e/test_x86inductor_quantizer.py
+++ b/test/quantization/pt2e/test_x86inductor_quantizer.py
@@ -1346,3 +1346,105 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase):
node_list,
is_qat=True,
)
+
+ @skipIfNoX86
+ def test_filter_conv2d_recipe(self):
+ """
+ Test removing conv2d from default recipe of X86InductorQuantizer.
+ """
+ with override_quantized_engine("x86"), torch.no_grad():
+ m = TestHelperModules.Conv2dUnaryModule(torch.nn.ReLU(inplace=False)).eval()
+ example_inputs = (torch.randn(2, 3, 16, 16),)
+ quantizer = X86InductorQuantizer().set_global(
+ xiq.get_default_x86_inductor_quantization_config()
+ )
+ quantizer.set_module_type_qconfig(torch.nn.Conv2d, None)
+ node_occurrence = {
+ # one for input and weight of the conv
+ torch.ops.quantized_decomposed.quantize_per_tensor.default: 0,
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default: 0,
+ # note: quantize op for weights are const propagated
+ torch.ops.quantized_decomposed.quantize_per_channel.default: 0,
+ torch.ops.quantized_decomposed.dequantize_per_channel.default: 0,
+ }
+ node_list = [
+ torch.ops.aten.conv2d.default,
+ torch.ops.aten.relu.default,
+ ]
+ self._test_quantizer(
+ m,
+ example_inputs,
+ quantizer,
+ node_occurrence,
+ node_list,
+ )
+
+ @skipIfNoX86
+ def test_filter_linear_recipe(self):
+ """
+ Test removing linear from default recipe of X86InductorQuantizer.
+ """
+ with override_quantized_engine("x86"), torch.no_grad():
+ m = TestHelperModules.LinearUnaryModule(
+ use_bias=True,
+ postop=nn.ReLU,
+ ).eval()
+ example_inputs = (torch.randn(2, 4),)
+ quantizer = X86InductorQuantizer().set_global(
+ xiq.get_default_x86_inductor_quantization_config()
+ )
+ quantizer.set_function_type_qconfig(torch.nn.functional.linear, None)
+ node_occurrence = {
+ # one for input and weight of the conv
+ torch.ops.quantized_decomposed.quantize_per_tensor.default: 0,
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default: 0,
+ # note: quantize op for weights are const propagated
+ torch.ops.quantized_decomposed.quantize_per_channel.default: 0,
+ torch.ops.quantized_decomposed.dequantize_per_channel.default: 0,
+ }
+ node_list = [
+ torch.ops.aten.linear.default,
+ torch.ops.aten.relu.default,
+ ]
+ self._test_quantizer(
+ m,
+ example_inputs,
+ quantizer,
+ node_occurrence,
+ node_list,
+ )
+
+ @skipIfNoX86
+ def test_filter_maxpool2d_recipe(self):
+ """
+ Test removing maxpool2d from default recipe of X86InductorQuantizer.
+ """
+ with override_quantized_engine("x86"), torch.no_grad():
+ m = TestHelperModules.Conv2dUnaryModule(torch.nn.ReLU(inplace=False)).eval()
+ example_inputs = (torch.randn(2, 3, 16, 16),)
+ quantizer = X86InductorQuantizer().set_global(
+ xiq.get_default_x86_inductor_quantization_config()
+ )
+ quantizer.set_function_type_qconfig(torch.nn.functional.max_pool2d, None)
+ node_occurrence = {
+ # one for input and weight of the conv
+ torch.ops.quantized_decomposed.quantize_per_tensor.default: 1,
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default: 1,
+ # note: quantize op for weights are const propagated
+ torch.ops.quantized_decomposed.quantize_per_channel.default: 0,
+ torch.ops.quantized_decomposed.dequantize_per_channel.default: 1,
+ }
+ node_list = [
+ torch.ops.quantized_decomposed.quantize_per_tensor.default,
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default,
+ torch.ops.aten.conv2d.default,
+ torch.ops.aten.relu.default,
+ torch.ops.aten.max_pool2d.default,
+ ]
+ self._test_quantizer(
+ m,
+ example_inputs,
+ quantizer,
+ node_occurrence,
+ node_list,
+ )
diff --git a/torch/ao/quantization/quantizer/x86_inductor_quantizer.py b/torch/ao/quantization/quantizer/x86_inductor_quantizer.py
index e83cf1e4da..8889cf2df0 100644
--- a/torch/ao/quantization/quantizer/x86_inductor_quantizer.py
+++ b/torch/ao/quantization/quantizer/x86_inductor_quantizer.py
@@ -2,8 +2,9 @@ import copy
import functools
import itertools
import operator
+import warnings
from dataclasses import dataclass
-from typing import Any, Dict, List, Optional, Sequence, Set, Tuple
+from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple
import torch
import torch.nn.functional as F
@@ -57,10 +58,10 @@ class _X86InductorQuantizationAnnotation(QuantizationAnnotation):
_is_output_of_quantized_pattern: bool = False
-# Operations that:
-# 1. Operations are optimized to run with int8 when int8 input provided.
-# 2. Operations do not support int8 input and produce fp32 output.
-int8_in_int8_out_ops_pt2e: Set = {
+# Operators that:
+# 1. Operators are optimized to run with int8 when int8 input provided.
+# 2. Operators do not support int8 input and produce fp32 output.
+int8_in_int8_out_ops: Set = {
torch.ops.aten.max_pool2d.default,
torch.ops.aten.cat.default,
torch.ops.aten.avg_pool2d.default,
@@ -68,14 +69,53 @@ int8_in_int8_out_ops_pt2e: Set = {
torch.ops.aten.flatten.using_ints,
}
+# Operators that support the int8 data type for quantization config propagation.
+# A superset of int8_in_int8_out_ops incorporating additional operators.
+propagation_quantizable_ops = int8_in_int8_out_ops
-# Operations support the int8 data type and exclude operations such as conv and linear.
-# A superset of int8_in_int8_out_ops_pt2e incorporating additional operators.
-quantizable_ops_pt2e = copy.deepcopy(int8_in_int8_out_ops_pt2e)
+# Operators support the int8 data type
+# and recipe is configured by default in X86InductorQuantizer.
+default_quantizable_ops = propagation_quantizable_ops | {
+ torch.ops.aten.conv2d.default,
+ torch.ops.aten.linear.default,
+}
+
+# A superset of default_quantizable_ops includes operators support the int8 data type
+# but not enabled by default recipe of X86InductorQuantizer.
+quantizable_ops = default_quantizable_ops
QUANT_ANNOTATION_KEY = "quantization_annotation"
+def _map_module_function_to_aten_operator_type():
+ module_function_to_aten_operator: Dict[Callable, torch._ops.OpOverloadPacket] = {}
+ map_list = (
+ ([torch.nn.Conv2d, F.conv2d], torch.ops.aten.conv2d.default),
+ ([torch.nn.Linear, F.linear], torch.ops.aten.linear.default),
+ ([torch.nn.MaxPool2d, F.max_pool2d], torch.ops.aten.max_pool2d.default),
+ (
+ [
+ torch.cat,
+ ],
+ torch.ops.aten.cat.default,
+ ),
+ ([torch.nn.AvgPool2d, F.avg_pool2d], torch.ops.aten.avg_pool2d.default),
+ (
+ [torch.nn.AdaptiveAvgPool2d, F.adaptive_avg_pool2d],
+ torch.ops.aten.adaptive_avg_pool2d.default,
+ ),
+ (
+ [
+ torch.flatten,
+ ],
+ torch.ops.aten.flatten.using_ints,
+ ),
+ )
+ for map_item in map_list:
+ module_function_to_aten_operator.update(dict.fromkeys(map_item[0], map_item[1])) # type: ignore[call-overload]
+ return module_function_to_aten_operator
+
+
def _mark_nodes_as_annotated(nodes: List[Node]):
for node in nodes:
if node is not None:
@@ -235,11 +275,14 @@ def _get_supported_config_and_operators() -> List[OperatorConfig]:
class X86InductorQuantizer(Quantizer):
supported_config_and_operators = _get_supported_config_and_operators()
+ module_function_to_aten_operator_type = _map_module_function_to_aten_operator_type()
def __init__(self):
super().__init__()
self.global_config: QuantizationConfig = None # type: ignore[assignment]
- self.operator_type_config: Dict[str, Optional[QuantizationConfig]] = {}
+ self.operator_type_qconfig: Dict[
+ torch._ops.OpOverloadPacket, Optional[QuantizationConfig]
+ ] = {}
@classmethod
def get_supported_quantization_configs(cls) -> List[QuantizationConfig]:
@@ -267,12 +310,62 @@ class X86InductorQuantizer(Quantizer):
self.global_config = quantization_config
return self
- def set_config_for_operator_type(
- self, operator_type: str, quantization_config: QuantizationConfig
- ):
- self.operator_type_config[operator_type] = quantization_config
+ def set_function_type_qconfig(
+ self,
+ function_type: Callable,
+ quantization_config: Optional[QuantizationConfig],
+ ) -> "X86InductorQuantizer":
+ if function_type in X86InductorQuantizer.module_function_to_aten_operator_type:
+ self._set_aten_operator_qconfig(
+ X86InductorQuantizer.module_function_to_aten_operator_type[
+ function_type
+ ],
+ quantization_config,
+ )
+ else:
+ warnings.warn(
+ f"function: Unable to customize quantization config for {function_type} by X86InductorQuantizer."
+ )
+ return self
+
+ def set_module_type_qconfig(
+ self,
+ module_type: torch.nn.Module,
+ quantization_config: Optional[QuantizationConfig],
+ ) -> "X86InductorQuantizer":
+ if module_type in X86InductorQuantizer.module_function_to_aten_operator_type:
+ self._set_aten_operator_qconfig(
+ X86InductorQuantizer.module_function_to_aten_operator_type[module_type],
+ quantization_config,
+ )
+ else:
+ warnings.warn(
+ f"Module: Unable to customize quantization config for {module_type} by X86InductorQuantizer."
+ )
+ return self
+
+ def _set_aten_operator_qconfig(
+ self,
+ operator_type: torch._ops.OpOverloadPacket,
+ quantization_config: Optional[QuantizationConfig],
+ ) -> "X86InductorQuantizer":
+ if operator_type in quantizable_ops:
+ self.operator_type_qconfig[operator_type] = quantization_config
+ else:
+ warnings.warn(
+ f"operator: Unable to quantize {operator} by X86InductorQuantizer."
+ )
return self
+ def _get_aten_operator_qconfig(
+ self,
+ operator_type: torch._ops.OpOverloadPacket,
+ ) -> Optional[QuantizationConfig]:
+ if operator_type in self.operator_type_qconfig:
+ assert operator_type in quantizable_ops
+ return self.operator_type_qconfig[operator_type]
+ return self.global_config if operator_type in default_quantizable_ops else None
+
def _annotate_conv_node_helper(
self,
conv_node: torch.fx.Node,
@@ -403,36 +496,30 @@ class X86InductorQuantizer(Quantizer):
we need to annotate the output of this pattern.
"""
- config = self.global_config
-
# Step1: Recipe of fusion patterns like conv/linear.
- if config.is_qat:
- # Annotate QAT specific pattern: mainly due to BN not folded in prepare_qat
- self._annotate_qat_conv2d_fusion_pattern(model, config)
-
- self._annotate_conv2d_fusion_pattern(model, config)
+ self._annotate_conv2d_fusion_pattern(model)
+ self._annotate_linear_fusion_pattern(model)
# Step2: Recipe to propagate annotation for patterns beside conv/linear.
# Go through all the nodes from start to end.
# Recipe refer to https://github.com/intel/intel-extension-for-pytorch/blob/
# 90d19323d96afc53fcc22ba5a7bb3fb07fdd6c1c/intel_extension_for_pytorch/quantization/_recipe.py#L538
for node in model.graph.nodes:
- self._annotation_propagation_quantizable_pattern(node, config)
+ self._annotate_propagation_quantizable_pattern(node)
# Step3: For quantizable ops, such as maxpool2d, we need to quantize its output if it is quantized
# in inputs. So, we can fuse dq-operator-q into a quantized op.
# Refer to https://github.com/intel/intel-extension-for-pytorch/blob/
# 90d19323d96afc53fcc22ba5a7bb3fb07fdd6c1c/intel_extension_for_pytorch/quantization/_recipe.py#L487
for node in model.graph.nodes:
- self._annotate_output_for_int8_in_int8_out_pattern(node, config)
+ self._annotate_output_for_int8_in_int8_out_pattern(node)
return model
def _annotate_for_dynamic_quantization_config(
self, model: torch.fx.GraphModule
) -> torch.fx.GraphModule:
- config = self.global_config
- self._annotate_linear(model, config)
+ self._annotate_linear_fusion_pattern(model)
return model
def _annotate_qat_conv2d_fusion_pattern(
@@ -648,15 +735,22 @@ class X86InductorQuantizer(Quantizer):
nodes_to_mark_annotated.extend(list(bn_partition.nodes))
_mark_nodes_as_annotated(nodes_to_mark_annotated)
- def _annotate_conv2d_fusion_pattern(
- self, model: torch.fx.GraphModule, config: QuantizationConfig
- ):
- self._annotate_conv2d_binary_unary(model, config)
- self._annotate_conv2d_binary(model, config)
- self._annotate_conv2d_unary(model, config)
- self._annotate_conv2d(model, config)
- self._annotate_linear_unary(model, config)
- self._annotate_linear(model, config)
+ def _annotate_conv2d_fusion_pattern(self, model: torch.fx.GraphModule):
+ if config := self._get_aten_operator_qconfig(torch.ops.aten.conv2d.default):
+ if config.is_qat:
+ # Annotate QAT specific pattern: mainly due to BN not folded in prepare_qat
+ self._annotate_qat_conv2d_fusion_pattern(model, config)
+ self._annotate_conv2d_binary_unary(model, config)
+ self._annotate_conv2d_binary(model, config)
+ self._annotate_conv2d_unary(model, config)
+ self._annotate_conv2d(model, config)
+
+ def _annotate_linear_fusion_pattern(self, model: torch.fx.GraphModule):
+ if config := self._get_aten_operator_qconfig(torch.ops.aten.linear.default):
+ if config.input_activation and not config.input_activation.is_dynamic:
+ # <TODO> Weiwen: Dynamic Quant of linear unary will be supported in next step
+ self._annotate_linear_unary(model, config)
+ self._annotate_linear(model, config)
def _annotate_conv2d_binary_unary(
self, gm: torch.fx.GraphModule, quantization_config: QuantizationConfig
@@ -851,14 +945,13 @@ class X86InductorQuantizer(Quantizer):
_is_output_of_quantized_pattern=True,
)
- def _annotation_propagation_quantizable_pattern(
- self, node: Node, quantization_config: QuantizationConfig
- ) -> None:
+ def _annotate_propagation_quantizable_pattern(self, node: Node) -> None:
# Propagate annotation to quantizable patterns.
if (
- (node.target in quantizable_ops_pt2e)
+ (node.target in propagation_quantizable_ops)
and (not _is_any_annotated([node]))
and (node.op == "call_function")
+ and (quantization_config := self._get_aten_operator_qconfig(node.target)) # type: ignore[arg-type]
):
def is_all_inputs_connected_to_quantized_op(input_nodes):
@@ -915,16 +1008,18 @@ class X86InductorQuantizer(Quantizer):
)
return
- def _annotate_output_for_int8_in_int8_out_pattern(
- self, node: Node, quantization_config: QuantizationConfig
- ) -> None:
+ def _annotate_output_for_int8_in_int8_out_pattern(self, node: Node) -> None:
r"""
- Check and insert observer at output of node in int8_in_int8_out_ops_pt2e if needed.
+ Check and insert observer at output of node in int8_in_int8_out_ops if needed.
Recipe refers to https://github.com/intel/intel-extension-for-pytorch/blob/
90d19323d96afc53fcc22ba5a7bb3fb07fdd6c1c/intel_extension_for_pytorch/quantization/_utils.py#L495
"""
edge_or_node: Tuple[Node, Node]
- if (node.target in int8_in_int8_out_ops_pt2e) and (_is_any_annotated([node])):
+ if (
+ (node.target in int8_in_int8_out_ops)
+ and (_is_any_annotated([node]))
+ and (quantization_config := self._get_aten_operator_qconfig(node.target)) # type: ignore[arg-type]
+ ):
if node.target == torch.ops.aten.max_pool2d.default:
maxpool_node = node
if not _is_all_annotated( | 2.41.0 |
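For reference, a hedged usage sketch of the per-operator filtering API added above, based on the calls exercised in the new tests; the model and the export/prepare steps are omitted, and passing `None` means "drop this operator from the default recipe":

```python
import torch
import torch.ao.quantization.quantizer.x86_inductor_quantizer as xiq
from torch.ao.quantization.quantizer.x86_inductor_quantizer import X86InductorQuantizer

quantizer = X86InductorQuantizer()
# Start from the default recipes...
quantizer.set_global(xiq.get_default_x86_inductor_quantization_config())

# ...then opt conv2d out by module type (None disables its annotation),
quantizer.set_module_type_qconfig(torch.nn.Conv2d, None)
# or opt linear out by functional type.
quantizer.set_function_type_qconfig(torch.nn.functional.linear, None)

# The configured quantizer is then handed to the usual prepare_pt2e/convert_pt2e
# flow, exactly as in the existing X86InductorQuantizer tests.
```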
8e9261b906f69b397e4027362be801f98a68d62 | Wed, 10 Apr 2024 23:23:28 -0700 | [PATCH 0010/1000] Add Matmul recipe into x86_inductor_quantizer (#122776) | **Summary** Add `matmul` to the quantization recipes, noting that it's not a general recipe but is tailored to meet accuracy criteria for specific models. The `matmul` recipe is disabled by default. **Test Plan** ``` python -m pytest quantization/pt2e/test_x86inductor_quantizer.py -k test_attention_block ``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/122776 Approved by: https://github.com/jgong5, https://github.com/jerryzh168 ghstack dependencies: #122775 | diff --git a/test/quantization/pt2e/test_x86inductor_quantizer.py b/test/quantization/pt2e/test_x86inductor_quantizer.py
index c9df319bfd..4af5a30ddf 100644
--- a/test/quantization/pt2e/test_x86inductor_quantizer.py
+++ b/test/quantization/pt2e/test_x86inductor_quantizer.py
@@ -289,21 +289,42 @@ class TestHelperModules:
return tmp + self.bn2(self.conv2(tmp))
class SelfAttnLikeModule(torch.nn.Module):
- def __init__(self, input_dim) -> None:
+ def __init__(
+ self,
+ input_dim,
+ transpose_for_score=False,
+ num_attention_heads=None,
+ attention_head_size=None,
+ ) -> None:
super().__init__()
self.input_dim = input_dim
self.q_proj = nn.Linear(input_dim, input_dim, bias=False)
self.k_proj = nn.Linear(input_dim, input_dim, bias=False)
self.v_proj = nn.Linear(input_dim, input_dim, bias=False)
self.softmax = nn.Softmax(dim=-1)
+ self.transpose_for_score = transpose_for_score
+ if self.transpose_for_score:
+ assert num_attention_heads is not None
+ assert attention_head_size is not None
+ self.num_attention_heads = num_attention_heads
+ self.attention_head_size = attention_head_size
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
def forward(self, x):
q = self.q_proj(x)
k = self.k_proj(x)
v = self.v_proj(x)
- scores = torch.bmm(q, k.transpose(1, 2)) / (self.input_dim ** 0.5)
+ if self.transpose_for_score:
+ q = self.transpose_for_scores(q)
+ k = self.transpose_for_scores(k)
+ v = self.transpose_for_scores(v)
+ scores = torch.matmul(q, k.transpose(-1, -2)) / (self.input_dim ** 0.5)
attention = self.softmax(scores)
- weighted = torch.bmm(attention, v)
+ weighted = torch.matmul(attention, v)
return weighted
class X86InductorQuantTestCase(QuantizationTestCase):
@@ -1448,3 +1469,68 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase):
node_occurrence,
node_list,
)
+
+ @skipIfNoX86
+ def test_attention_block(self):
+ """
+ Test pattern of Attention like Block with X86InductorQuantizer.
+ """
+ for annotate_matmul in [False, True]:
+ with override_quantized_engine("x86"), torch.no_grad():
+ m = TestHelperModules.SelfAttnLikeModule(
+ input_dim=64 * 16,
+ transpose_for_score=True,
+ num_attention_heads=16,
+ attention_head_size=64,
+ ).eval()
+ example_inputs = (torch.randn(2, 384, 1024),)
+
+ m(*example_inputs)
+
+ quantizer = X86InductorQuantizer().set_global(
+ xiq.get_default_x86_inductor_quantization_config()
+ )
+
+ if annotate_matmul:
+ quantizer.set_function_type_qconfig(torch.matmul, quantizer.get_global_quantization_config())
+
+ node_occurrence = {
+ torch.ops.quantized_decomposed.quantize_per_tensor.default: 5 if annotate_matmul else 1,
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default: 7 if annotate_matmul else 3,
+ # quantize_per_channel for weights are const propagated
+ torch.ops.quantized_decomposed.quantize_per_channel.default: 0,
+ torch.ops.quantized_decomposed.dequantize_per_channel.default: 3,
+ }
+ if annotate_matmul:
+ node_list = [
+ torch.ops.quantized_decomposed.quantize_per_tensor.default,
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default,
+ torch.ops.quantized_decomposed.dequantize_per_channel.default,
+ torch.ops.aten.linear.default,
+ torch.ops.aten.view.default,
+ torch.ops.aten.permute.default,
+ torch.ops.quantized_decomposed.quantize_per_tensor.default,
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default,
+ torch.ops.aten.matmul.default,
+ torch.ops.aten.div.Tensor,
+ torch.ops.aten.softmax.int,
+ ]
+ else:
+ node_list = [
+ torch.ops.quantized_decomposed.quantize_per_tensor.default,
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default,
+ torch.ops.quantized_decomposed.dequantize_per_channel.default,
+ torch.ops.aten.linear.default,
+ torch.ops.aten.view.default,
+ torch.ops.aten.permute.default,
+ torch.ops.aten.matmul.default,
+ torch.ops.aten.div.Tensor,
+ torch.ops.aten.softmax.int,
+ ]
+ self._test_quantizer(
+ m,
+ example_inputs,
+ quantizer,
+ node_occurrence,
+ node_list,
+ )
diff --git a/torch/ao/quantization/quantizer/x86_inductor_quantizer.py b/torch/ao/quantization/quantizer/x86_inductor_quantizer.py
index 8889cf2df0..226d722357 100644
--- a/torch/ao/quantization/quantizer/x86_inductor_quantizer.py
+++ b/torch/ao/quantization/quantizer/x86_inductor_quantizer.py
@@ -82,7 +82,9 @@ default_quantizable_ops = propagation_quantizable_ops | {
# A superset of default_quantizable_ops includes operators support the int8 data type
# but not enabled by default recipe of X86InductorQuantizer.
-quantizable_ops = default_quantizable_ops
+quantizable_ops = default_quantizable_ops | {
+ torch.ops.aten.matmul.default,
+}
QUANT_ANNOTATION_KEY = "quantization_annotation"
@@ -110,6 +112,12 @@ def _map_module_function_to_aten_operator_type():
],
torch.ops.aten.flatten.using_ints,
),
+ (
+ [
+ torch.matmul,
+ ],
+ torch.ops.aten.matmul.default,
+ ),
)
for map_item in map_list:
module_function_to_aten_operator.update(dict.fromkeys(map_item[0], map_item[1])) # type: ignore[call-overload]
@@ -310,6 +318,14 @@ class X86InductorQuantizer(Quantizer):
self.global_config = quantization_config
return self
+ def get_global_quantization_config(self):
+ if not isinstance(self.global_config, QuantizationConfig):
+ warnings.warn(
+ "The global_config for X86InductorQuantizer is currently invalid. \
+ Please ensure that you use set_global to establish the global quantization configuration."
+ )
+ return self.global_config
+
def set_function_type_qconfig(
self,
function_type: Callable,
@@ -499,6 +515,7 @@ class X86InductorQuantizer(Quantizer):
# Step1: Recipe of fusion patterns like conv/linear.
self._annotate_conv2d_fusion_pattern(model)
self._annotate_linear_fusion_pattern(model)
+ self._annotate_matmul(model)
# Step2: Recipe to propagate annotation for patterns beside conv/linear.
# Go through all the nodes from start to end.
@@ -752,6 +769,24 @@ class X86InductorQuantizer(Quantizer):
self._annotate_linear_unary(model, config)
self._annotate_linear(model, config)
+ def _annotate_matmul(self, model: torch.fx.GraphModule):
+ if config := self._get_aten_operator_qconfig(torch.ops.aten.matmul.default):
+ for node in model.graph.nodes:
+ if node.target == torch.ops.aten.matmul.default and not _is_annotated(
+ [node]
+ ):
+ input_qspec_map = {}
+ matmul_node = node
+ for input_node in matmul_node.args:
+ input_qspec_map[input_node] = get_input_act_qspec(config)
+ matmul_node.meta[
+ QUANT_ANNOTATION_KEY
+ ] = _X86InductorQuantizationAnnotation(
+ input_qspec_map=input_qspec_map,
+ _annotated=True,
+ _is_output_of_quantized_pattern=True,
+ )
+
def _annotate_conv2d_binary_unary(
self, gm: torch.fx.GraphModule, quantization_config: QuantizationConfig
) -> None: | 2.41.0 |
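Conversely, the `matmul` recipe added above is off by default and has to be opted into explicitly. A minimal sketch following the new `test_attention_block` test:

```python
import torch
import torch.ao.quantization.quantizer.x86_inductor_quantizer as xiq
from torch.ao.quantization.quantizer.x86_inductor_quantizer import X86InductorQuantizer

quantizer = X86InductorQuantizer().set_global(
    xiq.get_default_x86_inductor_quantization_config()
)
# matmul is in quantizable_ops but not in default_quantizable_ops, so it must be
# enabled explicitly, here by reusing the global config:
quantizer.set_function_type_qconfig(
    torch.matmul, quantizer.get_global_quantization_config()
)
```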
4580f76d9e4a81b70a94062b762e3af919d95d0 | Wed, 10 Apr 2024 21:38:33 -0700 | [PATCH 0011/1000] fix flop counter issue with out parameters (#123768) | Pull Request resolved: https://github.com/pytorch/pytorch/pull/123768 Approved by: https://github.com/zou3519 | diff --git a/test/test_flop_counter.py b/test/test_flop_counter.py
index 74bc666db6..1a9a757f9f 100644
--- a/test/test_flop_counter.py
+++ b/test/test_flop_counter.py
@@ -248,8 +248,8 @@ class TestFlopCounter(TestCase):
self.assertExpectedInline(get_total_flops(mode), """5""")
- def count(*args, out):
- return out.numel()
+ def count(*args, out_val):
+ return out_val.numel()
count._get_raw = True
mode = FlopCounterMode(custom_mapping={torch.ops.aten.add: count})
@@ -328,6 +328,17 @@ class TestFlopCounter(TestCase):
self.assertExpectedInline(str(flops_fw_bw_math), """805306368""")
self.assertExpectedInline(str(flops_fw_bw_efficient), """939524096""")
+ def test_addmm_out(self):
+ def f(x):
+ y = torch.zeros(10, 10)
+ return torch.mm(x, x, out=y)
+
+ mode = FlopCounterMode()
+ with mode:
+ f(torch.randn(10, 10))
+
+ self.assertExpectedInline(get_total_flops(mode), """2000""")
+
def test_hook_registration(self):
model = torch.nn.Linear(100, 100)
x = torch.randn(3, 100)
diff --git a/torch/utils/flop_counter.py b/torch/utils/flop_counter.py
index c76a9a2432..fcad5d1fd3 100644
--- a/torch/utils/flop_counter.py
+++ b/torch/utils/flop_counter.py
@@ -24,8 +24,8 @@ flop_registry: Dict[Any, Any] = {}
def shape_wrapper(f):
@wraps(f)
- def nf(*args, out=None, **kwargs):
- args, kwargs, out_shape = tree_map(get_shape, (args, kwargs, out))
+ def nf(*args, out_val=None, **kwargs):
+ args, kwargs, out_shape = tree_map(get_shape, (args, kwargs, out_val))
return f(*args, out_shape=out_shape, **kwargs)
return nf
@@ -542,7 +542,7 @@ class FlopCounterMode(TorchDispatchMode):
func_packet = func._overloadpacket
if func_packet in self.flop_registry:
flop_count_func = self.flop_registry[func_packet]
- flop_count = flop_count_func(*args, **kwargs, out=out) # type: ignore[operator]
+ flop_count = flop_count_func(*args, **kwargs, out_val=out) # type: ignore[operator]
if len(set(self.parents)) != len(self.parents):
print(
"The module hierarchy tracking seems to be messed up." | 2.41.0 |
a5e7a01b5368b8ba11edcb62942630a1474e6e3 | Wed, 10 Apr 2024 11:02:32 -0700 | [PATCH 0015/1000] [custom_op] Schema inference now includes default values (#123453) | If the function has default values, we should be able to do schema inference and put the default values into the schema. Test Plan: - new tests Pull Request resolved: https://github.com/pytorch/pytorch/pull/123453 Approved by: https://github.com/albanD | diff --git a/test/test_custom_ops.py b/test/test_custom_ops.py
index 7479225785..10cb60e8ae 100644
--- a/test/test_custom_ops.py
+++ b/test/test_custom_ops.py
@@ -688,20 +688,6 @@ class TestCustomOp(CustomOpTestCaseBase):
infer_schema(foo)
- with self.assertRaisesRegex(ValueError, "default value"):
-
- def foo(x: Optional[Tensor] = None):
- raise NotImplementedError()
-
- infer_schema(foo)
-
- with self.assertRaisesRegex(ValueError, "default value"):
-
- def foo(x: Optional[Tensor] = None):
- raise NotImplementedError()
-
- infer_schema(foo)
-
with self.assertRaisesRegex(ValueError, "unsupported"):
def foo(x: Tensor) -> Tuple[Tensor, ...]:
@@ -2151,6 +2137,25 @@ class TestCustomOpAPI(TestCase):
self.assertEqual(z, x + y)
self.assertTrue(cpu_called)
+ @skipIfTorchDynamo("Expected to fail due to no FakeTensor support; not a bug")
+ def test_default_values(self):
+ defaults = []
+
+ @torch.library.custom_op("_torch_testing::f", mutates_args=())
+ def f(
+ x: Tensor,
+ a: Optional[int] = None,
+ b: float = 3.14,
+ c: bool = True,
+ d: int = 3,
+ ) -> Tensor:
+ defaults.extend([a, b, c, d])
+ return x.clone()
+
+ x = torch.randn(3)
+ f(x)
+ self.assertEqual(defaults, [None, 3.14, True, 3])
+
def test_mutated_error(self):
with self.assertRaisesRegex(
ValueError, r".*{'y'} in mutates_args were not found"
diff --git a/torch/_custom_op/impl.py b/torch/_custom_op/impl.py
index b4ea032380..fefd7cedf9 100644
--- a/torch/_custom_op/impl.py
+++ b/torch/_custom_op/impl.py
@@ -801,19 +801,22 @@ def infer_schema(prototype_function: typing.Callable, mutates_args=()) -> str:
f"The valid types are: {SUPPORTED_PARAM_TYPES.keys()}."
)
- if param.default is not inspect.Parameter.empty:
- error_fn(
- f"Parameter {name} has a default value; this is not supported. "
- f"If you want to use default values then create a function with "
- f"default values that invokes the custom op."
- )
schema_type = SUPPORTED_PARAM_TYPES[param.annotation]
if name in mutates_args:
if not schema_type.startswith("Tensor"):
error_fn(f"Parameter {name} is in mutable_args but only Tensors or collections of Tensors can be mutated")
schema_type = f"Tensor(a{idx}!){schema_type[len('Tensor'):]}"
seen_args.add(name)
- params.append(f"{schema_type} {name}")
+ if param.default is inspect.Parameter.empty:
+ params.append(f"{schema_type} {name}")
+ else:
+ if param.default is not None and not isinstance(param.default, (int, float, bool)):
+ error_fn(
+ f"Parameter {name} has an unsupported default value (we only support "
+ f"int, float, bool, None). Please file an issue on GitHub so we can "
+ f"prioritize this."
+ )
+ params.append(f"{schema_type} {name}={param.default}")
mutates_args_not_seen = set(mutates_args) - seen_args
if len(mutates_args_not_seen) > 0:
error_fn(f"{mutates_args_not_seen} in mutates_args were not found in " | 2.41.0 |
b4419dc4d9a4e5555de2a4def0eb77f10c8832a | Wed, 10 Apr 2024 11:02:32 -0700 | [PATCH 0016/1000] Refresh OpOverloadPacket if a new OpOverload gets added (#123578) | If a user accesses an OpOverloadPacket, then creates a new OpOverload, then uses the OpOverloadPacket, the new OpOverload never gets hit. This is because OpOverloadPacket caches OpOverloads when it is constructed. This PR fixes the problem by "refreshing" the OpOverloadPacket if a new OpOverload gets constructed and the OpOverloadPacket exists. Test Plan: - new tests Pull Request resolved: https://github.com/pytorch/pytorch/pull/123578 Approved by: https://github.com/albanD ghstack dependencies: #123453 | diff --git a/test/test_custom_ops.py b/test/test_custom_ops.py
index 10cb60e8ae..86c21f228d 100644
--- a/test/test_custom_ops.py
+++ b/test/test_custom_ops.py
@@ -2393,6 +2393,30 @@ Please use `add.register_fake` to add an fake impl.""",
y = f(x)
self.assertEqual(y, x.sin())
+ @skipIfTorchDynamo("Expected to fail due to no FakeTensor support; not a bug")
+ def test_overloading(self):
+ called_f = 0
+ called_f1 = 0
+
+ @torch.library.custom_op("_torch_testing::f", mutates_args=())
+ def f(x: Tensor) -> Tensor:
+ nonlocal called_f
+ called_f += 1
+ return x.clone()
+
+ x = torch.randn(2, 3)
+ torch.ops._torch_testing.f(x)
+ self.assertEqual(called_f, 1)
+
+ @torch.library.custom_op("_torch_testing::f.overload", mutates_args=())
+ def f1(x: Tensor, y: Tensor) -> Tensor:
+ nonlocal called_f1
+ called_f1 += 1
+ return x.clone()
+
+ torch.ops._torch_testing.f(x, x)
+ self.assertEqual(called_f1, 1)
+
def test_disallows_output_aliasing(self):
@torch.library.custom_op("_torch_testing::f", mutates_args=())
def f(x: Tensor) -> Tensor:
diff --git a/torch/_ops.py b/torch/_ops.py
index 08abfecb5a..7b081e1360 100644
--- a/torch/_ops.py
+++ b/torch/_ops.py
@@ -931,8 +931,10 @@ class _OpNamespace(types.ModuleType):
# for overloads and raise an exception if there are more than one.
namespace_name = self.name
qualified_op_name = f"{namespace_name}::{op_name}"
+ op_module = self.__module__ + "." + namespace_name
+
try:
- op, overload_names = torch._C._jit_get_operation(qualified_op_name)
+ op, overload_names = _get_packet(qualified_op_name, op_module)
if op is None:
raise AttributeError(
f"'_OpNamespace' '{self.name}' object has no attribute '{op_name}'"
@@ -944,10 +946,6 @@ class _OpNamespace(types.ModuleType):
f"'_OpNamespace' '{self.name}' object has no attribute '{op_name}'"
) from e
- # let the script frontend know that op is identical to the builtin op
- # with qualified_op_name
- torch.jit._builtins._register_builtin(op, qualified_op_name)
- op.__module__ = self.__module__ + "." + namespace_name
opoverloadpacket = OpOverloadPacket(
qualified_op_name, op_name, op, overload_names
)
@@ -959,6 +957,22 @@ class _OpNamespace(types.ModuleType):
return opoverloadpacket
+def _get_packet(qualname, op_module):
+ op, overload_names = torch._C._jit_get_operation(qualname)
+ if op is not None:
+ op.__module__ = op_module
+ # let the script frontend know that op is identical to the builtin op
+ # with qualified_op_name
+ torch.jit._builtins._register_builtin(op, qualname)
+ return op, overload_names
+
+
+def _refresh_packet(packet):
+ op, overload_names = _get_packet(packet._qualified_op_name, packet._op.__module__)
+ packet._op = op
+ packet._overload_names = overload_names
+
+
class _PyOpNamespace(_OpNamespace):
def __init__(self, name, ops):
super().__init__(name)
diff --git a/torch/library.py b/torch/library.py
index 88c72047ed..a7488c81d6 100644
--- a/torch/library.py
+++ b/torch/library.py
@@ -107,7 +107,18 @@ class Library:
if isinstance(tags, torch.Tag):
tags = (tags,)
result = self.m.define(schema, alias_analysis, tuple(tags))
- qualname = self.ns + "::" + schema.split("(")[0]
+ name = schema.split("(")[0]
+ qualname = self.ns + "::" + name
+
+ # If the OpOverloadPacket exists already, then this means we're adding a
+ # new OpOverload for it. Refresh the packet to include the new OpOverload.
+ packet_name = name.split(".")[0] if "." in name else name
+ if hasattr(torch.ops, self.ns):
+ ns = getattr(torch.ops, self.ns)
+ if hasattr(ns, packet_name):
+ packet = getattr(ns, packet_name)
+ torch._ops._refresh_packet(packet)
+
self._op_defs.add(qualname)
_defs.add(qualname)
return result | 2.41.0 |
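A condensed sketch of the failure mode fixed above, mirroring the new test (the `mylib` namespace is illustrative):

```python
import torch
from torch import Tensor


@torch.library.custom_op("mylib::f", mutates_args=())
def f(x: Tensor) -> Tensor:
    return x.clone()


x = torch.randn(2, 3)
torch.ops.mylib.f(x)  # materializes and caches the OpOverloadPacket


@torch.library.custom_op("mylib::f.overload", mutates_args=())
def f_two(x: Tensor, y: Tensor) -> Tensor:
    return x.clone()


# Previously the cached packet never saw the new overload; with the refresh in
# torch/library.py this call now resolves to the f.overload implementation.
torch.ops.mylib.f(x, x)
```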
38729c0cdf3ce4274f4d68f8e46e5a1cd36cbe8 | Wed, 10 Apr 2024 11:02:33 -0700 | [PATCH 0017/1000] Switch quantized_decomposed over to new custom ops API (#123454) | We are taking API feedback. Changes: - I removed some of the default values (they weren't being used). - I was unable to convert the last op (which is essentially an autograd.Function registered as CompositeImplicitAutograd). That one is "incorrectly registered"; I punt fixing it to the future. Test Plan: - existing tests Pull Request resolved: https://github.com/pytorch/pytorch/pull/123454 Approved by: https://github.com/andrewor14 ghstack dependencies: #123453, #123578 | diff --git a/torch/_custom_op/impl.py b/torch/_custom_op/impl.py
index fefd7cedf9..6f25e2b9af 100644
--- a/torch/_custom_op/impl.py
+++ b/torch/_custom_op/impl.py
@@ -882,6 +882,11 @@ SUPPORTED_RETURN_TYPES = {
def parse_return(annotation, error_fn):
+ if annotation == inspect.Signature.empty:
+ error_fn(
+ "There was no return annotation. Please add one."
+ )
+
if annotation is None:
return "()"
diff --git a/torch/ao/quantization/fx/_decomposed.py b/torch/ao/quantization/fx/_decomposed.py
index 18dd61c37c..67f7b3f509 100644
--- a/torch/ao/quantization/fx/_decomposed.py
+++ b/torch/ao/quantization/fx/_decomposed.py
@@ -4,11 +4,11 @@ from typing import Optional, Tuple
import torch
from torch._refs import _unsqueeze_multiple
from torch.ao.quantization.utils import determine_qparams, validate_qmin_qmax
-from torch.library import impl, Library
+from torch.library import custom_op, Library, impl
# Note: decomposed means decomposed quantized tensor, using decomposed so that the
# name is not too long
-quantized_decomposed_lib = Library("quantized_decomposed", "DEF")
+ns = "quantized_decomposed"
_DTYPE_TO_QVALUE_BOUNDS = {
torch.uint8: (0, 255),
@@ -31,11 +31,8 @@ def _quant_min_max_bounds_check(quant_min, quant_max, dtype):
"quant_max out of bound for dtype, " \
f"quant_max_upper_bound: {quant_max_upper_bound} quant_max: {quant_max}"
-quantized_decomposed_lib.define(
- "quantize_per_tensor(Tensor input, float scale, int zero_point, "
- "int quant_min, int quant_max, ScalarType dtype) -> Tensor")
-@impl(quantized_decomposed_lib, "quantize_per_tensor", "CompositeExplicitAutograd")
+@custom_op(f"{ns}::quantize_per_tensor", mutates_args=())
def quantize_per_tensor(
input: torch.Tensor,
scale: float,
@@ -67,8 +64,8 @@ def quantize_per_tensor(
inv_scale = 1.0 / scale
return torch.clamp(torch.round(input * inv_scale) + zero_point, quant_min, quant_max).to(dtype)
-@impl(quantized_decomposed_lib, "quantize_per_tensor", "Meta")
-def quantize_per_tensor_meta(
+@quantize_per_tensor.register_fake
+def _(
input: torch.Tensor,
scale: float,
zero_point: int,
@@ -81,11 +78,7 @@ def quantize_per_tensor_meta(
assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}"
return torch.empty_like(input, dtype=dtype)
-quantized_decomposed_lib.define(
- "quantize_per_tensor.tensor(Tensor input, Tensor scale, Tensor zero_point, "
- "int quant_min, int quant_max, ScalarType dtype) -> Tensor")
-
-@impl(quantized_decomposed_lib, "quantize_per_tensor.tensor", "CompositeExplicitAutograd")
+@custom_op(f"{ns}::quantize_per_tensor.tensor", mutates_args=())
def quantize_per_tensor_tensor(
input: torch.Tensor,
scale: torch.Tensor,
@@ -103,7 +96,7 @@ def quantize_per_tensor_tensor(
assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}"
return quantize_per_tensor(input, scale.item(), zero_point.item(), quant_min, quant_max, dtype)
-@impl(quantized_decomposed_lib, "quantize_per_tensor.tensor", "Meta")
+@quantize_per_tensor_tensor.register_fake
def quantize_per_tensor_tensor_meta(
input: torch.Tensor,
scale: torch.Tensor,
@@ -120,11 +113,7 @@ def quantize_per_tensor_tensor_meta(
return torch.empty_like(input, dtype=dtype)
# TODO: remove other variants and keep this one
-quantized_decomposed_lib.define(
- "quantize_per_tensor.tensor2(Tensor input, Tensor scale, Tensor zero_point, "
- "Tensor quant_min, Tensor quant_max, ScalarType dtype) -> Tensor")
-
-@impl(quantized_decomposed_lib, "quantize_per_tensor.tensor2", "CompositeExplicitAutograd")
+@custom_op(f"{ns}::quantize_per_tensor.tensor2", mutates_args=())
def quantize_per_tensor_tensor2(
input: torch.Tensor,
scale: torch.Tensor,
@@ -142,8 +131,8 @@ def quantize_per_tensor_tensor2(
assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}"
return quantize_per_tensor(input, scale.item(), zero_point.item(), quant_min.item(), quant_max.item(), dtype)
-@impl(quantized_decomposed_lib, "quantize_per_tensor.tensor2", "Meta")
-def quantize_per_tensor_tensor2_meta(
+@quantize_per_tensor_tensor2.register_fake
+def _(
input: torch.Tensor,
scale: torch.Tensor,
zero_point: torch.Tensor,
@@ -157,11 +146,7 @@ def quantize_per_tensor_tensor2_meta(
# the signature as metadata for the input Tensor, this might be useful for pattern
# matching in the future
# We will revisit this later if we found there are no use cases for it
-quantized_decomposed_lib.define(
- "dequantize_per_tensor(Tensor input, float scale, int zero_point, "
- "int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None) -> Tensor")
-
-@impl(quantized_decomposed_lib, "dequantize_per_tensor", "CompositeExplicitAutograd")
+@custom_op(f"{ns}::dequantize_per_tensor", mutates_args=())
def dequantize_per_tensor(
input: torch.Tensor,
scale: float,
@@ -209,7 +194,7 @@ def dequantize_per_tensor(
else:
raise ValueError(f"Unsupported dtype in dequantize_per_tensor: {dtype}")
-@impl(quantized_decomposed_lib, "dequantize_per_tensor", "Meta")
+@dequantize_per_tensor.register_fake
def dequantize_per_tensor_meta(
input: torch.Tensor,
scale: torch.Tensor,
@@ -224,11 +209,7 @@ def dequantize_per_tensor_meta(
out_dtype = torch.float32
return torch.empty_like(input, dtype=out_dtype)
-quantized_decomposed_lib.define(
- "dequantize_per_tensor.tensor(Tensor input, Tensor scale, Tensor zero_point, "
- "int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None) -> Tensor")
-
-@impl(quantized_decomposed_lib, "dequantize_per_tensor.tensor", "CompositeExplicitAutograd")
+@custom_op(f"{ns}::dequantize_per_tensor.tensor", mutates_args=())
def dequantize_per_tensor_tensor(
input: torch.Tensor,
scale: torch.Tensor,
@@ -248,8 +229,8 @@ def dequantize_per_tensor_tensor(
assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}"
return dequantize_per_tensor(input, scale.item(), zero_point.item(), quant_min, quant_max, dtype, out_dtype=out_dtype)
-@impl(quantized_decomposed_lib, "dequantize_per_tensor.tensor", "Meta")
-def dequantize_per_tensor_tensor_meta(
+@dequantize_per_tensor_tensor.register_fake
+def dequantize_per_tensor_tensor_fake(
input: torch.Tensor,
scale: torch.Tensor,
zero_point: torch.Tensor,
@@ -270,11 +251,7 @@ def dequantize_per_tensor_tensor_meta(
raise ValueError(f"Unsupported dtype in dequantize_per_tensor: {dtype}")
# TODO: remove other variants and keep this one
-quantized_decomposed_lib.define(
- "dequantize_per_tensor.tensor2(Tensor input, Tensor scale, Tensor zero_point, "
- "Tensor quant_min, Tensor quant_max, ScalarType dtype, *, ScalarType? out_dtype=None) -> Tensor")
-
-@impl(quantized_decomposed_lib, "dequantize_per_tensor.tensor2", "CompositeExplicitAutograd")
+@custom_op(f"{ns}::dequantize_per_tensor.tensor2", mutates_args=())
def dequantize_per_tensor_tensor2(
input: torch.Tensor,
scale: torch.Tensor,
@@ -295,8 +272,8 @@ def dequantize_per_tensor_tensor2(
return dequantize_per_tensor(
input, scale.item(), zero_point.item(), quant_min.item(), quant_max.item(), dtype, out_dtype=out_dtype)
-@impl(quantized_decomposed_lib, "dequantize_per_tensor.tensor2", "Meta")
-def dequantize_per_tensor_tensor2_meta(
+@dequantize_per_tensor_tensor2.register_fake
+def _(
input,
scale,
zero_point,
@@ -306,13 +283,9 @@ def dequantize_per_tensor_tensor2_meta(
*,
out_dtype: Optional[torch.dtype] = None
) -> torch.Tensor:
- return dequantize_per_tensor_tensor_meta(input, scale, zero_point, quant_min, quant_max, dtype, out_dtype=out_dtype)
-
-quantized_decomposed_lib.define(
- "choose_qparams.tensor(Tensor input, int quant_min, int quant_max, "
- "float eps, ScalarType dtype) -> (Tensor, Tensor)")
+ return dequantize_per_tensor_tensor_fake(input, scale, zero_point, quant_min, quant_max, dtype, out_dtype=out_dtype)
-@impl(quantized_decomposed_lib, "choose_qparams.tensor", "CompositeExplicitAutograd")
+@custom_op(f"{ns}::choose_qparams.tensor", mutates_args=())
def choose_qparams_tensor(
input: torch.Tensor,
qmin: int,
@@ -347,11 +320,7 @@ def choose_qparams_tensor(
return determine_qparams(
min_val, max_val, qmin, qmax, dtype, torch.Tensor([eps]), has_customized_qrange=False)
-quantized_decomposed_lib.define(
- "choose_qparams_symmetric.tensor(Tensor input, int quant_min, int quant_max, "
- "float eps, ScalarType dtype) -> (Tensor, Tensor)")
-
-@impl(quantized_decomposed_lib, "choose_qparams_symmetric.tensor", "CompositeExplicitAutograd")
+@custom_op(f"{ns}::choose_qparams_symmetric.tensor", mutates_args=())
def choose_qparams_symmetric_tensor(
input: torch.Tensor,
qmin: int,
@@ -393,8 +362,8 @@ def choose_qparams_symmetric_tensor(
qscheme=torch.per_tensor_symmetric
)
-@impl(quantized_decomposed_lib, "choose_qparams.tensor", "Meta")
-def choose_qparams_tensor_meta(
+@choose_qparams_tensor.register_fake
+def _(
input: torch.Tensor,
quant_min: int,
quant_max: int,
@@ -410,8 +379,8 @@ def choose_qparams_tensor_meta(
{quant_min} max: {quant_max}"
return torch.empty(1, dtype=torch.double, device=input.device), torch.empty(1, dtype=torch.int64, device=input.device)
-@impl(quantized_decomposed_lib, "choose_qparams_symmetric.tensor", "Meta")
-def choose_qparams_symmetric_tensor_meta(
+@choose_qparams_symmetric_tensor.register_fake
+def _(
input: torch.Tensor,
quant_min: int,
quant_max: int,
@@ -428,11 +397,7 @@ def _permute_to_axis_zero(x, axis):
y = x.permute(tuple(new_axis_list))
return y, new_axis_list
-quantized_decomposed_lib.define(
- "quantize_per_channel(Tensor input, Tensor scales, Tensor zero_points, int axis, "
- "int quant_min, int quant_max, ScalarType dtype) -> Tensor")
-
-@impl(quantized_decomposed_lib, "quantize_per_channel", "CompositeExplicitAutograd")
+@custom_op(f"{ns}::quantize_per_channel", mutates_args=())
def quantize_per_channel(
input: torch.Tensor,
scales: torch.Tensor,
@@ -477,7 +442,7 @@ def quantize_per_channel(
out = res.permute(tuple(permute_axis_list))
return out.to(dtype)
-@impl(quantized_decomposed_lib, "quantize_per_channel", "Meta")
+@quantize_per_channel.register_fake
def quantize_per_channel_meta(
input: torch.Tensor,
scales: torch.Tensor,
@@ -498,11 +463,7 @@ def quantize_per_channel_meta(
# the signature as metadata for the input Tensor, this might be useful for pattern
# matching in the future
# We will revisit this later if we found there are no use cases for it
-quantized_decomposed_lib.define(
- "dequantize_per_channel(Tensor input, Tensor scales, Tensor? zero_points, int axis, "
- "int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None) -> Tensor")
-
-@impl(quantized_decomposed_lib, "dequantize_per_channel", "CompositeExplicitAutograd")
+@custom_op(f"{ns}::dequantize_per_channel", mutates_args=())
def dequantize_per_channel(
input: torch.Tensor,
scales: torch.Tensor,
@@ -560,8 +521,8 @@ def dequantize_per_channel(
out = res.permute(tuple(permute_axis_list))
return out
-@impl(quantized_decomposed_lib, "dequantize_per_channel", "Meta")
-def dequantize_per_channel_meta(
+@dequantize_per_channel.register_fake
+def _(
input: torch.Tensor,
scales: torch.Tensor,
zero_points: Optional[torch.Tensor],
@@ -580,16 +541,7 @@ def dequantize_per_channel_meta(
return torch.empty_like(input, dtype=out_dtype)
-quantized_decomposed_lib.define(
- "choose_qparams_per_token(Tensor input, ScalarType dtype) -> (Tensor, Tensor)"
-)
-
-
-@impl(
- quantized_decomposed_lib,
- "choose_qparams_per_token",
- "CompositeExplicitAutograd",
-)
+@custom_op(f"{ns}::choose_qparams_per_token", mutates_args=())
def choose_qparams_per_token(
input: torch.Tensor,
dtype: torch.dtype,
@@ -623,12 +575,8 @@ def choose_qparams_per_token(
return scales, zero_points
-@impl(
- quantized_decomposed_lib,
- "choose_qparams_per_token",
- "Meta",
-)
-def choose_qparams_per_token_meta(
+@choose_qparams_per_token.register_fake
+def _(
input: torch.Tensor,
dtype: torch.dtype,
) -> Tuple[torch.Tensor, torch.Tensor]:
@@ -639,16 +587,7 @@ def choose_qparams_per_token_meta(
# TODO: move this to https://github.com/pytorch/pytorch/blob/main/torch/ao/quantization/fx/_decomposed.py
-quantized_decomposed_lib.define(
- "choose_qparams_per_token_asymmetric(Tensor input, ScalarType dtype) -> (Tensor, Tensor)"
-)
-
-
-@impl(
- quantized_decomposed_lib,
- "choose_qparams_per_token_asymmetric",
- "CompositeExplicitAutograd",
-)
+@custom_op(f"{ns}::choose_qparams_per_token_asymmetric", mutates_args=())
def choose_qparams_per_token_asymmetric(
input: torch.Tensor,
dtype: torch.dtype,
@@ -691,12 +630,8 @@ def choose_qparams_per_token_asymmetric(
return scale.to(torch.float32), zero_point.to(torch.float32)
-@impl(
- quantized_decomposed_lib,
- "choose_qparams_per_token_asymmetric",
- "Meta",
-)
-def choose_qparams_per_token_asymmetric_meta(
+@choose_qparams_per_token_asymmetric.register_fake
+def _(
input: torch.Tensor,
dtype: torch.dtype,
) -> Tuple[torch.Tensor, torch.Tensor]:
@@ -716,13 +651,7 @@ def _per_token_quant_qparam_dim_check(input, scales, zero_points):
), f"num_tokens: {num_tokens} zero_points: {zero_points.size()}"
-quantized_decomposed_lib.define(
- "quantize_per_token(Tensor input, Tensor scales, Tensor zero_points, "
- "int quant_min, int quant_max, ScalarType dtype) -> Tensor"
-)
-
-
-@impl(quantized_decomposed_lib, "quantize_per_token", "CompositeExplicitAutograd")
+@custom_op(f"{ns}::quantize_per_token", mutates_args=())
def quantize_per_token(
input: torch.Tensor,
scales: torch.Tensor,
@@ -730,7 +659,7 @@ def quantize_per_token(
quant_min: int,
quant_max: int,
dtype: torch.dtype,
-):
+) -> torch.Tensor:
"""Per token quantization for the Tensor using the quantization parameters to map
from floating point to quantized values. This means for a N dimension Tensor
(M1, M2, ...Mn, N), we calculate scales/zero_points for each N elements and quantize
@@ -757,8 +686,8 @@ def quantize_per_token(
return input
-@impl(quantized_decomposed_lib, "quantize_per_token", "Meta")
-def quantize_per_token_meta(
+@quantize_per_token.register_fake
+def _(
input: torch.Tensor,
scales: torch.Tensor,
zero_points: torch.Tensor,
@@ -770,13 +699,7 @@ def quantize_per_token_meta(
return torch.empty_like(input, dtype=dtype)
-quantized_decomposed_lib.define(
- "dequantize_per_token(Tensor input, Tensor scales, Tensor zero_points, "
- "int quant_min, int quant_max, ScalarType dtype, ScalarType output_dtype) -> Tensor"
-)
-
-
-@impl(quantized_decomposed_lib, "dequantize_per_token", "CompositeExplicitAutograd")
+@custom_op(f"{ns}::dequantize_per_token", mutates_args=())
def dequantize_per_token(
input: torch.Tensor,
scales: torch.Tensor,
@@ -784,8 +707,8 @@ def dequantize_per_token(
quant_min: int,
quant_max: int,
dtype: torch.dtype,
- output_dtype: torch.dtype = torch.float32,
-):
+ output_dtype: torch.dtype,
+) -> torch.Tensor:
"""Per token dequantization for the Tensor using the quantization parameters to map
from floating point to quantized values. This means for a N dimension Tensor
(M1, M2, ...Mn, N), we calculate scales/zero_points for each N elements and quantize
@@ -809,8 +732,8 @@ def dequantize_per_token(
return input
-@impl(quantized_decomposed_lib, "dequantize_per_token", "Meta")
-def dequantize_per_token_meta(
+@dequantize_per_token.register_fake
+def _(
input: torch.Tensor,
scales: torch.Tensor,
zero_points: torch.Tensor,
@@ -824,16 +747,7 @@ def dequantize_per_token_meta(
return torch.empty_like(input, dtype=output_dtype)
-quantized_decomposed_lib.define(
- "quantize_per_channel_group(Tensor input, Tensor scales, Tensor zero_points, int quant_min, "
- "int quant_max, ScalarType dtype, int group_size) -> Tensor"
-)
-
-
-# TODO: dtype is ignored for now
-@impl(
- quantized_decomposed_lib, "quantize_per_channel_group", "CompositeExplicitAutograd"
-)
+@custom_op(f"{ns}::quantize_per_channel_group", mutates_args=())
def quantize_per_channel_group(
input: torch.Tensor,
scales: torch.Tensor,
@@ -841,8 +755,8 @@ def quantize_per_channel_group(
quant_min: int,
quant_max: int,
dtype: torch.dtype,
- group_size=128,
-):
+ group_size: int,
+) -> torch.Tensor:
assert group_size > 1
# needed for GPTQ single column quantize
if group_size > input.shape[-1] and scales.shape[-1] == 1:
@@ -870,16 +784,16 @@ def quantize_per_channel_group(
return input_int8
-@impl(quantized_decomposed_lib, "quantize_per_channel_group", "Meta")
-def quantize_per_channel_group_meta(
+@quantize_per_channel_group.register_fake
+def _(
input: torch.Tensor,
scales: torch.Tensor,
zero_points: torch.Tensor,
quant_min: int,
quant_max: int,
dtype: torch.dtype,
- group_size=128,
-):
+ group_size,
+) -> torch.Tensor:
"""Groupwise quantization within each channel for an 2-d Tensor using the quantization parameters
to map from floating point to quantized values. This means for each row of a 2-d Tensor
(M, N), we calculate scales/zero_points for each `group_size` elements
@@ -908,17 +822,7 @@ def quantize_per_channel_group_meta(
return torch.empty_like(input, dtype=dtype)
-quantized_decomposed_lib.define(
- "dequantize_per_channel_group(Tensor input, Tensor scales, Tensor? zero_points, int quant_min, "
- "int quant_max, ScalarType dtype, int group_size, ScalarType output_dtype) -> Tensor"
-)
-
-
-@impl(
- quantized_decomposed_lib,
- "dequantize_per_channel_group",
- "CompositeExplicitAutograd",
-)
+@custom_op(f"{ns}::dequantize_per_channel_group", mutates_args=())
def dequantize_per_channel_group(
w_int8: torch.Tensor,
scales: torch.Tensor,
@@ -926,9 +830,9 @@ def dequantize_per_channel_group(
quant_min: int,
quant_max: int,
dtype: torch.dtype,
- group_size: int = 128,
- output_dtype: torch.dtype = torch.float32,
-):
+ group_size: int,
+ output_dtype: torch.dtype,
+) -> torch.Tensor:
"""Groupwise dequantization within each channel for an 2-d Tensor using the quantization parameters
to map from floating point to quantized values. This means for each row of a 2-d Tensor
(M, N), we calculate scales/zero_points for each `group_size` elements
@@ -965,6 +869,10 @@ def dequantize_per_channel_group(
return w_dq
+quantized_decomposed_lib = Library(ns, "DEF")
+
+# TODO: Migrate this to the new torch.library.custom_ops API. This requires a refactor
+# of the autograd.Function. We leave this work to the future.
quantized_decomposed_lib.define(
"fake_quant_per_channel(Tensor input, Tensor scales, Tensor zero_points, int axis, "
"int quant_min, int quant_max) -> Tensor") | 2.41.0 |
34e56fa3352aefa208b33b0a86aaabed8033f7a | Wed, 10 Apr 2024 15:10:59 -0700 | [PATCH 0018/1000] inductor: log unique id to match output_code to aot graphs (#118647) | I found it helpful to be able to see, given some inductor output code, which AOT graph it came from. When you have large models with multiple graphs floating around this can be difficult, so I added the aot_config.aot_id to the printed inductor output. Pull Request resolved: https://github.com/pytorch/pytorch/pull/118647 Approved by: https://github.com/ezyang | diff --git a/torch/_functorch/_aot_autograd/logging_utils.py b/torch/_functorch/_aot_autograd/logging_utils.py
index 28f82555ac..414166cbdd 100644
--- a/torch/_functorch/_aot_autograd/logging_utils.py
+++ b/torch/_functorch/_aot_autograd/logging_utils.py
@@ -46,12 +46,22 @@ def track_graph_compiling(aot_config, graph_name):
global graph_being_compiled
# TODO: Don't shove the aot_id in here; set it in the context
graph_being_compiled = [f"{aot_config.aot_id}_{graph_name}"]
+ old_name = None
+ if tracing_context := torch._guards.TracingContext.try_get():
+ old_name = tracing_context.aot_graph_name
+ tracing_context.aot_graph_name = graph_being_compiled
+ has_tracing_context = True
+ else:
+ has_tracing_context = False
try:
yield
finally:
global nth_graph
nth_graph += 1
graph_being_compiled = []
+ if has_tracing_context:
+ if tracing_context := torch._guards.TracingContext.try_get():
+ tracing_context.aot_graph_name = old_name
# Set up hooks so that during backward the fx's stack_trace is properly set
diff --git a/torch/_guards.py b/torch/_guards.py
index 09ed4a85b3..5f4c6d9941 100644
--- a/torch/_guards.py
+++ b/torch/_guards.py
@@ -615,6 +615,8 @@ class TracingContext:
self.loc_in_frame = None
# this is only set after aot_autograd
self.fw_metadata = None
+ # this is only set after aot_autograd
+ self.aot_graph_name = None
self.params_flat = None
# this is for extended return calling convention from backend
# compiler to aot_autograd
diff --git a/torch/_inductor/codegen/wrapper.py b/torch/_inductor/codegen/wrapper.py
index 1098112680..3ffab72a19 100644
--- a/torch/_inductor/codegen/wrapper.py
+++ b/torch/_inductor/codegen/wrapper.py
@@ -473,8 +473,13 @@ class WrapperCodeGen(CodeGen):
self.header.writeline(f"{name} = None # {hashed}")
def write_header(self) -> None:
+ context = torch._guards.TracingContext.try_get()
+ aot_config_comment = ""
+ if context is not None and context.aot_graph_name is not None:
+ aot_config_comment = f"# AOT ID: {context.aot_graph_name}"
self.header.splice(
f"""
+ {aot_config_comment}
from ctypes import c_void_p, c_long
import torch
import math | 2.41.0 |
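A minimal sketch (using a stand-in class, not the real `torch._guards.TracingContext`) of the save/restore pattern added to `track_graph_compiling` above, and of how the wrapper header picks the graph name up:

```
import contextlib

class TracingContext:           # stand-in for torch._guards.TracingContext
    current = None
    def __init__(self):
        self.aot_graph_name = None

@contextlib.contextmanager
def track_graph_compiling(graph_name):
    ctx = TracingContext.current
    old_name = ctx.aot_graph_name if ctx is not None else None
    if ctx is not None:
        ctx.aot_graph_name = graph_name
    try:
        yield
    finally:
        # Restore the previous name so nested or later compiles are unaffected.
        if ctx is not None:
            ctx.aot_graph_name = old_name

TracingContext.current = TracingContext()
with track_graph_compiling("0_forward"):
    # The wrapper codegen emits a header comment along these lines.
    print(f"# AOT ID: {TracingContext.current.aot_graph_name}")
```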
83900887f2fb5c7a04e7fd78ad8de7a20f356d4 | Wed, 10 Apr 2024 14:19:07 -0700 | [PATCH 0019/1000] [quant] Enable backward for choose_qparams_per_token_asymmetric (#123452) | Summary: When running the backward for this op, we get the error: ``` RuntimeError: derivative for aten::aminmax is not implemented ``` This commit replaces this call with separate amin and amax calls instead, which do have implemented derivatives. Test Plan: python test/test_quantization.py -k test_decomposed_choose_qparams_per_token_asymmetric_backward Reviewers: jerryzh168, digantdesai Subscribers: jerryzh168, digantdesai, supriyar Differential Revision: [D55805170](https://our.internmc.facebook.com/intern/diff/D55805170) Pull Request resolved: https://github.com/pytorch/pytorch/pull/123452 Approved by: https://github.com/digantdesai, https://github.com/jerryzh168 | diff --git a/test/quantization/core/test_quantized_tensor.py b/test/quantization/core/test_quantized_tensor.py
index b2bd97bdc3..228f1f8ee7 100644
--- a/test/quantization/core/test_quantized_tensor.py
+++ b/test/quantization/core/test_quantized_tensor.py
@@ -1602,6 +1602,14 @@ class TestQuantizedTensor(TestCase):
self.assertEqual(quantized_X.int_repr(), quantized_decomposed_X)
self.assertEqual(dequantized_X, dequantized_decomposed_X)
+ def test_decomposed_choose_qparams_per_token_asymmetric_backward(self):
+ # register the ops
+ import torch.ao.quantization.fx._decomposed
+ x = torch.randn(2, 3).requires_grad_()
+ (s, zp) = torch.ops.quantized_decomposed.choose_qparams_per_token_asymmetric(x, torch.int8)
+ out = x.div(s).add(zp).round()
+ out.sum().backward()
+
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_quantization.py TESTNAME\n\n"
diff --git a/torch/ao/quantization/fx/_decomposed.py b/torch/ao/quantization/fx/_decomposed.py
index 67f7b3f509..94fdd8a6f5 100644
--- a/torch/ao/quantization/fx/_decomposed.py
+++ b/torch/ao/quantization/fx/_decomposed.py
@@ -606,7 +606,8 @@ def choose_qparams_per_token_asymmetric(
"""
# Based on https://github.com/google/XNNPACK/blob/df156f0cf3db5a4576cc711123eeb54915f82ffc/src/xnnpack/quantization.h#L18
qmin, qmax = -128, 127
- min_val, max_val = torch.aminmax(input, dim=-1, keepdim=True)
+ min_val = torch.amin(input, dim=-1, keepdim=True)
+ max_val = torch.amax(input, dim=-1, keepdim=True)
min_val_neg = torch.min(min_val, torch.zeros_like(min_val))
max_val_pos = torch.max(max_val, torch.zeros_like(max_val))
eps = torch.finfo(torch.float32).eps # use xnnpack eps? | 2.41.0 |
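The failure mode is easy to reproduce outside the quantization code: at the time of this commit `torch.aminmax` had no derivative formula, while separate `torch.amin`/`torch.amax` calls do. A small standalone check mirroring the new test:

```
import torch

x = torch.randn(2, 3, requires_grad=True)

# torch.aminmax(x, dim=-1, keepdim=True) would hit
# "derivative for aten::aminmax is not implemented" on backward here.
min_val = torch.amin(x, dim=-1, keepdim=True)
max_val = torch.amax(x, dim=-1, keepdim=True)

(min_val + max_val).sum().backward()  # succeeds with amin/amax
print(x.grad.shape)  # torch.Size([2, 3])
```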
fa36ef09210b67022439b49eee01d7b63bd6d96 | Wed, 10 Apr 2024 19:31:01 -0400 | [PATCH 0020/1000] Natively support int truncation, don't guard on positive/negative (#122827) | This doesn't entirely fix the original problem that prompted it, but that case now seems to get stuck only in export constraint formatting, which seems like progress to me. Signed-off-by: Edward Z. Yang <[email protected]> Pull Request resolved: https://github.com/pytorch/pytorch/pull/122827 Approved by: https://github.com/avikchaudhuri | diff --git a/test/export/test_export.py b/test/export/test_export.py
index 80a6f0b993..d04ab18384 100644
--- a/test/export/test_export.py
+++ b/test/export/test_export.py
@@ -1086,6 +1086,36 @@ class TestExport(TestCase):
inps = (torch.ones(6, 4), torch.tensor(5), torch.tensor(4))
self._test_export_same_as_eager(list_tensor_map, inps)
+ @unittest.expectedFailure
+ def test_crop_like(self):
+ # https://fb.workplace.com/groups/1405155842844877/posts/8195050017188725/
+
+ # Minimal crop code copied from https://github.com/pytorch/vision/blob/main/torchvision/transforms/v2/functional
+ class CropLike(torch.nn.Module):
+ def forward(self, image, crop_height, crop_width):
+ c, image_height, image_width = image.shape
+ crop_top = int(round((image_height - crop_height) / 2.0))
+ crop_left = int(round((image_width - crop_width) / 2.0))
+ return image[
+ ...,
+ crop_top : crop_top + crop_height,
+ crop_left : crop_left + crop_width,
+ ]
+
+ crop = CropLike()
+ imagew = Dim("width")
+ imageh = Dim("height")
+ dynamic_dims = {
+ "image": {0: None, 1: imageh, 2: imagew},
+ "crop_height": None,
+ "crop_width": None,
+ }
+ args = (torch.rand(3, 512, 512), 150, 150)
+ ecrop = export(crop, args=args, dynamic_shapes=dynamic_dims)
+
+ args = (torch.rand(3, 700, 700), 150, 150)
+ self.assertEqual(ecrop.module()(*args), ecrop(*args))
+
def test_export_func_with_kwargs(self):
class Module(torch.nn.Module):
def forward(self, arg1, arg2, kw1, kw2):
diff --git a/test/test_dynamic_shapes.py b/test/test_dynamic_shapes.py
index 674839be4c..752638104d 100644
--- a/test/test_dynamic_shapes.py
+++ b/test/test_dynamic_shapes.py
@@ -404,13 +404,13 @@ class TestPySymInt(TestCase):
r = sym_int(a1 / 2)
self.assertEqual(guard_int(r), 3)
self.assertIsInstance(r, torch.SymInt, msg=type(r))
- self.assertExpectedInline(str(shape_env.guards[1][0]), """Eq(floor(s1/2), 3)""")
+ self.assertExpectedInline(str(shape_env.guards[1][0]), """Eq(Trunc(s1/2), 3)""")
a3 = create_symint(shape_env, 3)
r = sym_int(2.0 * torch.sym_float(a3))
self.assertEqual(guard_int(r), 6)
self.assertIsInstance(r, torch.SymInt, msg=type(r))
- self.assertExpectedInline(str(shape_env.guards[2][0]), """Eq(2*s2, 6)""")
+ self.assertExpectedInline(str(shape_env.guards[2][0]), """Eq(Trunc(2.0*s2), 6)""")
def test_sym_sqrt(self):
shape_env = ShapeEnv()
@@ -432,6 +432,18 @@ class TestPySymInt(TestCase):
self.assertIsInstance(r, torch.SymInt, msg=type(r))
self.assertExpectedInline(str(shape_env.guards[1][0]), """Eq(3*s0, 15)""")
+ def test_sym_trunc(self):
+ shape_env = ShapeEnv()
+ a0 = create_symint(shape_env, 5)
+ r = math.trunc(a0 / 2)
+ self.assertEqual(r, 2)
+ self.assertIsInstance(r, torch.SymInt, msg=type(r))
+ self.assertExpectedInline(str(shape_env.guards[0][0]), """Eq(Trunc(s0/2), 2)""")
+ r = torch.sym_int(torch.sym_sqrt(a0))
+ self.assertEqual(r, 2)
+ self.assertIsInstance(r, torch.SymInt, msg=type(r))
+ self.assertExpectedInline(str(shape_env.guards[1][0]), """Eq(Trunc(OpaqueUnaryFn_sqrt(s0)), 2)""")
+
def test_sym_ceil(self):
shape_env = ShapeEnv()
a0 = create_symint(shape_env, 5)
diff --git a/test/test_proxy_tensor.py b/test/test_proxy_tensor.py
index 67445dae25..74eac04572 100644
--- a/test/test_proxy_tensor.py
+++ b/test/test_proxy_tensor.py
@@ -1898,7 +1898,6 @@ symbolic_tensor_failures = {
xfail('nn.functional.binary_cross_entropy', ''), # aten.new_empty.default - couldn't find symbolic meta function/decom...
xfail('nn.functional.cross_entropy', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('nn.functional.ctc_loss'), # aten._ctc_loss.Tensor - couldn't find symbolic meta function/decomposition
- xfail('nn.functional.fractional_max_pool2d', ''), # argument 'size' must be tuple of ints, but found element of t...
xfail('nn.functional.fractional_max_pool3d', ''), # argument 'size' must be tuple of ints, but found element of t...
xfail('quantile', ''), # Could not run 'aten::equal' with arguments from the 'Meta' backend.
xfail('resize_as_', ''), # aten.clone.default - couldn't find symbolic meta function/decomposition
diff --git a/torch/__init__.py b/torch/__init__.py
index ec5234bd69..3a10130d5f 100644
--- a/torch/__init__.py
+++ b/torch/__init__.py
@@ -339,6 +339,9 @@ class SymFloat:
def __ge__(self, other) -> builtins.bool:
raise AssertionError("type stub not overridden")
+ def __trunc__(self):
+ raise AssertionError("type stub not overridden")
+
def __sym_max__(self, other):
raise AssertionError("type stub not overridden")
@@ -465,7 +468,7 @@ def sym_int(a):
if isinstance(a, SymInt):
return a
elif isinstance(a, SymFloat):
- return math.floor(a) if a >= 0 else math.ceil(a) # type: ignore[arg-type, call-overload]
+ return math.trunc(a)
return py_int(a) # type: ignore[operator]
def sym_max(a, b):
diff --git a/torch/_inductor/codegen/common.py b/torch/_inductor/codegen/common.py
index e34d3311d1..4caa4dc367 100644
--- a/torch/_inductor/codegen/common.py
+++ b/torch/_inductor/codegen/common.py
@@ -449,6 +449,10 @@ class PythonPrinter(ExprPrinter):
assert len(expr.args) == 1
return f"math.floor({self._print(expr.args[0])})"
+ def _print_Trunc(self, expr):
+ assert len(expr.args) == 1
+ return f"math.trunc({self._print(expr.args[0])})"
+
def _print_ceiling(self, expr):
assert len(expr.args) == 1
return f"math.ceil({self._print(expr.args[0])})"
diff --git a/torch/_inductor/codegen/cpp.py b/torch/_inductor/codegen/cpp.py
index 0fe4b7261a..26a68ea837 100644
--- a/torch/_inductor/codegen/cpp.py
+++ b/torch/_inductor/codegen/cpp.py
@@ -562,6 +562,11 @@ class CppPrinter(ExprPrinter):
r = f"std::floor({self._print(expr.args[0])})"
return f"static_cast<{INDEX_TYPE}>({r})" if expr.is_integer else r
+ def _print_Trunc(self, expr):
+ assert len(expr.args) == 1
+ r = f"std::trunc({self._print(expr.args[0])})"
+ return f"static_cast<{INDEX_TYPE}>({r})" if expr.is_integer else r
+
def _print_Pow(self, expr):
# Uses float constants to perform FP div
base, exp = expr.args
diff --git a/torch/_inductor/codegen/triton.py b/torch/_inductor/codegen/triton.py
index 508382f4f3..0133c3585d 100644
--- a/torch/_inductor/codegen/triton.py
+++ b/torch/_inductor/codegen/triton.py
@@ -305,6 +305,12 @@ class TritonPrinter(PythonPrinter):
f"libdevice.floor({self._print(expr.args[0])}).to({V.kernel.index_dtype})"
)
+ def _print_Trunc(self, expr):
+ assert len(expr.args) == 1
+ return (
+ f"libdevice.trunc({self._print(expr.args[0])}).to({V.kernel.index_dtype})"
+ )
+
def _print_ceiling(self, expr):
assert len(expr.args) == 1
return f"libdevice.ceil({self._print(expr.args[0])}).to({V.kernel.index_dtype})"
diff --git a/torch/fx/experimental/sym_node.py b/torch/fx/experimental/sym_node.py
index 783196d39c..8ec9b816be 100644
--- a/torch/fx/experimental/sym_node.py
+++ b/torch/fx/experimental/sym_node.py
@@ -233,6 +233,9 @@ class SymNode:
def round(self, ndigits=None) -> "SymNode":
return self._round(ndigits) # type: ignore[attr-defined]
+ def trunc(self) -> "SymNode":
+ return self._trunc() # type: ignore[attr-defined]
+
def add(self, other) -> "SymNode":
return self._add(other) # type: ignore[attr-defined]
@@ -454,6 +457,7 @@ METHOD_TO_OPERATOR = {
"ceil": math.ceil,
"eq": operator.eq,
"floor": math.floor,
+ "trunc": math.trunc,
"floordiv": operator.floordiv,
"ge": operator.ge,
"gt": operator.gt,
@@ -486,6 +490,7 @@ unary_magic_methods = {
"neg",
"sym_not",
"pos",
+ "trunc",
}
@@ -548,7 +553,7 @@ for name in math_op_names:
always_float_magic_methods.add(sym_name)
-always_int_magic_methods = {"ceil", "floor"}
+always_int_magic_methods = {"ceil", "floor", "trunc"}
always_bool_magic_methods = {
"eq",
"ne",
@@ -653,6 +658,12 @@ def _sympy_floor(a):
return _floor_ceil_helper(a, sympy.floor)
+def _sympy_trunc(a):
+ from torch.utils._sympy.functions import Trunc
+
+ return Trunc(a)
+
+
def _sympy_ceil(a):
import sympy
@@ -774,6 +785,7 @@ magic_methods = {
"le": _sympy_le,
"ge": _sympy_ge,
"floor": _sympy_floor,
+ "trunc": _sympy_trunc,
"sym_float": _sympy_sym_float,
"ceil": _sympy_ceil,
"neg": operator.neg,
diff --git a/torch/fx/experimental/symbolic_shapes.py b/torch/fx/experimental/symbolic_shapes.py
index 06356f70cd..b634f6e313 100644
--- a/torch/fx/experimental/symbolic_shapes.py
+++ b/torch/fx/experimental/symbolic_shapes.py
@@ -1717,7 +1717,7 @@ class DimConstraints:
elif left.isdigit():
relation_with_digit(right, flip(op), int(left))
else:
- assert op == "=="
+ assert op == "==", t
results[left]["eq"] = sympy.sympify(right)
buf = ""
diff --git a/torch/utils/_sympy/functions.py b/torch/utils/_sympy/functions.py
index 48ad414512..427333b07c 100644
--- a/torch/utils/_sympy/functions.py
+++ b/torch/utils/_sympy/functions.py
@@ -328,6 +328,17 @@ class IsNonOverlappingAndDenseIndicator(sympy.Function):
return None
+class Trunc(sympy.Function):
+ is_integer = True
+
+ @classmethod
+ def eval(cls, number):
+ if number.is_integer:
+ return number
+ elif isinstance(number, sympy.Number):
+ return sympy.Integer(math.trunc(float(number)))
+
+
class Round(sympy.Function):
is_integer = True
diff --git a/torch/utils/_sympy/interp.py b/torch/utils/_sympy/interp.py
index 8f67f891f9..806e91cfe2 100644
--- a/torch/utils/_sympy/interp.py
+++ b/torch/utils/_sympy/interp.py
@@ -24,6 +24,7 @@ from .functions import (
Round,
RoundDecimal,
TrueDiv,
+ Trunc,
Where,
)
@@ -51,6 +52,7 @@ def handlers():
TrueDiv: "truediv",
FloorDiv: "floordiv",
CleanDiv: "div",
+ Trunc: "trunc",
Where: "where",
sympy.Add: "add",
sympy.Mul: "mul",
diff --git a/torch/utils/_sympy/value_ranges.py b/torch/utils/_sympy/value_ranges.py
index 7d03dfd6ee..a056db6dbb 100644
--- a/torch/utils/_sympy/value_ranges.py
+++ b/torch/utils/_sympy/value_ranges.py
@@ -745,6 +745,13 @@ class SymPyValueRangeAnalysis:
def atan(x):
return ValueRanges.increasing_map(x, OpaqueUnaryFn_atan)
+ @staticmethod
+ def trunc(x):
+ def trunc(x):
+ return sympy.Integer(x) if x.is_finite else x
+
+ return ValueRanges.increasing_map(x, trunc)
+
class ValueRangeAnalysis(SymPyValueRangeAnalysis):
def __init__(self):
@@ -829,10 +836,7 @@ class ValueRangeAnalysis(SymPyValueRangeAnalysis):
if x == ValueRanges.unknown():
return x
- def trunc(x):
- return sympy.Integer(x) if x.is_finite else x
-
- return ValueRanges.increasing_map(x, trunc)
+ return cls.trunc(x)
@classmethod
def sub(cls, a, b): | 2.41.0 |
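The sign guard is no longer needed because, for floats, truncation toward zero coincides with the old floor-if-non-negative / ceil-if-negative branching that `sym_int` used to emit. A quick sanity check of that equivalence:

```
import math

# sym_int used to compute floor(a) if a >= 0 else ceil(a); math.trunc
# gives the same answer without branching on the sign.
for v in (3.7, -3.7, 2.0, -2.0):
    old = math.floor(v) if v >= 0 else math.ceil(v)
    assert math.trunc(v) == old
    print(v, math.trunc(v))
```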
02374cc091e549c586b72c9b252d33256ec921e | Thu, 11 Apr 2024 17:34:47 +0000 | [PATCH 0023/1000] [CI] show doc coverage repro instructions (#123688)MIME-Version: 1.0Content-Type: text/plain; charset=UTF-8Content-Transfer-Encoding: 8bit | remind devs they can reproduce the doc coverage error locally with following msg ```You can reproduce locally by running 'cd pytorch/docs && make coverage && cat build/coverage/python.txt'``` I spent 20min to figure out how to test locally so want to enrich the error msg <img width="542" alt="Screenshot 2024-04-09 at 5 22 45 PM" src="https://github.com/pytorch/pytorch/assets/134637289/2c619d9d-74b5-4bda-8903-999ef5c255c2"> Pull Request resolved: https://github.com/pytorch/pytorch/pull/123688 Approved by: https://github.com/clee2000 | diff --git a/.ci/pytorch/python_doc_push_script.sh b/.ci/pytorch/python_doc_push_script.sh
index ce14ac1d02..d4076d3469 100755
--- a/.ci/pytorch/python_doc_push_script.sh
+++ b/.ci/pytorch/python_doc_push_script.sh
@@ -105,6 +105,7 @@ if [ "$is_main_doc" = true ]; then
echo undocumented objects found:
cat build/coverage/python.txt
echo "Make sure you've updated relevant .rsts in docs/source!"
+ echo "You can reproduce locally by running 'cd docs && make coverage && cat build/coverage/python.txt'"
exit 1
fi
else | 2.41.0 |
9c565b24e6c305c09c8c908e27f4023f41dd567 | Wed, 10 Apr 2024 18:54:51 -0700 | [PATCH 0024/1000] [inductor] Write generated files from parent process (#123409) | Before this PR we would pass generated source code over a pipe to the compile worker then the compile worker would write out the file. Doing it this way is faster and results in smaller messages to the workers (and lets us skip creating the workers in the warm start case). Pull Request resolved: https://github.com/pytorch/pytorch/pull/123409 Approved by: https://github.com/desertfire | diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index 98cf75fc23..4e84838504 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -59,12 +59,7 @@ from torch._dynamo.device_interface import (
from torch._dynamo.utils import counters, dynamo_timed
from torch._inductor import config, exc, metrics
from torch._inductor.codegen.cuda import cuda_env
-from torch._inductor.utils import (
- cache_dir,
- clear_on_fresh_inductor_cache,
- developer_warning,
- is_linux,
-)
+from torch._inductor.utils import cache_dir, clear_on_fresh_inductor_cache, is_linux
from torch._subclasses.fake_tensor import (
extract_tensor_metadata,
FakeTensor,
@@ -2021,7 +2016,7 @@ def custom_op_wrapper(op: str, *args):
@clear_on_fresh_inductor_cache
class CppCodeCache:
- cache: Dict[str, Union[CDLL, ModuleType]] = {}
+ cache: Dict[str, Callable[[], Union[CDLL, ModuleType]]] = {}
cache_clear = staticmethod(cache.clear)
cpp_compile_command_flags: Dict[str, Any] = {}
@@ -2032,13 +2027,17 @@ class CppCodeCache:
@classmethod
def _load_library(cls, path: str, key: str) -> Union[CDLL, ModuleType]:
try:
- return cls._load_library_inner(path, key)
+ result = cls._load_library_inner(path, key)
+ result.key = key # type: ignore[union-attr]
+ return result
except (ImportError, OSError) as e:
if "gomp" in str(e) and os.path.exists("/usr/lib64/libgomp.so.1"):
# hacky workaround for fbcode/buck
global _libgomp
_libgomp = cdll.LoadLibrary("/usr/lib64/libgomp.so.1")
- return cls._load_library_inner(path, key)
+ result = cls._load_library_inner(path, key)
+ result.key = key # type: ignore[union-attr]
+ return result
if "failed to map segment from shared object" in str(e):
raise OSError(
f"{e}. The most common reason this may occur is if the {tempfile.gettempdir()} folder "
@@ -2049,42 +2048,68 @@ class CppCodeCache:
raise
@classmethod
- def load(cls, source_code: str, cuda: bool = False) -> Union[CDLL, ModuleType]:
- cls.cpp_compile_command_flags.update({"cuda": cuda})
- picked_vec_isa = pick_vec_isa()
- cpp_command = repr(
- cpp_compile_command(
- "i", "o", vec_isa=picked_vec_isa, **cls.cpp_compile_command_flags
- )
- )
+ def load_async(cls, source_code: str, cuda=False, submit_fn=None):
+ compile_command = {
+ **cls.cpp_compile_command_flags,
+ "cuda": cuda,
+ "vec_isa": pick_vec_isa(),
+ }
+ cpp_command = repr(cpp_compile_command("i", "o", **compile_command))
key, input_path = write(source_code, "cpp", extra=cpp_command)
+
if key not in cls.cache:
from filelock import FileLock
- lock_dir = get_lock_dir()
- lock = FileLock(os.path.join(lock_dir, key + ".lock"), timeout=LOCK_TIMEOUT)
- with lock:
- output_path = input_path[:-3] + "so"
- if not os.path.exists(output_path):
- cmd = shlex.split(
- cpp_compile_command(
- input=input_path,
- output=output_path,
- vec_isa=picked_vec_isa,
- **cls.cpp_compile_command_flags,
- )
- )
- compile_file(input_path, output_path, cmd)
- cls.cache[key] = cls._load_library(output_path, key)
- cls.cache[key].key = key # type: ignore[union-attr]
+ lock_path = os.path.join(get_lock_dir(), key + ".lock")
+ output_path = input_path[:-3] + "so"
+ future: Optional[Future[Any]] = None
+ lib = None
+ worker_fn = functools.partial(
+ _worker_compile_cpp,
+ lock_path,
+ input_path,
+ output_path,
+ cpp_compile_command(
+ input=input_path, output=output_path, **compile_command
+ ),
+ )
+
+ def load_fn():
+ nonlocal lib
+ if lib is None:
+ if future is not None:
+ future.result()
+ worker_fn()
+ lib = cls._load_library(output_path, key)
+ assert lib is not None
+ return lib
+
+ if submit_fn is not None:
+ with FileLock(lock_path, timeout=LOCK_TIMEOUT):
+ if not os.path.exists(output_path):
+ future = submit_fn(worker_fn)
+
+ cls.cache[key] = load_fn
return cls.cache[key]
+ @classmethod
+ def load(cls, source_code: str, cuda: bool = False):
+ return cls.load_async(source_code, cuda)()
+
+
+def _worker_compile_cpp(lock_path, input_path, output_path, cmd):
+ from filelock import FileLock
+
+ with FileLock(lock_path, timeout=LOCK_TIMEOUT):
+ if not os.path.exists(output_path):
+ compile_file(input_path, output_path, shlex.split(cmd))
+
# Customized Python binding for cpp kernels
@clear_on_fresh_inductor_cache
class CppPythonBindingsCodeCache(CppCodeCache):
- cache: Dict[str, Union[CDLL, ModuleType]] = {}
+ cache: Dict[str, Callable[[], Union[CDLL, ModuleType]]] = {}
cache_clear = staticmethod(cache.clear)
cpp_compile_command_flags = {
# kernels have no dependency on libtorch
@@ -2176,12 +2201,13 @@ class CppPythonBindingsCodeCache(CppCodeCache):
return module
@classmethod
- def load_pybinding(
+ def load_pybinding_async(
cls,
argtypes: List[str],
source_code: str,
cuda: bool = False,
num_outputs: int = -1,
+ submit_fn=None,
) -> Any:
"""
Wrap a C++ function in fast Python bindings.
@@ -2209,14 +2235,26 @@ class CppPythonBindingsCodeCache(CppCodeCache):
cls.entry_function,
cls.entry_function,
)
- result = cls.load(source_code + suffix, cuda)
- assert isinstance(result, ModuleType)
- return getattr(result, cls.entry_function)
+ get_result = cls.load_async(source_code + suffix, cuda, submit_fn=submit_fn)
+ result = None
+
+ def future():
+ nonlocal result
+ if result is None:
+ result = get_result()
+ assert isinstance(result, ModuleType)
+ return getattr(result, cls.entry_function)
+
+ return future
+
+ @classmethod
+ def load_pybinding(cls, *args, **kwargs) -> Any:
+ return cls.load_pybinding_async(*args, **kwargs)()
@clear_on_fresh_inductor_cache
class CppWrapperCodeCache(CppPythonBindingsCodeCache):
- cache: Dict[str, Union[CDLL, ModuleType]] = {}
+ cache: Dict[str, Callable[[], Union[CDLL, ModuleType]]] = {}
cache_clear = staticmethod(cache.clear)
cpp_compile_command_flags = {
"include_pytorch": not config.abi_compatible,
@@ -2277,6 +2315,10 @@ class CppWrapperCodeCache(CppPythonBindingsCodeCache):
)
+def _reload_python_module_in_subproc(key, path):
+ return PyCodeCache.load_by_key_path(key, path)
+
+
@clear_on_fresh_inductor_cache
class PyCodeCache:
cache: Dict[str, ModuleType] = dict()
@@ -2330,6 +2372,11 @@ class PyCodeCache:
for k, v in attrs.items():
setattr(mod, k, v)
+ if not (linemap or attrs):
+ mod._reload_in_subproc = functools.partial( # type: ignore[attr-defined]
+ _reload_python_module_in_subproc, key, path
+ )
+
return cls.cache[key]
@classmethod
@@ -2361,11 +2408,25 @@ class PyCodeCache:
return parse_stack_trace(entry)
+def _reload_triton_kernel_in_subproc(reload_module, kernel_name):
+ return TritonCodeCache._mod_to_kernel(reload_module(), kernel_name)
+
+
class TritonCodeCache:
@classmethod
def load(cls, kernel_name: str, source_code: str) -> ModuleType:
mod = PyCodeCache.load(source_code)
- return getattr(mod, kernel_name)
+ return cls._mod_to_kernel(mod, kernel_name)
+
+ @classmethod
+ def _mod_to_kernel(cls, mod, kernel_name):
+ kernel = getattr(mod, kernel_name)
+ kernel._reload_in_subproc = functools.partial(
+ _reload_triton_kernel_in_subproc,
+ mod._reload_in_subproc,
+ kernel_name,
+ )
+ return kernel
def _cuda_compiler() -> Optional[str]:
@@ -2652,6 +2713,7 @@ def caching_device_properties():
device_interface.Worker.get_device_properties()
+@functools.lru_cache(None)
def _set_triton_ptxas_path() -> None:
if os.environ.get("TRITON_PTXAS_PATH") is not None:
return
@@ -2666,54 +2728,50 @@ def _set_triton_ptxas_path() -> None:
warnings.warn(f"{ptxas_path} exists but is not an executable")
-def _worker_compile(
- kernel_name: str,
- source_code: str,
+def _worker_compile_triton(
+ load_kernel: Callable[[], Any],
cc: int,
device: torch.device,
device_interface: Type[DeviceInterface],
-) -> None:
+):
+ _set_triton_ptxas_path()
device_interface.Worker.set_device(device.index)
- kernel = TritonCodeCache.load(kernel_name, source_code)
+ kernel = load_kernel()
kernel.precompile(warm_cache_only_with_cc=cc)
-def _load_kernel(kernel_name: str, source_code: str) -> ModuleType:
- _set_triton_ptxas_path()
- kernel = TritonCodeCache.load(kernel_name, source_code)
- kernel.precompile()
- return kernel
+class CodeCacheFuture:
+ def result(self):
+ raise NotImplementedError()
-class TritonFuture:
+class TritonFuture(CodeCacheFuture):
kernel: ModuleType
def __init__(
self,
- kernel_name: str,
- source_code: str,
- future: Future[Any],
+ kernel: Any,
+ future: Optional[Future[Any]],
) -> None:
- self.kernel_name = kernel_name
- self.source_code = source_code
+ self.kernel = kernel
self.future = future
# @dynamo_utils.dynamo_timed
def result(self) -> ModuleType:
- t0 = time()
- if hasattr(self, "kernel"):
- return self.kernel
- # If the worker failed this will throw an exception.
- self.future.result()
- kernel = self.kernel = _load_kernel(self.kernel_name, self.source_code)
- latency = time() - t0
- if latency > 50:
- developer_warning(
- f"Detected long compilation time of {latency} seconds for kernel name {self.kernel_name}"
- )
- developer_warning(self.source_code)
- del self.kernel_name, self.source_code, self.future
- return kernel
+ if self.future is not None:
+ # If the worker failed this will throw an exception.
+ self.future.result()
+ self.future = None
+ self.kernel.precompile()
+ return self.kernel
+
+
+class LambdaFuture(CodeCacheFuture):
+ def __init__(self, result_fn):
+ self.result_fn = result_fn
+
+ def result(self):
+ return self.result_fn()
# If this process dies abnormally (e.g. segfault)
@@ -2747,10 +2805,21 @@ _pool_set: Set[ProcessPoolExecutor] = set()
def shutdown_compile_workers() -> None:
"""Shut down all outstanding compile-worker pools."""
- global _pool_set
for pool in _pool_set:
pool.shutdown()
+ after_fork()
+
+
+def after_fork():
+ """Reset pools to initial state without shutting them down"""
_pool_set.clear()
+ AsyncCompile.process_pool.cache_clear()
+
+
+try:
+ os.register_at_fork(after_in_child=after_fork)
+except AttributeError:
+ pass # register_at_fork does not exists on windows
class AsyncCompile:
@@ -2825,21 +2894,26 @@ class AsyncCompile:
return task()
return cls.pool().submit(task)
- def triton(
- self, kernel_name: str, source_code: str, device_str: str = "cuda"
- ) -> Union[TritonFuture, ModuleType]:
+ def triton(self, kernel_name: str, source_code: str, device_str: str = "cuda"):
_compile_start()
+ _set_triton_ptxas_path()
+ kernel = TritonCodeCache.load(kernel_name, source_code)
if config.compile_threads > 1:
device_interface = get_interface_for_device(device_str)
device = torch.device(device_str, device_interface.current_device())
cc = device_interface.get_compute_capability(device)
future = self.process_pool().submit(
- _worker_compile, kernel_name, source_code, cc, device, device_interface
+ _worker_compile_triton,
+ kernel._reload_in_subproc,
+ cc,
+ device,
+ device_interface,
)
- return TritonFuture(kernel_name, source_code, future)
+ return TritonFuture(kernel, future)
else:
- return _load_kernel(kernel_name, source_code)
+ kernel.precompile()
+ return kernel
def multi_kernel(self, *args, **kwargs) -> Any:
from torch._inductor.codegen.multi_kernel import MultiKernelCall
@@ -2847,18 +2921,21 @@ class AsyncCompile:
# no need to call this in parallel since the sub-kernels are already parallel tasks
return MultiKernelCall(*args, **kwargs)
- def cpp(self, source_code: str) -> ModuleType:
- def task():
+ def cpp(self, source_code: str):
+ if config.compile_threads <= 1:
return CppCodeCache.load(source_code).kernel
+ else:
+ get_result = CppCodeCache.load_async(source_code, submit_fn=self.submit)
+ return LambdaFuture(lambda: get_result().kernel)
- return self.submit(task)
-
- def cpp_pybinding(self, argtypes: List[str], source_code: str) -> ModuleType:
- return self.submit(
- functools.partial(
- CppPythonBindingsCodeCache.load_pybinding, argtypes, source_code
+ def cpp_pybinding(self, argtypes: List[str], source_code: str):
+ if config.compile_threads <= 1:
+ return CppPythonBindingsCodeCache.load_pybinding(argtypes, source_code)
+ else:
+ get_result = CppPythonBindingsCodeCache.load_pybinding_async(
+ argtypes, source_code, submit_fn=self.submit
)
- )
+ return LambdaFuture(get_result)
def cuda(self, source_code, dst_file_ext):
def task():
@@ -2871,7 +2948,7 @@ class AsyncCompile:
[
value
for key, value in scope.items()
- if isinstance(value, (Future, TritonFuture))
+ if isinstance(value, (Future, CodeCacheFuture))
]
)
pbar = tqdm(
@@ -2884,18 +2961,18 @@ class AsyncCompile:
for key, result in scope.items():
if config.verbose_progress and not isinstance(pbar, _Faketqdm):
pbar.set_postfix_str(key)
- if isinstance(result, (Future, TritonFuture)):
+ if isinstance(result, (Future, CodeCacheFuture)):
scope[key] = result.result()
pbar.update(1)
_compile_end()
-if os.environ.get("TORCH_TNT_IN_USE", "0") == "1":
- # When TorchTNT is used, calling warm_pool() here will cause the
- # compile workers created not being able to be shut down inside
- # shutdown_compile_workers(). This may cause significant QPS drop.
- log.info("Do not call AsyncCompile.warm_pool() because TorchTNT is in use.")
+if (
+ os.environ.get("TORCH_TNT_IN_USE", "0") == "1"
+ or os.environ.get("TORCH_WARM_POOL", "1") != "1"
+):
+ pass
elif sys.version_info >= (3, 12):
log.info("AsyncCompile.warm_pool() is broken on 3.12+.")
else: | 2.41.0 |
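A minimal sketch (plain Python, not Inductor's actual classes) of the deferred-load pattern the diff introduces: the parent process writes the source and submits compilation to a worker pool, and the caller gets back a callable that waits for the worker and loads the artifact only on first use:

```
from concurrent.futures import ThreadPoolExecutor

class LambdaFuture:
    """Wraps a zero-arg callable so callers can treat it like a Future."""
    def __init__(self, result_fn):
        self.result_fn = result_fn

    def result(self):
        return self.result_fn()

def load_async(compile_fn, load_fn, pool):
    future = pool.submit(compile_fn)   # worker only compiles; the parent already wrote the file
    lib = None

    def load():
        nonlocal lib
        if lib is None:
            future.result()            # re-raises any exception from the worker
            lib = load_fn()
        return lib

    return load

pool = ThreadPoolExecutor(max_workers=2)
get_kernel = load_async(lambda: None, lambda: "loaded-kernel", pool)
print(LambdaFuture(get_kernel).result())  # "loaded-kernel"
```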
c451798cc5a7882e95b01600aa643b042b11b1e | Wed, 10 Apr 2024 12:50:21 -0700 | [PATCH 0025/1000] [inductor] Disable channels_last heuristic when channels==1 (#123758) | Pull Request resolved: https://github.com/pytorch/pytorch/pull/123758 Approved by: https://github.com/shunting314 | diff --git a/test/inductor/test_cpu_repro.py b/test/inductor/test_cpu_repro.py
index 9cc0e9b93a..80a0fed789 100644
--- a/test/inductor/test_cpu_repro.py
+++ b/test/inductor/test_cpu_repro.py
@@ -1630,6 +1630,19 @@ class CPUReproTests(TestCase):
self.common(fn, (value, mask))
assert metrics.generated_cpp_vec_kernel_count >= 1
+ def test_channels_last_view_as_complex(self):
+ # https://github.com/pytorch/pytorch/issues/122448#issuecomment-2046169554
+
+ def reduce_example(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
+ """Applies the rotary embedding to the query and key tensors."""
+ x_out = torch.view_as_complex(torch.stack([x.float(), y.float()], dim=-1))
+ return x_out
+
+ args = [torch.randn(1, 1, 1, 128), torch.randn(1, 1, 1, 128)]
+ expected = reduce_example(*args)
+ actual = torch.compile(reduce_example, fullgraph=True)(*args)
+ self.assertEqual(expected, actual)
+
def test_load_same_bool_tensor_twice(self):
@torch._dynamo.optimize("inductor")
def fn(a, b):
diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py
index 1233753dc9..e783009a45 100644
--- a/torch/_inductor/ir.py
+++ b/torch/_inductor/ir.py
@@ -2518,7 +2518,7 @@ class Layout(IRNode):
def is_channels_last_contiguous(self):
ndim = len(self.size)
- if ndim not in [4, 5]:
+ if ndim not in [4, 5] or self.size[1] == 1:
return False
for left, right, size in zip(
self.stride, make_channels_last_strides_for(self.size), self.size # type: ignore[arg-type] | 2.41.0 |
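The reason for the `size[1] == 1` check above: with a single channel, the default NCHW-contiguous strides and the channels-last strides describe the same memory, so the channels_last heuristic has no real signal to act on. A quick illustration:

```
import torch

x = torch.randn(1, 1, 4, 128)
# With one channel, both layout checks pass for the same tensor, so
# "looks channels-last" is not a meaningful hint here.
print(x.is_contiguous())                                   # True
print(x.is_contiguous(memory_format=torch.channels_last))  # True
```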
9249a218b941f7f9cb5391692bacec013039cfc | Thu, 11 Apr 2024 01:43:47 +0000 | [PATCH 0028/1000] Avoid COW materialization in backward ops (3) (#123797) | Affected ops: * conv ops * glu * prelu * scaled_dot_product_attention * threshold * logsigmoid * binary_cross_entropy * gelu * unfold * smooth_l1_loss * embedding Part of #97856 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123797 Approved by: https://github.com/ezyang | diff --git a/aten/src/ATen/native/Activation.cpp b/aten/src/ATen/native/Activation.cpp
index 34617ef47f..75f373811c 100644
--- a/aten/src/ATen/native/Activation.cpp
+++ b/aten/src/ATen/native/Activation.cpp
@@ -411,8 +411,8 @@ TORCH_IMPL_FUNC(gelu_backward_out_cpu) (
auto approximate_type = get_gelutype_enum(approximate);
#if AT_MKLDNN_ENABLED()
if (use_mkldnn(self) && (approximate_type == GeluType::None)) {
- const ideep::tensor& x = itensor_from_tensor(self);
- ideep::tensor grady = itensor_from_tensor(grad);
+ const ideep::tensor& x = itensor_from_tensor(self, /*from_const_data_ptr*/true);
+ ideep::tensor grady = itensor_from_tensor(grad, /*from_const_data_ptr*/true);
ideep::tensor gradx = itensor_from_tensor(grad_input);
ideep::eltwise_backward::compute(x, grady, gradx,
ideep::algorithm::eltwise_gelu_erf, /*alpha*/ 0.0);
@@ -730,9 +730,9 @@ std::tuple<Tensor, Tensor> _prelu_kernel_backward(const Tensor& grad_out, const
auto iter = TensorIteratorConfig()
.add_output(grad_self)
.add_output(grad_weight)
- .add_input(self)
- .add_input(weight)
- .add_input(grad_out)
+ .add_const_input(self)
+ .add_const_input(weight)
+ .add_const_input(grad_out)
.build();
prelu_backward_stub(iter.device_type(), iter);
return {grad_self, grad_weight};
diff --git a/aten/src/ATen/native/ConvolutionMM2d.cpp b/aten/src/ATen/native/ConvolutionMM2d.cpp
index a0165fcda0..6f8a3477c2 100644
--- a/aten/src/ATen/native/ConvolutionMM2d.cpp
+++ b/aten/src/ATen/native/ConvolutionMM2d.cpp
@@ -285,8 +285,8 @@ static void slow_conv2d_update_output_frame(
template <typename scalar_t>
void slow_conv2d_backward_update_grad_input_frame(
TensorAccessor<scalar_t, 3> grad_input,
- TensorAccessor<scalar_t, 3> grad_output,
- TensorAccessor<scalar_t, 2> weight,
+ TensorAccessor<const scalar_t, 3> grad_output,
+ TensorAccessor<const scalar_t, 2> weight,
scalar_t *fgrad_input,
int64_t kernel_height,
int64_t kernel_width,
@@ -405,9 +405,9 @@ void slow_conv2d_backward_out_cpu_template(
AT_DISPATCH_FLOATING_TYPES_AND2(
kBFloat16, kHalf, input.scalar_type(), "slow_conv2d_cpu_grad_input", [&] {
- auto grad_output_a = grad_output.accessor<scalar_t, 4>();
+ auto grad_output_a = grad_output.accessor<const scalar_t, 4>();
auto grad_input_a = grad_input.accessor<scalar_t, 4>();
- auto weight_a = weight.accessor<scalar_t, 2>();
+ auto weight_a = weight.accessor<const scalar_t, 2>();
at::parallel_for(0, batch_size, 0, [&](int64_t start, int64_t end) {
auto fgrad_input = std::make_unique<scalar_t[]>(fgrad_input_size);
@@ -434,8 +434,8 @@ void slow_conv2d_backward_out_cpu_template(
template <typename scalar_t>
void slow_conv2d_backward_weight_frame(
TensorAccessor<scalar_t, 2> grad_weight,
- TensorAccessor<scalar_t, 3> grad_output,
- TensorAccessor<scalar_t, 2> finput,
+ TensorAccessor<const scalar_t, 3> grad_output,
+ TensorAccessor<const scalar_t, 2> finput,
bool is_channels_last) {
// Compute grad_weight += grad_output.reshape({grad_output.shape(0), -1}) * finput.T
// Note gemm expects fortran order, so all 3 matrices are transposed.
@@ -519,9 +519,9 @@ static void slow_conv2d_backward_weight_out_cpu_template(
AT_DISPATCH_FLOATING_TYPES_AND2(
kBFloat16, kHalf, input.scalar_type(), "slow_conv2d_cpu_grad_weight", [&] {
- auto grad_output_a = grad_output.accessor<scalar_t, 4>();
+ auto grad_output_a = grad_output.accessor<const scalar_t, 4>();
auto grad_weight_2d_a = grad_weight_2d.accessor<scalar_t, 2>();
- auto finput_a = finput.accessor<scalar_t, 3>();
+ auto finput_a = finput.accessor<const scalar_t, 3>();
for (const auto t : c10::irange(batch_size)) {
auto grad_output_t = grad_output_a[t];
diff --git a/aten/src/ATen/native/ConvolutionMM3d.cpp b/aten/src/ATen/native/ConvolutionMM3d.cpp
index 76865e24f8..1d5e7a8333 100644
--- a/aten/src/ATen/native/ConvolutionMM3d.cpp
+++ b/aten/src/ATen/native/ConvolutionMM3d.cpp
@@ -311,8 +311,8 @@ static void slow_conv3d_update_output_frame(
template <typename scalar_t>
void slow_conv3d_backward_update_grad_input_frame(
TensorAccessor<scalar_t, 4> grad_input,
- TensorAccessor<scalar_t, 4> grad_output,
- TensorAccessor<scalar_t, 2> weight,
+ TensorAccessor<const scalar_t, 4> grad_output,
+ TensorAccessor<const scalar_t, 2> weight,
TensorAccessor<scalar_t, 2> fgrad_input,
int64_t kernel_depth,
int64_t kernel_height,
@@ -431,9 +431,9 @@ void slow_conv3d_backward_out_cpu_template(
AT_DISPATCH_FLOATING_TYPES_AND2(
kBFloat16, kHalf, input.scalar_type(), "slow_conv3d_cpu_grad_input", [&] {
auto grad_input_a = grad_input.accessor<scalar_t, 5>();
- auto grad_output_a = grad_output_contiguous.accessor<scalar_t, 5>();
+ auto grad_output_a = grad_output_contiguous.accessor<const scalar_t, 5>();
auto fgrad_input_a = fgrad_input.accessor<scalar_t, 3>();
- auto weight_2d_a = weight2d.accessor<scalar_t, 2>();
+ auto weight_2d_a = weight2d.accessor<const scalar_t, 2>();
at::parallel_for(0, batch_size, CONV3D_GRAIN_SALT,
[&](int64_t start, int64_t end) {
@@ -464,8 +464,8 @@ void slow_conv3d_backward_out_cpu_template(
template <typename scalar_t>
void slow_conv3d_backward_weight_frame(
TensorAccessor<scalar_t, 2> grad_weight,
- TensorAccessor<scalar_t, 4> grad_output,
- TensorAccessor<scalar_t, 2> finput,
+ TensorAccessor<const scalar_t, 4> grad_output,
+ TensorAccessor<const scalar_t, 2> finput,
int64_t groups) {
// Compute grad_weight += grad_output.reshape({grad_output.shape(0), -1}) * finput.T
// Note gemm expects fortran order, so all 3 matrices are transposed.
@@ -538,8 +538,8 @@ static void slow_conv3d_backward_parameters_out_cpu_template(
AT_DISPATCH_FLOATING_TYPES_AND2(
kBFloat16, kHalf, input.scalar_type(), "slow_conv3d_cpu_grad_weight", [&] {
auto grad_weight_2d_a = grad_weight_2d.accessor<scalar_t, 2>();
- auto grad_output_a = grad_output_contiguous.accessor<scalar_t, 5>();
- auto finput_a = finput.accessor<scalar_t, 3>();
+ auto grad_output_a = grad_output_contiguous.accessor<const scalar_t, 5>();
+ auto finput_a = finput.accessor<const scalar_t, 3>();
for (const auto t : c10::irange(batch_size)) {
auto grad_output_t = grad_output_a[t];
auto finput_t = finput_a[t];
diff --git a/aten/src/ATen/native/Embedding.cpp b/aten/src/ATen/native/Embedding.cpp
index 9140a85d1e..705b08ab39 100644
--- a/aten/src/ATen/native/Embedding.cpp
+++ b/aten/src/ATen/native/Embedding.cpp
@@ -124,18 +124,18 @@ Tensor embedding_dense_backward_cpu(
auto add_iter = TensorIteratorConfig()
.add_output(grad_weight)
.add_input(grad_weight)
- .add_input(grad)
+ .add_const_input(grad)
.resize_outputs(false)
.declare_static_shape(grad.sizes(), /*squash_dims=*/0)
.build();
const auto gW_data = reinterpret_cast<char*>(grad_weight.data_ptr());
- const auto gO_data = reinterpret_cast<char*>(grad.data_ptr());
+ const auto gO_data = reinterpret_cast<const char*>(grad.const_data_ptr());
const auto gW_stride = grad_weight.strides()[0] * grad_weight.element_size();
const auto gO_stride = grad.strides()[0] * grad.element_size();
AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_dense_backward_cpu", [&] () {
- auto indices_data = indices_contig.data_ptr<index_t>();
+ auto indices_data = indices_contig.const_data_ptr<index_t>();
// NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
std::unique_ptr<index_t[]> counts;
@@ -164,7 +164,7 @@ Tensor embedding_dense_backward_cpu(
// grad_weight[k].add_(grad[i], scale);
iter.unsafe_replace_operand(0, gW_data + k * gW_stride);
iter.unsafe_replace_operand(1, gW_data + k * gW_stride);
- iter.unsafe_replace_operand(2, gO_data + i * gO_stride);
+ iter.unsafe_replace_operand(2, const_cast<char*>(gO_data + i * gO_stride));
add_stub(kCPU, iter, scale);
}
}
diff --git a/aten/src/ATen/native/GatedLinearUnit.cpp b/aten/src/ATen/native/GatedLinearUnit.cpp
index 73028d12f9..3a4aaab632 100644
--- a/aten/src/ATen/native/GatedLinearUnit.cpp
+++ b/aten/src/ATen/native/GatedLinearUnit.cpp
@@ -71,9 +71,9 @@ Tensor& glu_backward_cpu_out(const Tensor& grad_output, const Tensor& input,
// for second gradinput half, can get a better performance by fusion
auto iter = at::TensorIteratorConfig()
.add_output(gradInputsecondHalf)
- .add_input(gradInputfirstHalf)
- .add_input(firstHalf)
- .add_input(grad_output)
+ .add_const_input(gradInputfirstHalf)
+ .add_const_input(firstHalf)
+ .add_const_input(grad_output)
.build();
glu_backward_stub(iter.device_type(), iter);
gradInputfirstHalf.mul_(grad_output);
@@ -99,10 +99,10 @@ Tensor glu_jvp(
auto dglu = at::empty_like(glu);
auto iter = at::TensorIteratorConfig()
.add_output(dglu)
- .add_input(glu)
- .add_input(b)
- .add_input(da)
- .add_input(db)
+ .add_const_input(glu)
+ .add_const_input(b)
+ .add_const_input(da)
+ .add_const_input(db)
.build();
glu_jvp_stub(iter.device_type(), iter);
return dglu;
diff --git a/aten/src/ATen/native/Loss.cpp b/aten/src/ATen/native/Loss.cpp
index 4ee3148a07..46a1b0be64 100644
--- a/aten/src/ATen/native/Loss.cpp
+++ b/aten/src/ATen/native/Loss.cpp
@@ -323,9 +323,9 @@ Tensor& binary_cross_entropy_backward_out_cpu(const Tensor& grad, const Tensor&
auto iter = TensorIteratorConfig()
.add_output(grad_input_squeezed)
- .add_owned_input(at::squeeze(grad))
- .add_owned_input(at::squeeze(input))
- .add_owned_input(at::squeeze(target))
+ .add_owned_const_input(at::squeeze(grad))
+ .add_owned_const_input(at::squeeze(input))
+ .add_owned_const_input(at::squeeze(target))
.build();
AT_DISPATCH_FLOATING_TYPES(grad_input.scalar_type(), "binary_cross_entropy_backward", [&] {
@@ -435,9 +435,9 @@ Tensor& smooth_l1_loss_backward_out(const Tensor& grad_output, const Tensor& inp
auto norm = reduction == Reduction::Mean ? 1. / input.numel() : 1.;
auto iter = at::TensorIteratorConfig()
.add_output(grad_input)
- .add_input(input)
- .add_input(target)
- .add_input(grad_output)
+ .add_const_input(input)
+ .add_const_input(target)
+ .add_const_input(grad_output)
.promote_inputs_to_common_dtype(true)
.cast_common_dtype_to_outputs(true)
.enforce_safe_casting_to_output(true)
diff --git a/aten/src/ATen/native/NaiveConvolutionTranspose2d.cpp b/aten/src/ATen/native/NaiveConvolutionTranspose2d.cpp
index 567a3754e9..fbac5d4cc7 100644
--- a/aten/src/ATen/native/NaiveConvolutionTranspose2d.cpp
+++ b/aten/src/ATen/native/NaiveConvolutionTranspose2d.cpp
@@ -356,7 +356,7 @@ void slow_conv_transpose2d_out_cpu_template(
// Unpack columns back into input:
col2im<scalar_t>(
- columns_n.data_ptr<scalar_t>(),
+ columns_n.const_data_ptr<scalar_t>(),
n_output_plane,
output_height,
output_width,
@@ -511,7 +511,7 @@ static void slow_conv_transpose2d_backward_out_cpu_template(
if (need_columns) {
// Extract columns:
im2col<scalar_t>(
- grad_output_n.data_ptr<scalar_t>(),
+ grad_output_n.const_data_ptr<scalar_t>(),
n_output_plane,
output_height,
output_width,
@@ -529,8 +529,8 @@ static void slow_conv_transpose2d_backward_out_cpu_template(
use_channels_last);
}
- auto gemm_in_ptr = need_columns ? grad_columns.data_ptr<scalar_t>()
- : grad_output_n.data_ptr<scalar_t>();
+ auto gemm_in_ptr = need_columns ? grad_columns.const_data_ptr<scalar_t>()
+ : grad_output_n.const_data_ptr<scalar_t>();
if (use_channels_last) {
int64_t m = n_input_plane;
@@ -709,7 +709,7 @@ void slow_conv_transpose2d_acc_grad_parameters_cpu(
if (need_columns) {
// Extract columns:
im2col<scalar_t>(
- grad_output_n.data_ptr<scalar_t>(),
+ grad_output_n.const_data_ptr<scalar_t>(),
n_output_plane,
output_height,
output_width,
@@ -727,8 +727,8 @@ void slow_conv_transpose2d_acc_grad_parameters_cpu(
use_channels_last);
}
- auto gemm_in_ptr = need_columns ? columns.data_ptr<scalar_t>()
- : grad_output_n.data_ptr<scalar_t>();
+ auto gemm_in_ptr = need_columns ? columns.const_data_ptr<scalar_t>()
+ : grad_output_n.const_data_ptr<scalar_t>();
if (use_channels_last) {
int64_t m = kernel_height * kernel_width * n_output_plane;
diff --git a/aten/src/ATen/native/NaiveConvolutionTranspose3d.cpp b/aten/src/ATen/native/NaiveConvolutionTranspose3d.cpp
index a9f02117dd..624e820c7b 100644
--- a/aten/src/ATen/native/NaiveConvolutionTranspose3d.cpp
+++ b/aten/src/ATen/native/NaiveConvolutionTranspose3d.cpp
@@ -329,7 +329,7 @@ void slow_conv_transpose3d_out_cpu_template(
// Unpack columns back into input:
at::native::col2vol<scalar_t>(
- columns.data_ptr<scalar_t>(),
+ columns.const_data_ptr<scalar_t>(),
n_output_plane,
output_depth,
output_height,
@@ -562,8 +562,8 @@ void slow_conv_transpose3d_backward_out_cpu_template(
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
- auto gemm_in_ptr = need_columns ? grad_columns.data_ptr<scalar_t>()
- : grad_output_n.data_ptr<scalar_t>();
+ auto gemm_in_ptr = need_columns ? grad_columns.const_data_ptr<scalar_t>()
+ : grad_output_n.const_data_ptr<scalar_t>();
cpublas::gemm(
TransposeType::NoTranspose,
TransposeType::NoTranspose,
@@ -782,8 +782,8 @@ void slow_conv_transpose3d_acc_grad_parameters_cpu(
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
- auto gemm_in_ptr = need_columns ? columns.data_ptr<scalar_t>()
- : grad_output_n.data_ptr<scalar_t>();
+ auto gemm_in_ptr = need_columns ? columns.const_data_ptr<scalar_t>()
+ : grad_output_n.const_data_ptr<scalar_t>();
cpublas::gemm(
TransposeType::Transpose,
TransposeType::NoTranspose,
diff --git a/aten/src/ATen/native/UnfoldBackward.h b/aten/src/ATen/native/UnfoldBackward.h
index 7ff39f84c6..44e05c1259 100644
--- a/aten/src/ATen/native/UnfoldBackward.h
+++ b/aten/src/ATen/native/UnfoldBackward.h
@@ -100,8 +100,8 @@ static C10_UNUSED TensorIterator _make_unfold_backward_iter_over_grad_out(
.check_all_same_dtype(false)
.resize_outputs(false)
.add_owned_output(grad_out_restrided)
- .add_owned_input(grad_in_restrided)
- .add_owned_input(idx_dim_restrided)
+ .add_owned_const_input(grad_in_restrided)
+ .add_owned_const_input(idx_dim_restrided)
.build();
return iter;
diff --git a/aten/src/ATen/native/cpu/FlashAttentionKernel.cpp b/aten/src/ATen/native/cpu/FlashAttentionKernel.cpp
index 6590edde5b..cb96f24ebd 100644
--- a/aten/src/ATen/native/cpu/FlashAttentionKernel.cpp
+++ b/aten/src/ATen/native/cpu/FlashAttentionKernel.cpp
@@ -493,15 +493,15 @@ void cpu_flash_attention_backward(
scalar_t* grad_q_data = grad_q.data_ptr<scalar_t>();
scalar_t* grad_k_data = grad_k.data_ptr<scalar_t>();
scalar_t* grad_v_data = grad_v.data_ptr<scalar_t>();
- scalar_t* grad_out_data = grad_out.data_ptr<scalar_t>();
- scalar_t* q_data = query.data_ptr<scalar_t>();
- scalar_t* k_data = key.data_ptr<scalar_t>();
- scalar_t* v_data = value.data_ptr<scalar_t>();
- accum_t* mask_data = has_attn_mask
- ? attn_mask.value().data_ptr<accum_t>()
+ const scalar_t* grad_out_data = grad_out.const_data_ptr<scalar_t>();
+ const scalar_t* q_data = query.const_data_ptr<scalar_t>();
+ const scalar_t* k_data = key.const_data_ptr<scalar_t>();
+ const scalar_t* v_data = value.const_data_ptr<scalar_t>();
+ const accum_t* mask_data = has_attn_mask
+ ? attn_mask.value().const_data_ptr<accum_t>()
: nullptr;
- scalar_t* out_data = out.data_ptr<scalar_t>();
- accum_t* lse_data = logsumexp.data_ptr<accum_t>();
+ const scalar_t* out_data = out.const_data_ptr<scalar_t>();
+ const accum_t* lse_data = logsumexp.const_data_ptr<accum_t>();
accum_t* buf_data = buf.data_ptr<accum_t>();
scalar_t* buf_reduced_data = is_reduced_type ? buf_reduced.data_ptr<scalar_t>() : nullptr;
diff --git a/aten/src/ATen/native/cuda/Activation.cpp b/aten/src/ATen/native/cuda/Activation.cpp
index 37a75981e5..6bbfd985d3 100644
--- a/aten/src/ATen/native/cuda/Activation.cpp
+++ b/aten/src/ATen/native/cuda/Activation.cpp
@@ -44,8 +44,8 @@ Tensor& glu_backward_cuda_out(const Tensor& grad_output, const Tensor& input,
const auto iter = at::TensorIteratorConfig()
.add_output(grad_input)
- .add_input(input)
- .add_input(grad_output)
+ .add_const_input(input)
+ .add_const_input(grad_output)
.resize_outputs(false)
.declare_static_shape(iter_shape)
.build();
diff --git a/aten/src/ATen/native/cuda/DepthwiseConv2d.cu b/aten/src/ATen/native/cuda/DepthwiseConv2d.cu
index c3136435cf..69757df220 100644
--- a/aten/src/ATen/native/cuda/DepthwiseConv2d.cu
+++ b/aten/src/ATen/native/cuda/DepthwiseConv2d.cu
@@ -103,9 +103,9 @@ __global__ void conv_depthwise2d_forward_kernel(
template <int kSize, int stride, typename scalar_t, typename index_t>
__global__ void conv_depthwise2d_backward_kernel(
- const PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> grad_output,
+ const PackedTensorAccessor32<const scalar_t, 4, DefaultPtrTraits> grad_output,
PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> grad_input,
- const PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> weight,
+ const PackedTensorAccessor32<const scalar_t, 4, DefaultPtrTraits> weight,
index_t totalElements,
const int inputChannels,
const int depthwiseMultiplier,
@@ -174,8 +174,8 @@ __global__ void conv_depthwise2d_backward_kernel(
template <typename scalar_t, typename index_t=unsigned>
__global__ void conv_depthwise2d_grad_weight_kernel(
- const PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> grad_output,
- const PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> input,
+ const PackedTensorAccessor32<const scalar_t, 4, DefaultPtrTraits> grad_output,
+ const PackedTensorAccessor32<const scalar_t, 4, DefaultPtrTraits> input,
PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> grad_weight,
const int batchSize,
const int inputChannels,
@@ -387,9 +387,9 @@ void conv_depthwise2d_backward_out(
const auto stream = c10::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_output.scalar_type(),
"conv_depthwise2d_backward_cuda", [&] {
- auto grad_output_a = grad_output.packed_accessor32<scalar_t, 4>();
+ auto grad_output_a = grad_output.packed_accessor32<const scalar_t, 4>();
auto grad_input_a = grad_input.packed_accessor32<scalar_t, 4>();
- auto weight_a = weight.packed_accessor32<scalar_t, 4>();
+ auto weight_a = weight.packed_accessor32<const scalar_t, 4>();
if (kW == 3 && kH == 3) {
if (dW == 1 && dH == 1){
@@ -501,8 +501,8 @@ void conv_depthwise2d_grad_weight_out(
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_output.scalar_type(),
"conv_depthwise2d_grad_weight_cuda", [&] {
- const auto grad_output_a = grad_output.packed_accessor32<scalar_t, 4>();
- const auto input_a = input.packed_accessor32<scalar_t, 4>();
+ const auto grad_output_a = grad_output.packed_accessor32<const scalar_t, 4>();
+ const auto input_a = input.packed_accessor32<const scalar_t, 4>();
const auto grad_weight_a = grad_weight.packed_accessor32<scalar_t, 4>();
using acc_t = at::acc_type<scalar_t, true>;
int warp_size = at::cuda::warp_size();
diff --git a/aten/src/ATen/native/cuda/DepthwiseConv3d.cu b/aten/src/ATen/native/cuda/DepthwiseConv3d.cu
index f7524ee323..991471a6ef 100644
--- a/aten/src/ATen/native/cuda/DepthwiseConv3d.cu
+++ b/aten/src/ATen/native/cuda/DepthwiseConv3d.cu
@@ -99,9 +99,9 @@ template <typename scalar_t, typename accscalar_t,
int kKnownStrideT, int kKnownStrideH, int kKnownStrideW>
__global__ void
conv_depthwise3d_cuda_backward_input_kernel(
- const PackedTensorAccessor32<scalar_t, 5> grad_output,
+ const PackedTensorAccessor32<const scalar_t, 5> grad_output,
PackedTensorAccessor32<scalar_t, 5> grad_input,
- const PackedTensorAccessor32<scalar_t, 5> kernel,
+ const PackedTensorAccessor32<const scalar_t, 5> kernel,
int strideT_, int strideH_, int strideW_,
int paddingT, int paddingH, int paddingW,
int dilationT_, int dilationH_, int dilationW_) {
@@ -180,8 +180,8 @@ template <typename scalar_t, typename accscalar_t,
int kKnownStrideH, int kKnownStrideW>
__global__ void
conv_depthwise3d_cuda_backward_weight_kernel(
- const PackedTensorAccessor32<scalar_t, 5> grad_output,
- const PackedTensorAccessor32<scalar_t, 5> input,
+ const PackedTensorAccessor32<const scalar_t, 5> grad_output,
+ const PackedTensorAccessor32<const scalar_t, 5> input,
PackedTensorAccessor32<scalar_t, 5> grad_kernel,
int strideT, int strideH_, int strideW_,
int paddingT, int paddingH, int paddingW,
@@ -470,9 +470,9 @@ Tensor conv_depthwise3d_cuda(
conv_depthwise3d_cuda_backward_input_kernel \
<scalar_t, accscalar_t, (kt), (kh), (kw), (dilt), (dilh), (dilw), (dt), (dh), (dw)> \
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( \
- grad_output_.packed_accessor32<scalar_t, 5>(), \
+ grad_output_.packed_accessor32<const scalar_t, 5>(), \
grad_input_.packed_accessor32<scalar_t, 5>(), \
- weight_.packed_accessor32<scalar_t, 5>(), \
+ weight_.packed_accessor32<const scalar_t, 5>(), \
stride[0], stride[1], stride[2], \
padding[0], padding[1], padding[2], \
dilation[0], dilation[1], dilation[2]); \
@@ -485,9 +485,9 @@ Tensor conv_depthwise3d_cuda(
conv_depthwise3d_cuda_backward_input_kernel \
<scalar_t, accscalar_t, -1, -1, -1, -1, -1, -1, -1, -1, -1> \
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( \
- grad_output_.packed_accessor32<scalar_t, 5>(), \
+ grad_output_.packed_accessor32<const scalar_t, 5>(), \
grad_input_.packed_accessor32<scalar_t, 5>(), \
- weight_.packed_accessor32<scalar_t, 5>(), \
+ weight_.packed_accessor32<const scalar_t, 5>(), \
stride[0], stride[1], stride[2], \
padding[0], padding[1], padding[2], \
dilation[0], dilation[1], dilation[2]); \
@@ -500,8 +500,8 @@ Tensor conv_depthwise3d_cuda(
conv_depthwise3d_cuda_backward_weight_kernel \
<scalar_t, accscalar_t, (dh), (dw)> \
<<<grid, block, smem, at::cuda::getCurrentCUDAStream()>>>( \
- grad_output_.packed_accessor32<scalar_t, 5>(), \
- input_.packed_accessor32<scalar_t, 5>(), \
+ grad_output_.packed_accessor32<const scalar_t, 5>(), \
+ input_.packed_accessor32<const scalar_t, 5>(), \
grad_weight.packed_accessor32<scalar_t, 5>(), \
stride[0], stride[1], stride[2], \
padding[0], padding[1], padding[2], \
@@ -515,8 +515,8 @@ Tensor conv_depthwise3d_cuda(
conv_depthwise3d_cuda_backward_weight_kernel \
<scalar_t, accscalar_t, -1, -1> \
<<<grid, block, smem, at::cuda::getCurrentCUDAStream()>>>( \
- grad_output_.packed_accessor32<scalar_t, 5>(), \
- input_.packed_accessor32<scalar_t, 5>(), \
+ grad_output_.packed_accessor32<const scalar_t, 5>(), \
+ input_.packed_accessor32<const scalar_t, 5>(), \
grad_weight.packed_accessor32<scalar_t, 5>(), \
stride[0], stride[1], stride[2], \
padding[0], padding[1], padding[2], \
diff --git a/aten/src/ATen/native/cudnn/Conv_v8.cpp b/aten/src/ATen/native/cudnn/Conv_v8.cpp
index 861a506660..750cbcca6b 100644
--- a/aten/src/ATen/native/cudnn/Conv_v8.cpp
+++ b/aten/src/ATen/native/cudnn/Conv_v8.cpp
@@ -376,6 +376,12 @@ void run_conv_plan(
data_ptrs[0] = x.data_ptr();
data_ptrs[1] = const_cast<void*>(y.const_data_ptr());
data_ptrs[2] = const_cast<void*>(w.const_data_ptr());
+ } else if (
+ operation ==
+ CUDNN_BACKEND_OPERATION_CONVOLUTION_BACKWARD_FILTER_DESCRIPTOR) {
+ data_ptrs[0] = const_cast<void*>(x.const_data_ptr());
+ data_ptrs[1] = const_cast<void*>(y.const_data_ptr());
+ data_ptrs[2] = w.data_ptr();
} else {
data_ptrs[0] = x.data_ptr();
data_ptrs[1] = y.data_ptr();
diff --git a/aten/src/ATen/native/mkldnn/Conv.cpp b/aten/src/ATen/native/mkldnn/Conv.cpp
index 1facc3f428..3e41e2f107 100644
--- a/aten/src/ATen/native/mkldnn/Conv.cpp
+++ b/aten/src/ATen/native/mkldnn/Conv.cpp
@@ -825,8 +825,8 @@ Tensor mkldnn_convolution_backward_input(
bool is_channels_last) {
auto grad_input = at::empty({0}, grad_output.options());
- auto grad_y = itensor_from_tensor(grad_output);
- auto w = itensor_view_from_dense(weight);
+ auto grad_y = itensor_from_tensor(grad_output, /*from_const_data_ptr*/true);
+ auto w = itensor_view_from_dense(weight, /*from_const_data_ptr*/true);
ideep::tensor grad_x;
if (is_channels_last) {
@@ -865,8 +865,8 @@ std::tuple<Tensor, Tensor> mkldnn_convolution_backward_weights(
int64_t groups,
bool bias_defined,
bool is_channels_last) {
- const ideep::tensor grad_y = itensor_from_tensor(grad_output);
- const ideep::tensor x = itensor_from_tensor(input);
+ const ideep::tensor grad_y = itensor_from_tensor(grad_output, /*from_const_data_ptr*/true);
+ const ideep::tensor x = itensor_from_tensor(input, /*from_const_data_ptr*/true);
ideep::tensor grad_w, grad_b;
if (bias_defined) {
@@ -975,8 +975,8 @@ Tensor mkldnn_convolution_transpose_backward_input(
bool is_channels_last) {
auto grad_input = at::empty({0}, grad_output.options());
- auto grad_y = itensor_from_tensor(grad_output);
- auto w = itensor_view_from_dense(weight).transpose_(0, 1);
+ auto grad_y = itensor_from_tensor(grad_output, /*from_const_data_ptr*/true);
+ auto w = itensor_view_from_dense(weight, /*from_const_data_ptr*/true).transpose_(0, 1);
ideep::tensor grad_x;
if (is_channels_last) {
@@ -1016,8 +1016,8 @@ std::tuple<Tensor,Tensor> mkldnn_convolution_transpose_backward_weights(
int64_t groups,
bool bias_defined,
bool is_channels_last) {
- auto grad_y = itensor_from_tensor(grad_output);
- auto x = itensor_from_tensor(input);
+ auto grad_y = itensor_from_tensor(grad_output, /*from_const_data_ptr*/true);
+ auto x = itensor_from_tensor(input, /*from_const_data_ptr*/true);
ideep::tensor grad_w, grad_b;
if (bias_defined) {
diff --git a/aten/src/ATen/native/transformers/cuda/attention_backward.cu b/aten/src/ATen/native/transformers/cuda/attention_backward.cu
index 12c6782768..5838a25cf7 100644
--- a/aten/src/ATen/native/transformers/cuda/attention_backward.cu
+++ b/aten/src/ATen/native/transformers/cuda/attention_backward.cu
@@ -341,12 +341,12 @@ _efficient_attention_backward(
TORCH_INTERNAL_ASSERT(delta.size(2) == M);
typename Kernel::Params p;
- p.query_ptr = (scalar_t*)query.data_ptr();
- p.key_ptr = (scalar_t*)key.data_ptr();
- p.value_ptr = (scalar_t*)value.data_ptr();
- p.logsumexp_ptr = (typename Kernel::lse_scalar_t*)logsumexp.data_ptr();
- p.output_ptr = (scalar_t*)out.data_ptr();
- p.grad_output_ptr = (scalar_t*)grad_out.data_ptr();
+ p.query_ptr = (const scalar_t*)query.const_data_ptr();
+ p.key_ptr = (const scalar_t*)key.const_data_ptr();
+ p.value_ptr = (const scalar_t*)value.const_data_ptr();
+ p.logsumexp_ptr = (typename Kernel::lse_scalar_t const *)logsumexp.const_data_ptr();
+ p.output_ptr = (const scalar_t*)out.const_data_ptr();
+ p.grad_output_ptr = (const scalar_t*)grad_out.const_data_ptr();
p.grad_query_ptr = (scalar_t*)grad_q.data_ptr();
p.grad_key_ptr = (scalar_t*)grad_k.data_ptr();
p.grad_value_ptr = (scalar_t*)grad_v.data_ptr();
@@ -360,8 +360,8 @@ _efficient_attention_backward(
p.custom_mask_type = custom_mask_type;
p.scale = sdp::calculate_scale(query, scale).as_float_unchecked();
if (cu_seqlens_q.has_value()) {
- p.cu_seqlens_q_ptr = (int32_t*)cu_seqlens_q->data_ptr();
- p.cu_seqlens_k_ptr = (int32_t*)cu_seqlens_k->data_ptr();
+ p.cu_seqlens_q_ptr = (const int32_t*)cu_seqlens_q->const_data_ptr();
+ p.cu_seqlens_k_ptr = (const int32_t*)cu_seqlens_k->const_data_ptr();
}
if (window_size.has_value()) {
p.window_size = *window_size;
diff --git a/aten/src/ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h b/aten/src/ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h
index 355de45b25..55f3f9a1ce 100644
--- a/aten/src/ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h
+++ b/aten/src/ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h
@@ -625,16 +625,16 @@ struct AttentionBackwardKernel {
struct Params {
// Input tensors
- scalar_t* query_ptr = nullptr; // [Mq, nH, K]
- scalar_t* key_ptr = nullptr; // [Mk, nH, K]
- scalar_t* value_ptr = nullptr; // [Mk, nH, Kv]
- scalar_t* bias_ptr = nullptr;
- lse_scalar_t* logsumexp_ptr = nullptr; // [nH, Mq]
- scalar_t* output_ptr = nullptr; // [Mq, nH, Kv]
- scalar_t* grad_output_ptr = nullptr; // [Mq, nH, Kv]
+ const scalar_t* query_ptr = nullptr; // [Mq, nH, K]
+ const scalar_t* key_ptr = nullptr; // [Mk, nH, K]
+ const scalar_t* value_ptr = nullptr; // [Mk, nH, Kv]
+ const scalar_t* bias_ptr = nullptr;
+ const lse_scalar_t* logsumexp_ptr = nullptr; // [nH, Mq]
+ const scalar_t* output_ptr = nullptr; // [Mq, nH, Kv]
+ const scalar_t* grad_output_ptr = nullptr; // [Mq, nH, Kv]
accum_t* delta_ptr = nullptr; // [nH, Mq]
- int32_t* cu_seqlens_q_ptr = nullptr;
- int32_t* cu_seqlens_k_ptr = nullptr;
+ const int32_t* cu_seqlens_q_ptr = nullptr;
+ const int32_t* cu_seqlens_k_ptr = nullptr;
// Output tensors
output_t* grad_query_ptr = nullptr; // [Mq, nH, K]
@@ -1476,7 +1476,7 @@ struct AttentionBackwardKernel {
auto prologueGradV = [&](int col) {
typename MatmulGradV::Mma::IteratorB iterator_dO(
{int32_t(p.gO_strideM)},
- p.grad_output_ptr + query_start * p.gO_strideM + col,
+ const_cast<scalar_t*>(p.grad_output_ptr + query_start * p.gO_strideM + col),
{num_queries_in_block, p.head_dim_value - col},
thread_id,
no_offset);
@@ -1489,7 +1489,7 @@ struct AttentionBackwardKernel {
auto prologueGradQ = [&](int col) {
typename MatmulGradQ::Mma::IteratorB iterator_K(
{int32_t(p.k_strideM)},
- p.key_ptr + key_start * p.k_strideM + col,
+ const_cast<scalar_t*>(p.key_ptr + key_start * p.k_strideM + col),
{num_keys_in_block, p.head_dim - col},
thread_id,
no_offset);
@@ -1499,7 +1499,7 @@ struct AttentionBackwardKernel {
auto prologueGradK = [&](int col) {
typename MatmulGradK::Mma::IteratorB iterator_Q(
{int32_t(p.q_strideM)},
- p.query_ptr + query_start * p.q_strideM + col,
+ const_cast<scalar_t*>(p.query_ptr + query_start * p.q_strideM + col),
{num_queries_in_block, p.head_dim - col},
thread_id,
no_offset);
@@ -1512,13 +1512,13 @@ struct AttentionBackwardKernel {
auto prologueDOV = [&]() {
typename MatmulDOIVJ::Mma::IteratorA iterator_A(
{int32_t(p.gO_strideM)},
- p.grad_output_ptr + query_start * p.gO_strideM,
+ const_cast<scalar_t*>(p.grad_output_ptr + query_start * p.gO_strideM),
{num_queries_in_block, p.head_dim_value},
thread_id,
no_offset);
typename MatmulDOIVJ::Mma::IteratorB iterator_B(
{int32_t(p.v_strideM)},
- p.value_ptr + key_start * p.v_strideM,
+ const_cast<scalar_t*>(p.value_ptr + key_start * p.v_strideM),
{p.head_dim_value, num_keys_in_block},
thread_id,
no_offset);
@@ -1545,7 +1545,7 @@ struct AttentionBackwardKernel {
// k_j
typename Mma::IteratorA iterator_A(
{int32_t(p.k_strideM)},
- p.key_ptr + key_start * p.k_strideM,
+ const_cast<scalar_t*>(p.key_ptr + key_start * p.k_strideM),
{problem_size.m(), problem_size.k()},
thread_id,
no_offset);
@@ -1553,7 +1553,7 @@ struct AttentionBackwardKernel {
// q_i.transpose(-2, -1)
typename Mma::IteratorB iterator_B(
{int32_t(p.q_strideM)},
- p.query_ptr + query_start * p.q_strideM,
+ const_cast<scalar_t*>(p.query_ptr + query_start * p.q_strideM),
{problem_size.k(), problem_size.n()},
thread_id,
no_offset);
@@ -1592,7 +1592,7 @@ struct AttentionBackwardKernel {
// load bias tile Bij into shared memory
typename MatmulQK::BiasLoader::GmemTileIterator bias_iter(
{cutlass::layout::RowMajor(p.bias_strideM)},
- p.bias_ptr + query_start * p.bias_strideM + key_start,
+ const_cast<scalar_t*>(p.bias_ptr + query_start * p.bias_strideM + key_start),
{num_queries_in_block, num_keys_in_block},
thread_id);
cutlass::TensorRef<scalar_t, cutlass::layout::RowMajor> bias_tensor_ref(
@@ -1784,7 +1784,7 @@ struct AttentionBackwardKernel {
};
typename Mma::IteratorB iterator_B(
{int32_t(p.gO_strideM)},
- p.grad_output_ptr + query_start * p.gO_strideM + col,
+ const_cast<scalar_t*>(p.grad_output_ptr + query_start * p.gO_strideM + col),
{num_queries_in_block, p.head_dim_value - col},
thread_id,
no_offset);
@@ -1857,7 +1857,7 @@ struct AttentionBackwardKernel {
// do_i
typename Mma::IteratorA iterator_A(
{int32_t(p.gO_strideM)},
- p.grad_output_ptr + query_start * p.gO_strideM,
+ const_cast<scalar_t*>(p.grad_output_ptr + query_start * p.gO_strideM),
{num_queries_in_block, p.head_dim_value},
thread_id,
no_offset);
@@ -1865,7 +1865,7 @@ struct AttentionBackwardKernel {
// v_j.transpose(-2, -1)
typename Mma::IteratorB iterator_B(
{int32_t(p.v_strideM)},
- p.value_ptr + key_start * p.v_strideM,
+ const_cast<scalar_t*>(p.value_ptr + key_start * p.v_strideM),
{p.head_dim_value, num_keys_in_block},
thread_id,
no_offset);
@@ -2026,7 +2026,7 @@ struct AttentionBackwardKernel {
// k_j
typename Mma::IteratorB iterator_B(
{int32_t(p.k_strideM)},
- p.key_ptr + key_start * p.k_strideM + col,
+ const_cast<scalar_t*>(p.key_ptr + key_start * p.k_strideM + col),
{problem_size.k(), problem_size.n()},
thread_id,
no_offset);
@@ -2161,7 +2161,7 @@ struct AttentionBackwardKernel {
// q_i
typename Mma::IteratorB iterator_B(
{int32_t(p.q_strideM)},
- p.query_ptr + query_start * p.q_strideM + col,
+ const_cast<scalar_t*>(p.query_ptr + query_start * p.q_strideM + col),
{problem_size.k(), problem_size.n()},
thread_id,
no_offset);
@@ -2351,14 +2351,14 @@ struct AttentionBackwardKernel {
int thread_id = 32 * warp_id + lane_id;
typename MatmulQK::Mma::IteratorA iterator_A(
{int32_t(p.k_strideM)},
- p.key_ptr + key_start * p.k_strideM,
+ const_cast<scalar_t*>(p.key_ptr + key_start * p.k_strideM),
{p.num_keys - key_start, p.head_dim},
thread_id,
cutlass::MatrixCoord{0, 0});
typename MatmulQK::Mma::IteratorB iterator_B(
{int32_t(p.q_strideM)},
- p.query_ptr + query_start * p.q_strideM,
+ const_cast<scalar_t*>(p.query_ptr + query_start * p.q_strideM),
{p.head_dim, p.num_queries - query_start},
thread_id,
cutlass::MatrixCoord{0, 0});
diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py
index 36570732cd..c18c8238b2 100644
--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -13423,7 +13423,6 @@ op_db: List[OpInfo] = [
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
- supports_cow_input_no_materialize_backward=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=(
DecorateInfo(
@@ -13470,7 +13469,6 @@ op_db: List[OpInfo] = [
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
- supports_cow_input_no_materialize_backward=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=[
DecorateInfo(
@@ -13517,7 +13515,6 @@ op_db: List[OpInfo] = [
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
- supports_cow_input_no_materialize_backward=False,
# Runs very slowly on slow-gradcheck - alternatively reduce input sizes
gradcheck_fast_mode=True,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
@@ -13578,7 +13575,6 @@ op_db: List[OpInfo] = [
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
- supports_cow_input_no_materialize_backward=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
decorators=(
DecorateInfo(
@@ -13619,7 +13615,6 @@ op_db: List[OpInfo] = [
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
- supports_cow_input_no_materialize_backward=False,
decorators=(
DecorateInfo(
toleranceOverride({torch.chalf: tol(atol=6e-2, rtol=5e-2)}),
@@ -13651,7 +13646,6 @@ op_db: List[OpInfo] = [
gradcheck_fast_mode=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
- supports_cow_input_no_materialize_backward=False,
decorators=(
DecorateInfo(
toleranceOverride({torch.chalf: tol(atol=6e-2, rtol=5e-2)}),
@@ -14425,7 +14419,6 @@ op_db: List[OpInfo] = [
dtypes=floating_types_and(torch.bfloat16, torch.float16),
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
- supports_cow_input_no_materialize_backward=False,
supports_out=False),
UnaryUfuncInfo(
'nn.functional.elu',
@@ -14467,7 +14460,6 @@ op_db: List[OpInfo] = [
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
- supports_cow_input_no_materialize_backward=False,
# test_reference_numerics only tests the case when the weight tensor is a scalar
sample_kwargs=sample_kwargs_prelu_scalar_weight,
error_inputs_func=error_inputs_prelu,
@@ -14595,7 +14587,6 @@ op_db: List[OpInfo] = [
supports_forward_ad=False,
supports_fwgrad_bwgrad=True,
check_batched_forward_grad=False,
- supports_cow_input_no_materialize_backward=False,
decorators=[DecorateInfo(toleranceOverride(
{torch.float32: tol(atol=5e-05, rtol=5e-6)}), 'TestCommon',), ],
skips=(
@@ -14839,7 +14830,6 @@ op_db: List[OpInfo] = [
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_gradgrad=True,
- supports_cow_input_no_materialize_backward=False,
# autodiff_nonfusible_nodes=["aten::log_sigmoid"],
decorators=[
DecorateInfo(
@@ -14937,7 +14927,6 @@ op_db: List[OpInfo] = [
assert_autodiffed=False,
supports_gradgrad=True,
supports_out=False,
- supports_cow_input_no_materialize_backward=False,
sample_kwargs=lambda device, dtype, input: ({'threshold': float.fromhex('0x1.3ap-3'),
'value': -9},
{'threshold': float.fromhex('0x1.3ap-3'),
@@ -15076,7 +15065,6 @@ op_db: List[OpInfo] = [
supports_autograd=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
- supports_cow_input_no_materialize_backward=False,
decorators=(
# RuntimeError: expected int at position 0, but got: Tensor
DecorateInfo(
@@ -15243,7 +15231,6 @@ op_db: List[OpInfo] = [
supports_gradgrad=True,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
- supports_cow_input_no_materialize_backward=False,
autodiff_nonfusible_nodes=["aten::gelu"],
skips=(
# AssertionError: Tensor-likes are not close!
@@ -17812,7 +17799,6 @@ op_db: List[OpInfo] = [
check_batched_gradgrad=False,
# See https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
- supports_cow_input_no_materialize_backward=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# Skip operator schema test because this is a functional and not an operator
@@ -17830,7 +17816,6 @@ op_db: List[OpInfo] = [
check_batched_gradgrad=False,
# See https://github.com/pytorch/pytorch/issues/66357
check_batched_forward_grad=False,
- supports_cow_input_no_materialize_backward=False,
sample_inputs_func=sample_inputs_unfold),
OpInfo('msort',
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
@@ -18347,7 +18332,6 @@ op_db: List[OpInfo] = [
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
- supports_cow_input_no_materialize_backward=False,
skips=(
# RuntimeError: input->type()->kind() == TypeKind::OptionalTypeINTERNAL ASSERT FAILED
# at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, please report a bug to PyTorch.
@@ -18906,7 +18890,6 @@ op_db: List[OpInfo] = [
dtypes=floating_types_and(torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_embedding,
allow_cow_input_materialize_forward=[0],
- supports_cow_input_no_materialize_backward=False,
error_inputs_func=error_inputs_embedding,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True, | 2.41.0 |
e869c9bb78143c10a765eb2af64d57925633065 | Thu, 11 Apr 2024 01:43:53 +0000 | [PATCH 0029/1000] Avoid COW materialization in backward ops (4) (#123798) | Affected ops: * embedding_bag * mse_loss * huber_loss * grid_sample * ctc_loss * nll_loss * pdist * _segment_reduce Part of #97856 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123798 Approved by: https://github.com/ezyang ghstack dependencies: #123797 | diff --git a/aten/src/ATen/native/EmbeddingBag.cpp b/aten/src/ATen/native/EmbeddingBag.cpp
index 64e5d2775a..8b6c90dae2 100644
--- a/aten/src/ATen/native/EmbeddingBag.cpp
+++ b/aten/src/ATen/native/EmbeddingBag.cpp
@@ -1478,7 +1478,7 @@ static Tensor _embedding_bag_dense_backward_cpu_max(
template<typename index_t>
static std::vector<index_t> compute_counts(
int64_t num_weights,
- index_t* indices_data,
+ const index_t* indices_data,
int64_t indices_length) {
std::vector<index_t> counts(num_weights, 0);
for (const auto i : c10::irange(indices_length)) {
@@ -1499,7 +1499,7 @@ static std::vector<index_t> compute_counts(
template<typename index_t>
static std::vector<index_t> compute_counts_uniq(
int64_t num_weights,
- index_t* indices_data,
+ const index_t* indices_data,
int64_t indices_length,
const std::vector<index_t>& counts) {
std::vector<index_t> counts_uniq;
@@ -1538,11 +1538,11 @@ void _embedding_bag_dense_backward_cpu_sum_mean(
optional<Tensor> per_sample_weights;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
- scalar_t* per_sample_weights_data;
+ const scalar_t* per_sample_weights_data;
optional<int64_t> per_sample_weights_stride;
if (per_sample_weights_.defined()) {
per_sample_weights = per_sample_weights_.index_select(0, ind_sort);
- per_sample_weights_data = per_sample_weights->data_ptr<scalar_t>();
+ per_sample_weights_data = per_sample_weights->const_data_ptr<scalar_t>();
per_sample_weights_stride = per_sample_weights->strides()[0];
}
@@ -1554,9 +1554,9 @@ void _embedding_bag_dense_backward_cpu_sum_mean(
[&indices, &offset2bag, &bag_size_, &num_weights, &numel, &per_sample_weights,
&per_sample_weights_data, &per_sample_weights_stride, &mode, &scale_grad_by_freq,
&grad, &index_grad_weight, &padding_idx] {
- auto* indices_data = indices.data_ptr<index_t>();
- auto* offset2bag_data = offset2bag.data_ptr<index_t>();
- auto* bag_size_data = bag_size_.data_ptr<index_t>();
+ auto* indices_data = indices.const_data_ptr<index_t>();
+ auto* offset2bag_data = offset2bag.const_data_ptr<index_t>();
+ auto* bag_size_data = bag_size_.const_data_ptr<index_t>();
auto counts = compute_counts(num_weights, indices_data, numel);
auto next_unique_index_idx =
@@ -1590,7 +1590,7 @@ void _embedding_bag_dense_backward_cpu_sum_mean(
}
int64_t ddim = grad.size(1);
auto igwd = index_grad_weight.data_ptr<scalar_t>();
- auto gd = grad.data_ptr<scalar_t>();
+ auto gd = grad.const_data_ptr<scalar_t>();
at::native::cpublas::axpy<scalar_t>(ddim, (scalar_t)scale, gd + ddim * source, 1,
igwd + ddim * index, 1);
}
@@ -1702,11 +1702,11 @@ Tensor _embedding_bag_per_sample_weights_backward_cpu_template(
offset2bag_ = offset2bag;
}
- auto* grad_data = grad.data_ptr<scalar_t>();
+ auto* grad_data = grad.const_data_ptr<scalar_t>();
auto grad_stride0 = grad.strides()[0];
auto grad_stride1 = grad.strides()[1];
- auto* weight_data = weight.data_ptr<scalar_t>();
+ auto* weight_data = weight.const_data_ptr<scalar_t>();
auto weight_stride0 = weight.strides()[0];
auto weight_stride1 = weight.strides()[1];
@@ -1716,11 +1716,11 @@ Tensor _embedding_bag_per_sample_weights_backward_cpu_template(
[&indices, &output, &offset2bag_, &num_samples, &embedding_features,
&grad_data, &grad_stride0, &grad_stride1, &weight_data, &weight_stride0, &weight_stride1,
&padding_idx] () {
- auto* indices_data = indices.data_ptr<index_t>();
+ auto* indices_data = indices.const_data_ptr<index_t>();
// The following are contiguous
auto* output_data = output.data_ptr<scalar_t>();
- auto* offset2bag_data = offset2bag_.data_ptr<index_t>();
+ auto* offset2bag_data = offset2bag_.const_data_ptr<index_t>();
// XXX: 64 was arbitrarily chosen. There is probably a sweet spot for this number.
parallel_for(0, num_samples, 64,
@@ -1733,8 +1733,8 @@ Tensor _embedding_bag_per_sample_weights_backward_cpu_template(
if (embedding_idx != static_cast<index_t>(padding_idx)) {
output_data[sample_idx] = dot_impl<scalar_t>(
embedding_features,
- grad_data + grad_stride0 * bag_idx, grad_stride1,
- weight_data + weight_stride0 * embedding_idx, weight_stride1);
+ const_cast<scalar_t*>(grad_data + grad_stride0 * bag_idx), grad_stride1,
+ const_cast<scalar_t*>(weight_data + weight_stride0 * embedding_idx), weight_stride1);
}
}
});
diff --git a/aten/src/ATen/native/GridSampler.cpp b/aten/src/ATen/native/GridSampler.cpp
index 7f05b5fdd8..5d0259eeb1 100644
--- a/aten/src/ATen/native/GridSampler.cpp
+++ b/aten/src/ATen/native/GridSampler.cpp
@@ -268,9 +268,9 @@ namespace {
}
int64_t gGrid_sN = grad_grid.stride(0);
int64_t gGrid_sW = grad_grid.stride(3);
- scalar_t *inp_ptr = input.data_ptr<scalar_t>();
- scalar_t *grid_ptr = grid.data_ptr<scalar_t>();
- scalar_t *gOut_ptr = grad_output.data_ptr<scalar_t>();
+ const scalar_t *inp_ptr = input.const_data_ptr<scalar_t>();
+ const scalar_t *grid_ptr = grid.const_data_ptr<scalar_t>();
+ const scalar_t *gOut_ptr = grad_output.const_data_ptr<scalar_t>();
scalar_t *gInp_ptr = nullptr;
if (input_requires_grad) {
gInp_ptr = grad_input.mutable_data_ptr<scalar_t>();
@@ -279,14 +279,14 @@ namespace {
// loop over each output pixel
at::parallel_for(0, N, 0, [&](int64_t start, int64_t end) {
for (const auto n : c10::irange(start, end)) {
- scalar_t *grid_ptr_N = grid_ptr + n * grid_sN;
- scalar_t *inp_ptr_N = inp_ptr + n * inp_sN;
+ const scalar_t *grid_ptr_N = grid_ptr + n * grid_sN;
+ const scalar_t *inp_ptr_N = inp_ptr + n * inp_sN;
scalar_t *gGrid_ptr_NDHW = gGrid_ptr + n * gGrid_sN;
for (const auto d : c10::irange(out_D)) {
for (const auto h : c10::irange(out_H)) {
for (int64_t w = 0; w < out_W; ++w, gGrid_ptr_NDHW += gGrid_sW /* grad_grid is contiguous */ ) {
// get the corresponding input x, y, z co-ordinates from grid
- scalar_t *grid_ptr_NDHW = grid_ptr_N + d * grid_sD + h * grid_sH + w * grid_sW;
+ const scalar_t *grid_ptr_NDHW = grid_ptr_N + d * grid_sD + h * grid_sH + w * grid_sW;
scalar_t ix = *grid_ptr_NDHW;
scalar_t iy = grid_ptr_NDHW[grid_sCoor];
scalar_t iz = grid_ptr_NDHW[2 * grid_sCoor];
@@ -344,8 +344,8 @@ namespace {
scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw);
scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0), giz = static_cast<scalar_t>(0);
- scalar_t *gOut_ptr_NCDHW = gOut_ptr + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW;
- scalar_t *inp_ptr_NC = inp_ptr_N;
+ const scalar_t *gOut_ptr_NCDHW = gOut_ptr + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW;
+ const scalar_t *inp_ptr_NC = inp_ptr_N;
scalar_t *gInp_ptr_NC = gInp_ptr + n * gInp_sN;
// calculate bilinear weighted pixel value and set output pixel
for (int64_t c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, gInp_ptr_NC += gInp_sC, inp_ptr_NC += inp_sC) {
@@ -423,7 +423,7 @@ namespace {
int64_t iz_nearest = static_cast<int64_t>(std::nearbyint(iz));
// assign nearest neighbour pixel value to output pixel
- scalar_t *gOut_ptr_NCDHW = gOut_ptr + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW;
+ const scalar_t *gOut_ptr_NCDHW = gOut_ptr + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW;
if (input_requires_grad) {
scalar_t *gInp_ptr_NC = gInp_ptr + n * gInp_sN;
for (int64_t c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, gInp_ptr_NC += gInp_sC) {
@@ -758,21 +758,21 @@ _grid_sampler_2d_cpu_fallback_backward(const Tensor& grad_output,
int64_t gInp_sW = grad_input.stride(3);
int64_t gGrid_sN = grad_grid.stride(0);
int64_t gGrid_sW = grad_grid.stride(2);
- scalar_t *inp_ptr = input.data_ptr<scalar_t>();
- scalar_t *grid_ptr = grid.data_ptr<scalar_t>();
- scalar_t *gOut_ptr = grad_output.data_ptr<scalar_t>();
+ const scalar_t *inp_ptr = input.const_data_ptr<scalar_t>();
+ const scalar_t *grid_ptr = grid.const_data_ptr<scalar_t>();
+ const scalar_t *gOut_ptr = grad_output.const_data_ptr<scalar_t>();
scalar_t *gInp_ptr = grad_input.mutable_data_ptr<scalar_t>();
scalar_t *gGrid_ptr = grad_grid.data_ptr<scalar_t>();
// loop over each output pixel
at::parallel_for(0, N, 0, [&](int64_t start, int64_t end) {
for (const auto n : c10::irange(start, end)) {
- scalar_t *grid_ptr_N = grid_ptr + n * grid_sN;
- scalar_t *inp_ptr_N = inp_ptr + n * inp_sN;
+ const scalar_t *grid_ptr_N = grid_ptr + n * grid_sN;
+ const scalar_t *inp_ptr_N = inp_ptr + n * inp_sN;
scalar_t *gGrid_ptr_NHW = gGrid_ptr + n * gGrid_sN;
for (const auto h : c10::irange(out_H)) {
for (int64_t w = 0; w < out_W; ++w, gGrid_ptr_NHW += gGrid_sW /* grad_grid is contiguous */ ) {
// get the corresponding input x, y co-ordinates from grid
- scalar_t *grid_ptr_NHW = grid_ptr_N + h * grid_sH + w * grid_sW;
+ const scalar_t *grid_ptr_NHW = grid_ptr_N + h * grid_sH + w * grid_sW;
scalar_t x = *grid_ptr_NHW;
scalar_t y = grid_ptr_NHW[grid_sCoor];
@@ -804,9 +804,9 @@ _grid_sampler_2d_cpu_fallback_backward(const Tensor& grad_output,
scalar_t se = (ix - ix_nw) * (iy - iy_nw);
scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0);
- scalar_t *gOut_ptr_NCHW = gOut_ptr + n * gOut_sN + h * gOut_sH + w * gOut_sW;
+ const scalar_t *gOut_ptr_NCHW = gOut_ptr + n * gOut_sN + h * gOut_sH + w * gOut_sW;
scalar_t *gInp_ptr_NC = gInp_ptr + n * gInp_sN;
- scalar_t *inp_ptr_NC = inp_ptr_N;
+ const scalar_t *inp_ptr_NC = inp_ptr_N;
// calculate bilinear weighted pixel value and set output pixel
for (int64_t c = 0; c < C; ++c, gOut_ptr_NCHW += gOut_sC, gInp_ptr_NC += gInp_sC, inp_ptr_NC += inp_sC) {
scalar_t gOut = *gOut_ptr_NCHW;
@@ -848,7 +848,7 @@ _grid_sampler_2d_cpu_fallback_backward(const Tensor& grad_output,
int64_t iy_nearest = static_cast<int64_t>(std::nearbyint(iy));
// assign nearest neighbour pixel value to output pixel
- scalar_t *gOut_ptr_NCHW = gOut_ptr + n * gOut_sN + h * gOut_sH + w * gOut_sW;
+ const scalar_t *gOut_ptr_NCHW = gOut_ptr + n * gOut_sN + h * gOut_sH + w * gOut_sW;
scalar_t *gInp_ptr_NC = gInp_ptr + n * gInp_sN;
for (int64_t c = 0; c < C; ++c, gOut_ptr_NCHW += gOut_sC, gInp_ptr_NC += gInp_sC) {
// calculate and set grad_input
@@ -883,9 +883,9 @@ _grid_sampler_2d_cpu_fallback_backward(const Tensor& grad_output,
scalar_t gix = static_cast<scalar_t>(0);
scalar_t giy = static_cast<scalar_t>(0);
- scalar_t *gOut_ptr_NCHW = gOut_ptr + n * gOut_sN + h * gOut_sH + w * gOut_sW;
+ const scalar_t *gOut_ptr_NCHW = gOut_ptr + n * gOut_sN + h * gOut_sH + w * gOut_sW;
scalar_t *gInp_ptr_NC = gInp_ptr + n * gInp_sN;
- scalar_t *inp_ptr_NC = inp_ptr_N;
+ const scalar_t *inp_ptr_NC = inp_ptr_N;
for (int64_t c = 0; c < C; ++c, gOut_ptr_NCHW += gOut_sC, gInp_ptr_NC += gInp_sC, inp_ptr_NC+= inp_sC) {
scalar_t gOut = *gOut_ptr_NCHW;
diff --git a/aten/src/ATen/native/Loss.cpp b/aten/src/ATen/native/Loss.cpp
index 46a1b0be64..231ac54f67 100644
--- a/aten/src/ATen/native/Loss.cpp
+++ b/aten/src/ATen/native/Loss.cpp
@@ -480,9 +480,9 @@ Tensor& huber_loss_backward_out(const Tensor& grad_output, const Tensor& input,
auto norm = (reduction == Reduction::Mean) ? (1. / input.numel()) : 1.;
auto iter = at::TensorIteratorConfig()
.add_output(grad_input)
- .add_input(input)
- .add_input(target)
- .add_input(grad_output)
+ .add_const_input(input)
+ .add_const_input(target)
+ .add_const_input(grad_output)
.build();
huber_backward_stub(iter.device_type(), iter, norm, delta);
return grad_input;
@@ -498,9 +498,9 @@ Tensor& mse_loss_backward_out(const Tensor& grad_output,
auto norm = reduction == Reduction::Mean ? 2. / input.numel() : 2.;
auto iter = at::TensorIteratorConfig()
.add_output(grad_input)
- .add_input(input)
- .add_input(target)
- .add_input(grad_output)
+ .add_const_input(input)
+ .add_const_input(target)
+ .add_const_input(grad_output)
.build();
mse_backward_stub(iter.device_type(), iter, norm);
return grad_input;
diff --git a/aten/src/ATen/native/LossCTC.cpp b/aten/src/ATen/native/LossCTC.cpp
index b8e5a83809..b13ed7e2ce 100644
--- a/aten/src/ATen/native/LossCTC.cpp
+++ b/aten/src/ATen/native/LossCTC.cpp
@@ -269,13 +269,13 @@ Tensor ctc_loss_backward_cpu_template(const Tensor& grad_out, const Tensor& log_
Tensor log_beta = at::empty_like(log_alpha, LEGACY_CONTIGUOUS_MEMORY_FORMAT); // could be optimized to use only 2 rows
auto lpp = log_probs.permute({1,0,2});
- auto log_probs_a_global = lpp.accessor<scalar_t, 3>();
- auto log_alpha_a_global = log_alpha.accessor<scalar_t, 3>();
+ auto log_probs_a_global = lpp.accessor<const scalar_t, 3>();
+ auto log_alpha_a_global = log_alpha.accessor<const scalar_t, 3>();
auto log_beta_a_global = log_beta.accessor<scalar_t, 3>();
auto gp = grad.permute({1,0,2});
auto grad_a_global = gp.accessor<scalar_t, 3>();
- auto targets_data = targets.data_ptr<target_t>();
- auto grad_out_a = grad_out.accessor<scalar_t, 1>();
+ auto targets_data = targets.const_data_ptr<target_t>();
+ auto grad_out_a = grad_out.accessor<const scalar_t, 1>();
auto create_fill_iterator = [](const Tensor& tensor, IntArrayRef squash_dims) {
return TensorIteratorConfig()
diff --git a/aten/src/ATen/native/SegmentReduce.cpp b/aten/src/ATen/native/SegmentReduce.cpp
index 606a1f7e32..3c7b539ee4 100644
--- a/aten/src/ATen/native/SegmentReduce.cpp
+++ b/aten/src/ATen/native/SegmentReduce.cpp
@@ -211,8 +211,8 @@ void _segment_reduce_cpu_lengths_backward_kernel1(
data_contig.scalar_type(),
"_segment_reduce_cpu",
[&]() {
- auto* output_data = output_contig.data_ptr<scalar_t>();
- auto* grad_data = grad_contig.data_ptr<scalar_t>();
+ auto* output_data = output_contig.const_data_ptr<scalar_t>();
+ auto* grad_data = grad_contig.const_data_ptr<scalar_t>();
auto* grad_input_data = grad_input.mutable_data_ptr<scalar_t>();
const auto* values_data = data_contig.const_data_ptr<scalar_t>();
// Used to calculate exclusive prod
diff --git a/aten/src/ATen/native/cpu/GridSamplerKernel.cpp b/aten/src/ATen/native/cpu/GridSamplerKernel.cpp
index 2a7b673b4c..0a704e5419 100644
--- a/aten/src/ATen/native/cpu/GridSamplerKernel.cpp
+++ b/aten/src/ATen/native/cpu/GridSamplerKernel.cpp
@@ -588,7 +588,7 @@ struct ApplyGridSample<scalar_t, 2, GridSamplerInterpolation::Bilinear,
template<bool input_requires_grad>
inline void backward(TensorAccessor<scalar_t, 3>* gInp_slice_ptr,
TensorAccessor<scalar_t, 3>& gGrid_slice,
- const TensorAccessor<scalar_t, 3>& gOut_slice,
+ const TensorAccessor<const scalar_t, 3>& gOut_slice,
const TensorAccessor<const scalar_t, 3>& inp_slice,
int64_t offset, const Vec& grid_x, const Vec& grid_y,
int64_t len) const {
@@ -762,7 +762,7 @@ struct ApplyGridSample<scalar_t, 2, GridSamplerInterpolation::Nearest,
template<bool input_requires_grad>
inline void backward(TensorAccessor<scalar_t, 3>* gInp_slice_ptr,
TensorAccessor<scalar_t, 3>& gGrid_slice,
- const TensorAccessor<scalar_t, 3>& gOut_slice,
+ const TensorAccessor<const scalar_t, 3>& gOut_slice,
const TensorAccessor<const scalar_t, 3>& /*inp_slice*/,
int64_t offset, const Vec& grid_x, const Vec& grid_y,
int64_t len) const {
@@ -950,7 +950,7 @@ struct ApplyGridSample<scalar_t, 2, GridSamplerInterpolation::Bicubic,
template<bool input_requires_grad>
inline void backward(TensorAccessor<scalar_t, 3>* gInp_slice_ptr,
TensorAccessor<scalar_t, 3>& gGrid_slice,
- const TensorAccessor<scalar_t, 3>& gOut_slice,
+ const TensorAccessor<const scalar_t, 3>& gOut_slice,
const TensorAccessor<const scalar_t, 3>& inp_slice,
int64_t offset, const Vec& grid_x, const Vec& grid_y,
int64_t len) const {
@@ -1276,7 +1276,7 @@ void grid_sampler_2d_backward_cpu_kernel_impl(
auto gGrid_acc = grad_grid.accessor<scalar_t, 4>();
auto inp_acc = input.accessor<const scalar_t, 4>();
auto grid_acc = grid.accessor<const scalar_t, 4>();
- auto gOut_acc = grad_output.accessor<scalar_t, 4>();
+ auto gOut_acc = grad_output.accessor<const scalar_t, 4>();
if (input_requires_grad) {
auto gInp_acc = grad_input.accessor<scalar_t, 4>();
if (align_corners) {
diff --git a/aten/src/ATen/native/cuda/GridSampler.cu b/aten/src/ATen/native/cuda/GridSampler.cu
index 639222a207..2c9128eee2 100644
--- a/aten/src/ATen/native/cuda/GridSampler.cu
+++ b/aten/src/ATen/native/cuda/GridSampler.cu
@@ -311,9 +311,9 @@ namespace {
C10_LAUNCH_BOUNDS_1(256)
__global__ void grid_sampler_2d_backward_kernel(
const index_t nthreads,
- TensorInfo<scalar_t, index_t> grad_output,
- TensorInfo<scalar_t, index_t> input,
- TensorInfo<scalar_t, index_t> grid,
+ TensorInfo<const scalar_t, index_t> grad_output,
+ TensorInfo<const scalar_t, index_t> input,
+ TensorInfo<const scalar_t, index_t> grid,
TensorInfo<scalar_t, index_t> grad_input, // initialized to zeros (or unused if input_requires_grad is false)
TensorInfo<scalar_t, index_t> grad_grid, // initialized to empty
const GridSamplerInterpolation interpolation_mode,
@@ -385,11 +385,11 @@ namespace {
scalar_t se = (ix - ix_nw) * (iy - iy_nw);
scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0);
- scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW;
+ const scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW;
index_t NC_offset = n * gInp_sN;
- scalar_t *inp_ptr_NC = input.data + n * inp_sN;
+ const scalar_t *inp_ptr_NC = input.data + n * inp_sN;
for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, NC_offset += gInp_sC, gOut_ptr_NCHW += gOut_sC) {
- scalar_t gOut = *gOut_ptr_NCHW;
+ const scalar_t gOut = *gOut_ptr_NCHW;
if (input_requires_grad) {
// calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd].
@@ -435,7 +435,7 @@ namespace {
index_t iy_nearest = static_cast<index_t>(std::nearbyint(iy));
// assign nearest neighbour pixel value to output pixel
- scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW;
+ const scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW;
index_t NC_offset = n * gInp_sN;
for (index_t c = 0; c < C; ++c, NC_offset += gInp_sC, gOut_ptr_NCHW += gOut_sC) {
// calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd].
@@ -474,12 +474,12 @@ namespace {
scalar_t gix = static_cast<scalar_t>(0);
scalar_t giy = static_cast<scalar_t>(0);
- scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW;
+ const scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW;
index_t NC_offset = n * gInp_sN;
- scalar_t *inp_ptr_NC = input.data + n * inp_sN;
+ const scalar_t *inp_ptr_NC = input.data + n * inp_sN;
for (index_t c = 0; c < C; ++c, gOut_ptr_NCHW += gOut_sC, NC_offset += gInp_sC, inp_ptr_NC+= inp_sC) {
- scalar_t gOut = *gOut_ptr_NCHW;
+ const scalar_t gOut = *gOut_ptr_NCHW;
#pragma unroll 4
for (index_t i = 0; i < 4; ++i) {
@@ -517,9 +517,9 @@ namespace {
C10_LAUNCH_BOUNDS_1(256)
__global__ void grid_sampler_3d_backward_kernel(
const index_t nthreads,
- TensorInfo<scalar_t, index_t> grad_output,
- TensorInfo<scalar_t, index_t> input,
- TensorInfo<scalar_t, index_t> grid,
+ TensorInfo<const scalar_t, index_t> grad_output,
+ TensorInfo<const scalar_t, index_t> input,
+ TensorInfo<const scalar_t, index_t> grid,
TensorInfo<scalar_t, index_t> grad_input, // initialized to zeros (or unused if input_requires_grad is false)
TensorInfo<scalar_t, index_t> grad_grid, // initialized to empty
const GridSamplerInterpolation interpolation_mode,
@@ -630,12 +630,12 @@ namespace {
scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw);
scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0), giz = static_cast<scalar_t>(0);
- scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW;
+ const scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW;
index_t NC_offset;
if (input_requires_grad) {
NC_offset = n * gInp_sN;
}
- scalar_t *inp_ptr_NC = input.data + n * inp_sN;
+ const scalar_t *inp_ptr_NC = input.data + n * inp_sN;
// calculate bilinear weighted pixel value and set output pixel
for (index_t c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, NC_offset += gInp_sC, inp_ptr_NC += inp_sC) {
scalar_t gOut = *gOut_ptr_NCDHW;
@@ -725,7 +725,7 @@ namespace {
auto iz_nearest = static_cast<index_t>(std::nearbyint(iz));
// assign nearest neighbour pixel value to output pixel
- scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW;
+ const scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW;
index_t NC_offset = n * gInp_sN;
for (index_t c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, NC_offset += gInp_sC) {
// calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd].
@@ -868,9 +868,9 @@ void launch_grid_sampler_2d_backward_kernel(
grid_sampler_2d_backward_kernel<scalar_t>
<<<GET_BLOCKS(count, 256), 256, 0, at::cuda::getCurrentCUDAStream()>>>(
static_cast<int>(count),
- getTensorInfo<scalar_t, int>(grad_output),
- getTensorInfo<scalar_t, int>(input),
- getTensorInfo<scalar_t, int>(grid),
+ getTensorInfo<const scalar_t, int>(grad_output),
+ getTensorInfo<const scalar_t, int>(input),
+ getTensorInfo<const scalar_t, int>(grid),
input_requires_grad ? getTensorInfo<scalar_t, int>(grad_input) : TensorInfo<scalar_t, int>(),
getTensorInfo<scalar_t, int>(grad_grid),
static_cast<GridSamplerInterpolation>(interpolation_mode),
@@ -883,9 +883,9 @@ void launch_grid_sampler_2d_backward_kernel(
grid_sampler_2d_backward_kernel<scalar_t>
<<<GET_BLOCKS(count, 256), 256, 0, at::cuda::getCurrentCUDAStream()>>>(
count,
- getTensorInfo<scalar_t, int64_t>(grad_output),
- getTensorInfo<scalar_t, int64_t>(input),
- getTensorInfo<scalar_t, int64_t>(grid),
+ getTensorInfo<const scalar_t, int64_t>(grad_output),
+ getTensorInfo<const scalar_t, int64_t>(input),
+ getTensorInfo<const scalar_t, int64_t>(grid),
input_requires_grad ? getTensorInfo<scalar_t, int64_t>(grad_input) : TensorInfo<scalar_t, int64_t>(),
getTensorInfo<scalar_t, int64_t>(grad_grid),
static_cast<GridSamplerInterpolation>(interpolation_mode),
@@ -927,9 +927,9 @@ void launch_grid_sampler_3d_backward_kernel(
grid_sampler_3d_backward_kernel<scalar_t>
<<<GET_BLOCKS(count, 256), 256, 0, at::cuda::getCurrentCUDAStream()>>>(
static_cast<int>(count),
- getTensorInfo<scalar_t, int>(grad_output),
- getTensorInfo<scalar_t, int>(input),
- getTensorInfo<scalar_t, int>(grid),
+ getTensorInfo<const scalar_t, int>(grad_output),
+ getTensorInfo<const scalar_t, int>(input),
+ getTensorInfo<const scalar_t, int>(grid),
input_requires_grad ? getTensorInfo<scalar_t, int>(grad_input) : TensorInfo<scalar_t, int>(),
getTensorInfo<scalar_t, int>(grad_grid),
static_cast<GridSamplerInterpolation>(interpolation_mode),
@@ -942,9 +942,9 @@ void launch_grid_sampler_3d_backward_kernel(
grid_sampler_3d_backward_kernel<scalar_t>
<<<GET_BLOCKS(count, 256), 256, 0, at::cuda::getCurrentCUDAStream()>>>(
count,
- getTensorInfo<scalar_t, int64_t>(grad_output),
- getTensorInfo<scalar_t, int64_t>(input),
- getTensorInfo<scalar_t, int64_t>(grid),
+ getTensorInfo<const scalar_t, int64_t>(grad_output),
+ getTensorInfo<const scalar_t, int64_t>(input),
+ getTensorInfo<const scalar_t, int64_t>(grid),
input_requires_grad ? getTensorInfo<scalar_t, int64_t>(grad_input) : TensorInfo<scalar_t, int64_t>(),
getTensorInfo<scalar_t, int64_t>(grad_grid),
static_cast<GridSamplerInterpolation>(interpolation_mode),
diff --git a/aten/src/ATen/native/cudnn/GridSampler.cpp b/aten/src/ATen/native/cudnn/GridSampler.cpp
index 085e3c06b2..af6b13567e 100644
--- a/aten/src/ATen/native/cudnn/GridSampler.cpp
+++ b/aten/src/ATen/native/cudnn/GridSampler.cpp
@@ -167,15 +167,15 @@ std::tuple<Tensor, Tensor> cudnn_grid_sampler_backward(
desc.desc(),
&one,
idesc.desc(),
- input->data_ptr(),
+ input->const_data_ptr(),
&zero,
gdesc.desc(),
grad_input_t.data_ptr(),
&one,
odesc.desc(),
- grad_output->data_ptr(),
+ grad_output->const_data_ptr(),
// intriguingly, the outputs don't need descriptors
- grid->data_ptr(),
+ grid->const_data_ptr(),
&zero,
grad_grid_t.data_ptr()));
diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py
index c18c8238b2..c8292207e6 100644
--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -18929,7 +18929,6 @@ op_db: List[OpInfo] = [
supports_out=False,
supports_gradgrad=False,
allow_cow_input_materialize_forward=[0],
- supports_cow_input_no_materialize_backward=False,
),
OpInfo(
"nn.functional.multi_head_attention_forward",
@@ -18999,7 +18998,6 @@ op_db: List[OpInfo] = [
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
- supports_cow_input_no_materialize_backward=False,
dtypes=floating_types_and(torch.float16),
backward_dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
@@ -19019,7 +19017,6 @@ op_db: List[OpInfo] = [
sample_inputs_func=sample_inputs_grid_sample,
reference_inputs_func=reference_inputs_grid_sample,
supports_gradgrad=False,
- supports_cow_input_no_materialize_backward=False,
gradcheck_nondet_tol=1e-15),
# TODO: delete this OpInfo once we add meta support for grid_sampler_3d
OpInfo(
@@ -19029,7 +19026,6 @@ op_db: List[OpInfo] = [
supports_out=False,
sample_inputs_func=sample_inputs_grid_sampler_2d,
supports_gradgrad=False,
- supports_cow_input_no_materialize_backward=False,
gradcheck_nondet_tol=1e-15),
OpInfo(
"argwhere",
@@ -19384,7 +19380,6 @@ op_db: List[OpInfo] = [
"nn.functional.ctc_loss",
dtypes=floating_types(),
supports_out=False,
- supports_cow_input_no_materialize_backward=False,
sample_inputs_func=sample_inputs_ctc_loss,
skips=(
# https://github.com/pytorch/pytorch/issues/67462
@@ -19432,7 +19427,6 @@ op_db: List[OpInfo] = [
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
assert_jit_shape_analysis=True,
- supports_cow_input_no_materialize_backward=False,
skips=(
# RuntimeError:
# undefined value tensor:
@@ -19489,7 +19483,6 @@ op_db: List[OpInfo] = [
dtypes=floating_types_and(torch.float16, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
- supports_cow_input_no_materialize_backward=False,
sample_inputs_func=sample_inputs_huber_loss,
error_inputs_func=error_inputs_huber_loss,
skips=(
@@ -19507,7 +19500,6 @@ op_db: List[OpInfo] = [
dtypes=floating_types(),
supports_out=False,
supports_gradgrad=False,
- supports_cow_input_no_materialize_backward=False,
skips=(
DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'),
)
@@ -19710,7 +19702,6 @@ op_db: List[OpInfo] = [
supports_out=False,
# RuntimeError: derivative for aten::_segment_reduce_backward is not implemented
supports_gradgrad=False,
- supports_cow_input_no_materialize_backward=False,
sample_inputs_func=sample_inputs_segment_reduce,
skips=(
# FIXME: CUDA driver API confirmed a leak in
@@ -19731,7 +19722,6 @@ op_db: List[OpInfo] = [
supports_out=False,
# RuntimeError: derivative for aten::_segment_reduce_backward is not implemented
supports_gradgrad=False,
- supports_cow_input_no_materialize_backward=False,
sample_inputs_func=partial(sample_inputs_segment_reduce, mode='offsets'),
skips=(
# FIXME: CUDA driver API confirmed a leak in | 2.41.0 |
16ca546aa833b2a337e556dfb0b759f2b759a25 | Thu, 11 Apr 2024 19:10:56 +0000 | [PATCH 0030/1000] Adding health check server hook in torch elastic (#122750) (#123504) | Summary: Adds a hook so an external mechanism can monitor the health of the torch elastic launcher. The health check server depends on FileTimerServer to decide whether the launcher is healthy; it always reports healthy if FileTimerServer is disabled. The implementation of start_healthcheck_server is unsupported (effectively a no-op); however, a tcp/http server can be started on a specific port to monitor the aliveness of the worker watchdog and take action accordingly. Test Plan: buck test mode/opt caffe2/test/distributed/elastic/agent/server/test:local_agent_test Differential Revision: D55837899 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123504 Approved by: https://github.com/kurman | diff --git a/docs/source/elastic/agent.rst b/docs/source/elastic/agent.rst
index db1465d71e..ac42403761 100644
--- a/docs/source/elastic/agent.rst
+++ b/docs/source/elastic/agent.rst
@@ -74,3 +74,21 @@ will internally create a unique file name and set it to the environment
variable ```TORCHELASTIC_TIMER_FILE```, and this environment variable will
be propagated to the worker processes to allow them to connect to the same
named pipe that ```LocalElasticAgent``` uses.
+
+
+Health Check Server
+-------------------
+
+A health check monitoring server can be enabled in ```LocalElasticAgent```
+if an environment variable ``TORCHELASTIC_HEALTH_CHECK_PORT`` has been defined
+in the ```LocalElasticAgent``` process.
+Adding interface for health check server which can be extended by starting tcp/http
+server on the specified port number.
+Additionally, health check server will have callback to check watchdog is alive.
+
+.. automodule:: torch.distributed.elastic.agent.server.health_check_server
+
+.. autoclass:: HealthCheckServer
+ :members:
+
+.. autofunction:: torch.distributed.elastic.agent.server.health_check_server.create_healthcheck_server
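As a minimal sketch of how the documented flow is enabled (not part of the patch; the port value is only an example, mirroring the test further down in this diff):

    import os

    # Hypothetical launcher-side setup; any free port works, 12345 is only an example.
    os.environ["TORCHELASTIC_HEALTH_CHECK_PORT"] = "12345"
    # LocalElasticAgent._setup_healthcheck() (added below) reads this variable and, when it
    # is set, calls create_healthcheck_server(...) and start(); when unset, no server runs.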
diff --git a/test/distributed/elastic/agent/server/test/local_elastic_agent_test.py b/test/distributed/elastic/agent/server/test/local_elastic_agent_test.py
index 38443956f2..038f4cdcec 100644
--- a/test/distributed/elastic/agent/server/test/local_elastic_agent_test.py
+++ b/test/distributed/elastic/agent/server/test/local_elastic_agent_test.py
@@ -32,6 +32,7 @@ from torch.distributed.elastic.agent.server.api import (
)
from torch.distributed.elastic.agent.server.local_elastic_agent import (
LocalElasticAgent,
+ TORCHELASTIC_HEALTH_CHECK_PORT,
TORCHELASTIC_TIMER_FILE,
)
from torch.distributed.elastic.multiprocessing import DefaultLogsSpecs, Std
@@ -589,6 +590,53 @@ class LocalElasticAgentTest(unittest.TestCase):
backend="c10d", test_to_run=self.run_agent_local_watchdog_setup_disabled
)
+ def run_agent_healthcheck_setup_enabled(self):
+ # Set the env for healthcheck
+ healthcheck_port_env_name = TORCHELASTIC_HEALTH_CHECK_PORT
+ os.environ[healthcheck_port_env_name] = "12345"
+ # Run the agent
+ node_conf = Conf(entrypoint=_check_local_watchdog_setup, local_world_size=1, args=(TORCHELASTIC_HEALTH_CHECK_PORT, True))
+ spec = self.get_worker_spec(node_conf, max_restarts=2)
+ agent = self.get_agent(spec, node_config=node_conf)
+ res = agent.run()
+ self.assertFalse(res.is_failed())
+
+ def run_agent_healthcheck_setup_disabled(self):
+ # Do not set the env for healthcheck
+ healthcheck_port_env_name = TORCHELASTIC_HEALTH_CHECK_PORT
+ if healthcheck_port_env_name in os.environ:
+ del os.environ[healthcheck_port_env_name]
+ # Run the agent
+ node_conf = Conf(entrypoint=_check_local_watchdog_setup, local_world_size=1, args=(TORCHELASTIC_HEALTH_CHECK_PORT, False))
+ spec = self.get_worker_spec(node_conf, max_restarts=2)
+ agent = self.get_agent(spec, node_config=node_conf)
+ res = agent.run()
+ self.assertFalse(res.is_failed())
+
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ def test_run_agent_healthcheck_setup_enabled_etcd(self):
+ self.run_test_with_backend(
+ backend="etcd", test_to_run=self.run_agent_healthcheck_setup_enabled
+ )
+
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ def test_run_agent_healthcheck_setup_enabled_c10d(self):
+ self.run_test_with_backend(
+ backend="c10d", test_to_run=self.run_agent_healthcheck_setup_enabled
+ )
+
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ def test_run_agent_healthcheck_setup_disabled_etcd(self):
+ self.run_test_with_backend(
+ backend="etcd", test_to_run=self.run_agent_healthcheck_setup_disabled
+ )
+
+ @skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
+ def test_run_agent_healthcheck_setup_disabled_c10d(self):
+ self.run_test_with_backend(
+ backend="c10d", test_to_run=self.run_agent_healthcheck_setup_disabled
+ )
+
@skip_but_pass_in_sandcastle_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_run_check_env_function_etcd(self):
self.run_test_with_backend(
diff --git a/torch/distributed/elastic/agent/server/health_check_server.py b/torch/distributed/elastic/agent/server/health_check_server.py
new file mode 100644
index 0000000000..0c2dea63a2
--- /dev/null
+++ b/torch/distributed/elastic/agent/server/health_check_server.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+from typing import Callable
+
+from torch.distributed.elastic.utils.logging import get_logger
+
+log = get_logger(__name__)
+
+
+class HealthCheckServer:
+ """
+ Interface for health check monitoring server, which can be extended
+ by starting tcp/http server on the specified port.
+
+ Args:
+
+ alive_callback: Callable[[], int], callback to last progress time of agent
+
+ port: int, port number to start tcp/http server
+
+ timeout: int, timeout seconds to decide agent is alive/dead
+ """
+
+ _alive_callback: Callable[[], int]
+ _port: int
+ _timeout: int
+
+ def __init__(
+ self, alive_callback: Callable[[], int], port: int, timeout: int
+ ) -> None:
+ self._alive_callback = alive_callback
+ self._port = port
+ self._timeout = timeout
+
+ def start(self) -> None:
+ """
+ Unsupported functionality for Pytorch, doesn't start any health check server
+ """
+ log.warning("No health check server started")
+
+ def stop(self) -> None:
+ """
+ Function to stop health check server
+ """
+ log.info("Stopping noop health check server.")
+
+
+def create_healthcheck_server(
+ alive_callback: Callable[[], int],
+ port: int,
+ timeout: int,
+) -> HealthCheckServer:
+ """
+ creates health check server object
+ """
+ return HealthCheckServer(alive_callback, port, timeout)
diff --git a/torch/distributed/elastic/agent/server/local_elastic_agent.py b/torch/distributed/elastic/agent/server/local_elastic_agent.py
index 349af333f7..60469c09dd 100644
--- a/torch/distributed/elastic/agent/server/local_elastic_agent.py
+++ b/torch/distributed/elastic/agent/server/local_elastic_agent.py
@@ -12,6 +12,7 @@ import os
import signal
import socket
from string import Template
+import time
import uuid
from typing import Any, Dict, Optional, Tuple
@@ -25,6 +26,10 @@ from torch.distributed.elastic.agent.server.api import (
WorkerSpec,
WorkerState,
)
+from torch.distributed.elastic.agent.server.health_check_server import (
+ create_healthcheck_server,
+ HealthCheckServer,
+)
from torch.distributed.elastic.events.api import EventMetadataValue
from torch.distributed.elastic.metrics.api import prof
from torch.distributed.elastic.multiprocessing import PContext, start_processes, LogsSpecs
@@ -37,9 +42,11 @@ __all__ = [
"LocalElasticAgent",
"TORCHELASTIC_ENABLE_FILE_TIMER",
"TORCHELASTIC_TIMER_FILE",
+ "TORCHELASTIC_HEALTH_CHECK_PORT",
]
TORCHELASTIC_ENABLE_FILE_TIMER = "TORCHELASTIC_ENABLE_FILE_TIMER"
+TORCHELASTIC_HEALTH_CHECK_PORT = "TORCHELASTIC_HEALTH_CHECK_PORT"
TORCHELASTIC_TIMER_FILE = "TORCHELASTIC_TIMER_FILE"
class LocalElasticAgent(SimpleElasticAgent):
@@ -146,6 +153,7 @@ class LocalElasticAgent(SimpleElasticAgent):
self._log_line_prefix_template = log_line_prefix_template
self._worker_watchdog: Optional[timer.FileTimerServer] = None
self._logs_specs = logs_specs
+ self._health_check_server: Optional[HealthCheckServer] = None
def _setup_local_watchdog(self, envs: Dict[int, Dict[str, str]]) -> None:
@@ -171,6 +179,37 @@ class LocalElasticAgent(SimpleElasticAgent):
for worker_env in envs.values():
worker_env[watchdog_file_env_name] = watchdog_file_path
+ @staticmethod
+ def _get_current_time_secs() -> int:
+ return int(time.time())
+
+ def _setup_healthcheck(self) -> None:
+ healthcheck_port_env_name = TORCHELASTIC_HEALTH_CHECK_PORT
+ healthcheck_port = os.getenv(healthcheck_port_env_name)
+ if healthcheck_port is not None:
+ logger.info(
+ "Found healthcheck port %s: %s",
+ healthcheck_port_env_name,
+ healthcheck_port,
+ )
+ if self._worker_watchdog is None:
+ logger.info("FileTimerServer doesn't exist, using current time as dummy callback")
+ alive_callback = LocalElasticAgent._get_current_time_secs
+ else:
+ alive_callback = self._worker_watchdog.get_last_progress_time
+
+ self._health_check_server = create_healthcheck_server(
+ alive_callback=alive_callback,
+ port=int(healthcheck_port),
+ timeout=60,
+ )
+ self._health_check_server.start()
+ else:
+ logger.info(
+ "Environment variable '%s' not found. Do not start health check.",
+ healthcheck_port_env_name,
+ )
+
def _get_fq_hostname(self) -> str:
return socket.getfqdn(socket.gethostname())
@@ -273,6 +312,7 @@ class LocalElasticAgent(SimpleElasticAgent):
args[local_rank] = tuple(worker_args)
self._setup_local_watchdog(envs=envs)
+ self._setup_healthcheck()
assert spec.entrypoint is not None
assert self._logs_specs is not None
@@ -292,6 +332,9 @@ class LocalElasticAgent(SimpleElasticAgent):
if self._worker_watchdog is not None:
self._worker_watchdog.stop()
self._worker_watchdog = None
+ if self._health_check_server is not None:
+ self._health_check_server.stop()
+ self._health_check_server = None
if self._pcontext:
self._pcontext.close(death_sig)
if self._rdzv_handler:
diff --git a/torch/distributed/elastic/timer/file_based_local_timer.py b/torch/distributed/elastic/timer/file_based_local_timer.py
index a52a386645..2842c72f37 100644
--- a/torch/distributed/elastic/timer/file_based_local_timer.py
+++ b/torch/distributed/elastic/timer/file_based_local_timer.py
@@ -174,6 +174,7 @@ class FileTimerServer:
# For test only. Process all requests and stop the server.
self._run_once = False
self._log_event = log_event if log_event is not None else lambda name, request: None
+ self._last_progress_time = int(time.time())
def start(self) -> None:
@@ -237,6 +238,7 @@ class FileTimerServer:
self._run_watchdog(fd)
if run_once:
break
+ self._last_progress_time = int(time.time())
except Exception:
logger.exception("Error running watchdog")
@@ -343,3 +345,6 @@ class FileTimerServer:
except Exception:
logger.exception("Error terminating pid=%s", worker_pid)
return False
+
+ def get_last_progress_time(self) -> int:
+ return self._last_progress_time | 2.41.0 |
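A minimal sketch of how the new hook could be extended outside of PyTorch, assuming a build that contains the patch above. The base HealthCheckServer is deliberately a no-op, so a real integration would subclass it (or swap out create_healthcheck_server); the HTTP handler, bind address, and staleness comparison below are illustrative assumptions, not PyTorch APIs:

    import threading
    import time
    from http.server import BaseHTTPRequestHandler, HTTPServer

    from torch.distributed.elastic.agent.server.health_check_server import HealthCheckServer


    class HttpHealthCheckServer(HealthCheckServer):
        """Hypothetical subclass: answer 200 while the watchdog has made progress recently."""

        def start(self) -> None:
            alive_callback, timeout = self._alive_callback, self._timeout

            class Handler(BaseHTTPRequestHandler):
                def do_GET(self):
                    # alive_callback() returns the last progress time in epoch seconds.
                    healthy = (time.time() - alive_callback()) < timeout
                    self.send_response(200 if healthy else 503)
                    self.end_headers()

            # Serve on the port the agent was configured with, off the main thread.
            self._httpd = HTTPServer(("0.0.0.0", self._port), Handler)
            threading.Thread(target=self._httpd.serve_forever, daemon=True).start()

        def stop(self) -> None:
            self._httpd.shutdown()

In the patch itself, LocalElasticAgent wires alive_callback to FileTimerServer.get_last_progress_time (or to the current time when the file-based watchdog is disabled) and enables the whole path only when TORCHELASTIC_HEALTH_CHECK_PORT is set.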
a7fd20aa11c02a10b1223caeefb645de482bcbc | Wed, 10 Apr 2024 21:58:49 -0700 | [PATCH 0031/1000] [dynamo] Support autograd.FunctionCtx.needs_input_grad (#123700) | Pull Request resolved: https://github.com/pytorch/pytorch/pull/123700 Approved by: https://github.com/anijain2305 | diff --git a/test/dynamo/test_autograd_function.py b/test/dynamo/test_autograd_function.py
index 8b77af1a35..beca818570 100644
--- a/test/dynamo/test_autograd_function.py
+++ b/test/dynamo/test_autograd_function.py
@@ -818,6 +818,30 @@ class AutogradFunctionTests(torch._dynamo.test_case.TestCase):
foo(torch.randn(2, requires_grad=True))
self.assertEqual(cnts.frame_count, 1)
+ def test_needs_input_grad(self):
+ cnt = torch._dynamo.testing.CompileCounter()
+
+ class NeedsInputGradFunc(torch.autograd.Function):
+ @staticmethod
+ def forward(ctx, foo):
+ result = foo + foo
+ ctx.save_for_backward(result)
+ return result
+
+ @staticmethod
+ @torch.compile(backend=cnt, fullgraph=True)
+ def backward(ctx, grad_output):
+ (result,) = ctx.saved_tensors
+ if ctx.needs_input_grad[0]:
+ return grad_output * result.sin()
+ return None
+
+ x = torch.randn(10, requires_grad=True)
+ NeedsInputGradFunc.apply(x).sum().backward()
+ self.assertEqual(x.grad.shape, x.shape)
+ self.assertEqual(cnt.frame_count, 1)
+ self.assertEqual(cnt.op_count, 2)
+
def test_repeated_save_for_backward_calls(self):
from torch.autograd import Function
diff --git a/test/dynamo_expected_failures/TestAutograd.test_hook_none b/test/dynamo_expected_failures/TestAutograd.test_hook_none
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/torch/_dynamo/variables/higher_order_ops.py b/torch/_dynamo/variables/higher_order_ops.py
index b0be0d79fb..471fff7d8d 100644
--- a/torch/_dynamo/variables/higher_order_ops.py
+++ b/torch/_dynamo/variables/higher_order_ops.py
@@ -1459,7 +1459,7 @@ class AutogradFunctionApplyVariable(VariableTracker):
)
fwd_src = AttrSource(self.parent_source, member="forward")
- ctx = AutogradFunctionContextVariable.create(tx)
+ ctx = AutogradFunctionContextVariable.create(tx, args, kwargs)
if isinstance(self.fwd_graph, types.FunctionType):
fwd_fn = UserFunctionVariable(self.fwd_graph, source=fwd_src)
fwd_args = [ctx, *args]
diff --git a/torch/_dynamo/variables/misc.py b/torch/_dynamo/variables/misc.py
index f58db0f24a..97498b1c2f 100644
--- a/torch/_dynamo/variables/misc.py
+++ b/torch/_dynamo/variables/misc.py
@@ -395,7 +395,7 @@ class AutogradFunctionVariable(VariableTracker):
source = None
fn = self.fn_cls.forward
- ctx = AutogradFunctionContextVariable.create(tx)
+ ctx = AutogradFunctionContextVariable.create(tx, args, kwargs)
args = [ctx, *args]
if isinstance(fn, types.FunctionType):
return variables.UserFunctionVariable(fn, source=source).call_function(
@@ -469,15 +469,23 @@ class AutogradFunctionContextVariable(UserDefinedObjectVariable):
inference=False,
proxy=None,
saved_tensors=None,
+ needs_input_grad=None,
**kwargs,
):
super().__init__(value=value, value_type=value_type, **kwargs)
self.inference = inference
self.proxy = proxy
self.saved_tensors = saved_tensors
+ self.needs_input_grad = needs_input_grad
@staticmethod
- def create(tx):
+ def create(tx, args=None, kwargs=None):
+ needs_input_grad = None
+ if args and not kwargs:
+ needs_input_grad = tuple(
+ isinstance(x, variables.TensorVariable) and x.requires_grad
+ for x in args
+ )
proxy = tx.output.create_proxy(
"call_function", torch.autograd.function.FunctionCtx, tuple(), {}
)
@@ -489,10 +497,12 @@ class AutogradFunctionContextVariable(UserDefinedObjectVariable):
inference=True,
proxy=proxy,
saved_tensors=SavedTensorBox(),
+ needs_input_grad=needs_input_grad,
),
{},
)
proxy.node.meta["example_value"] = out.value
+
return out
def as_proxy(self):
@@ -534,6 +544,15 @@ class AutogradFunctionContextVariable(UserDefinedObjectVariable):
)
if name == "saved_tensors" and self.saved_tensors is not None:
return variables.TupleVariable(list(self.saved_tensors.tensors))
+ if name == "needs_input_grad":
+ if self.needs_input_grad is not None:
+ return variables.ConstantVariable.create(self.needs_input_grad)
+ if self.source:
+ from .builder import VariableBuilder
+
+ return VariableBuilder(tx, AttrSource(self.source, "needs_input_grad"))(
+ self.value.needs_input_grad
+ )
return super().var_getattr(tx, name)
| 2.41.0 |
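For reference, the attribute this change teaches dynamo to model is the eager-mode ctx.needs_input_grad tuple: one boolean per forward input, derived in the new AutogradFunctionContextVariable from which tensor arguments require grad. A small eager sketch (not from the PR; the compiled path is what the new test above exercises):

    import torch

    class Scale(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, w):
            ctx.save_for_backward(w)
            return x * w

        @staticmethod
        def backward(ctx, grad_out):
            (w,) = ctx.saved_tensors
            # One flag per forward input: here only x requires grad -> (True, False).
            grad_x = grad_out * w if ctx.needs_input_grad[0] else None
            grad_w = None  # skipped, since ctx.needs_input_grad[1] is False
            return grad_x, grad_w

    x = torch.randn(3, requires_grad=True)
    w = torch.randn(3)  # plain tensor, no grad needed
    Scale.apply(x, w).sum().backward()
    assert x.grad is not None and w.grad is None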
82d20c207a5fdbc48263734ccc82db6b4ad24c0 | Thu, 11 Apr 2024 19:37:11 +0000 | [PATCH 0032/1000] [NEON] Remove implicit type promotion in `Vectorized<c10::Half>::operator!=` (#123864) | To make the code compilable with `gcc`, which, unlike `clang`, does not allow transparent type promotion between vectorized NEON types of the same size; see https://godbolt.org/z/xoasoGM81 for an example. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123864 Approved by: https://github.com/malfet | diff --git a/aten/src/ATen/cpu/vec/vec256/vec256_half_neon.h b/aten/src/ATen/cpu/vec/vec256/vec256_half_neon.h
index bb8716ecb7..aaf1d5995f 100644
--- a/aten/src/ATen/cpu/vec/vec256/vec256_half_neon.h
+++ b/aten/src/ATen/cpu/vec/vec256/vec256_half_neon.h
@@ -565,9 +565,9 @@ class Vectorized<c10::Half> {
}
Vectorized<c10::Half> operator!=(const Vectorized<c10::Half>& other) const {
- float32x4_t r0 = vreinterpretq_f16_u16(
+ float16x8_t r0 = vreinterpretq_f16_u16(
vmvnq_u16(vceqq_f16(values.val[0], other.values.val[0])));
- float32x4_t r1 = vreinterpretq_f16_u16(
+ float16x8_t r1 = vreinterpretq_f16_u16(
vmvnq_u16(vceqq_f16(values.val[1], other.values.val[1])));
return Vectorized<c10::Half>(r0, r1);
} | 2.41.0 |
a013f69bb9647a8defba8d0733475a6fdcd6a73 | Wed, 10 Apr 2024 15:10:59 -0700 | [PATCH 0034/1000] dynamo assertion that graph has no fake-tensor constants should check for subclasses (#118644) | This would have caught some of the nasty errors in https://github.com/pytorch/pytorch/pull/118191 Pull Request resolved: https://github.com/pytorch/pytorch/pull/118644 Approved by: https://github.com/tugsbayasgalan, https://github.com/zou3519 ghstack dependencies: #118647 | diff --git a/torch/_dynamo/utils.py b/torch/_dynamo/utils.py
index 809adc6b04..ab43664f5c 100644
--- a/torch/_dynamo/utils.py
+++ b/torch/_dynamo/utils.py
@@ -1889,7 +1889,7 @@ def get_real_value(node, tracer):
def assert_no_fake_params_or_buffers(gm):
- from torch._subclasses.fake_tensor import FakeTensorConfig
+ from torch._subclasses.fake_tensor import FakeTensorConfig, is_fake
def stack_or_hint(t):
if FakeTensorConfig.debug:
@@ -1900,12 +1900,12 @@ def assert_no_fake_params_or_buffers(gm):
return "Enable TORCH_FAKE_TENSOR_DEBUG=1 to get creation stack traces on fake tensors."
for name, buffer in gm.named_buffers():
- assert not isinstance(
- buffer, torch._subclasses.FakeTensor
+ assert not is_fake(
+ buffer
), f"Unexpected fake buffer {name} {stack_or_hint(buffer)}"
for name, param in gm.named_parameters():
- assert not isinstance(
- param, torch._subclasses.FakeTensor
+ assert not is_fake(
+ param
), f"Unexpected fake param {name} {stack_or_hint(param)}"
| 2.41.0 |
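The reason for switching the assertion above from `isinstance(..., FakeTensor)` to `is_fake` is that a fake parameter or buffer may be wrapped inside a tensor subclass, which `is_fake` knows how to look through. A small sketch of the basic behaviour (plain fake tensors only, the subclass case omitted):

```python
import torch
from torch._subclasses.fake_tensor import FakeTensorMode, is_fake

mode = FakeTensorMode()
fake = mode.from_tensor(torch.randn(2, 2))  # a FakeTensor carrying only metadata

assert is_fake(fake)
assert not is_fake(torch.randn(2, 2))
# For a tensor subclass wrapping a FakeTensor, isinstance(t, FakeTensor) is
# False while is_fake(t) is still True, which is what the assertion relies on.
```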
cd06f56b1d12fbb052eb5cd66756f8a5b90096a | Thu, 11 Apr 2024 20:24:47 +0000  | [PATCH 0035/1000] [ez] test_profiler in serial (#123665) | Add test_profiler to the serial list since we keep needing to reopen disable issues and I think it's due to being incompatible with parallelism. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123665 Approved by: https://github.com/ZainRizvi, https://github.com/huydhn  | diff --git a/test/run_test.py b/test/run_test.py
index cd77683c7c..4d0b8b58e9 100755
--- a/test/run_test.py
+++ b/test/run_test.py
@@ -248,6 +248,7 @@ CI_SERIAL_LIST = [
"inductor/test_torchinductor", # OOM on test_large_block_sizes
"inductor/test_torchinductor_dynamic_shapes", # OOM on test_large_block_sizes
"inductor/test_torchinductor_codegen_dynamic_shapes", # OOM on test_large_block_sizes
+ "test_profiler", # test_source_multithreaded is probably not compatible with parallelism
]
# A subset of onnx tests that cannot run in parallel due to high memory usage.
ONNX_SERIAL_LIST = [ | 2.41.0 |
70bf23b7b74bd7a94f5095fb54857873aa1957b | Wed, 10 Apr 2024 16:51:37 -0700 | [PATCH 0036/1000] [dynamo] apply certain bytecode cleaning transformations unconditionally (#123785) | Pull Request resolved: https://github.com/pytorch/pytorch/pull/123785 Approved by: https://github.com/jansel | diff --git a/torch/_dynamo/bytecode_transformation.py b/torch/_dynamo/bytecode_transformation.py
index 32c77298d3..30b7866ade 100644
--- a/torch/_dynamo/bytecode_transformation.py
+++ b/torch/_dynamo/bytecode_transformation.py
@@ -1172,14 +1172,14 @@ def cleaned_instructions(code, safe=False) -> List[Instruction]:
if not safe:
if sys.version_info < (3, 11):
remove_load_call_method(instructions)
- else:
- remove_jump_if_none(instructions)
- update_offsets(instructions)
- devirtualize_jumps(instructions)
if sys.version_info < (3, 12):
explicit_super(code, instructions)
- else:
- remove_binary_store_slice(instructions)
+ if sys.version_info >= (3, 11):
+ remove_jump_if_none(instructions)
+ update_offsets(instructions)
+ devirtualize_jumps(instructions)
+ if sys.version_info >= (3, 12):
+ remove_binary_store_slice(instructions)
return instructions
| 2.41.0 |
7fac76fc259394136bc77b3e39d5705919e5c4c | Thu, 11 Apr 2024 20:52:02 +0000  | [PATCH 0037/1000] [DCP] fixes for _load_state_dict_keys and supports nested keys (#123679) | Fixes some issues with `_load_state_dict_keys`, including: * updates broken test, which was failing due to incorrect parameters * adds support for specifying nested keys, e.g. load state dict keys can now specify something like `"optimizer.state"`, which loads all keys under `optimizer.state`. * updates call site to use the private implementation of `_load_state_dict`, which properly handles empty state dicts (otherwise the keys are ignored) Big shout out to @diego-urgell who not only identified current issues, but recommended the right solutions! Pull Request resolved: https://github.com/pytorch/pytorch/pull/123679 Approved by: https://github.com/diego-urgell, https://github.com/wz337  | diff --git a/test/distributed/checkpoint/e2e/test_e2e_save_and_load.py b/test/distributed/checkpoint/e2e/test_e2e_save_and_load.py
index 03553adaf1..8d4733b827 100644
--- a/test/distributed/checkpoint/e2e/test_e2e_save_and_load.py
+++ b/test/distributed/checkpoint/e2e/test_e2e_save_and_load.py
@@ -351,8 +351,20 @@ class TestE2ESaveAndLoad(DTensorTestBase, VerifyStateDictMixin):
self._verify_msd(model_sd, dist_msd)
# another way
- loaded_model_sd = _load_state_dict_from_keys("model", model_sd)
- self._verify_msd(model_sd, loaded_model_sd)
+ loaded_model_sd = _load_state_dict_from_keys(
+ "model", checkpoint_id=self.temp_dir
+ )["model"]
+ self._verify_msd(model_sd, loaded_model_sd, offload_to_cpu=True)
+
+ loaded_optim_state = _load_state_dict_from_keys(
+ "optimizer.state", checkpoint_id=self.temp_dir
+ )["optimizer"]["state"]
+ self.assertNotIn("param_groups", loaded_optim_state)
+ for k, v in dist_optim.state_dict()["state"].items():
+ for optim_key in ["exp_avg", "exp_avg_sq", "step"]:
+ self._compare_tensor(
+ loaded_optim_state[k][optim_key], v[optim_key], offload_to_cpu=True
+ )
class TestNoCPU(DTensorTestBase):
diff --git a/torch/distributed/checkpoint/default_planner.py b/torch/distributed/checkpoint/default_planner.py
index bebdeab1a4..5ace17fbf4 100644
--- a/torch/distributed/checkpoint/default_planner.py
+++ b/torch/distributed/checkpoint/default_planner.py
@@ -234,6 +234,29 @@ class _EmptyStateDictLoadPlanner(DefaultLoadPlanner):
self.keys = keys
super().__init__(*args, **kwargs)
+ def _should_include_key(self, key: str, metadata: Metadata) -> bool:
+ if self.keys is None:
+ return True
+
+ if key in self.keys:
+ True
+
+ unflattened_keys: List[str] = []
+ planner_data = metadata.planner_data.get(key)
+ for unflattened_key in planner_data:
+ if unflattened_keys:
+ unflattened_keys.append(
+ ".".join([unflattened_keys[-1], unflattened_key])
+ )
+
+ else:
+ unflattened_keys.append(unflattened_key)
+
+ if any(unflattened_key in self.keys for unflattened_key in unflattened_keys):
+ return True
+
+ return False
+
def set_up_planner(
self,
state_dict: STATE_DICT_TYPE,
@@ -244,7 +267,7 @@ class _EmptyStateDictLoadPlanner(DefaultLoadPlanner):
# rebuild the state dict from the metadata
for k, v in metadata.state_dict_metadata.items():
- if self.keys and k not in self.keys:
+ if not self._should_include_key(k, metadata):
continue
if isinstance(v, TensorStorageMetadata):
diff --git a/torch/distributed/checkpoint/state_dict_loader.py b/torch/distributed/checkpoint/state_dict_loader.py
index b3b9faba66..b7e1337e6c 100644
--- a/torch/distributed/checkpoint/state_dict_loader.py
+++ b/torch/distributed/checkpoint/state_dict_loader.py
@@ -1,6 +1,6 @@
import os
import warnings
-from typing import Any, cast, Dict, Optional, Union
+from typing import Any, cast, Dict, Optional, Set, Union
import torch
import torch.distributed as dist
@@ -228,7 +228,7 @@ def _load_state_dict(
def _load_state_dict_from_keys(
- keys: Optional[set] = None,
+ keys: Optional[Union[Set[str], str]] = None,
*,
checkpoint_id: Union[str, os.PathLike, None] = None,
storage_reader: Optional[StorageReader] = None,
@@ -261,7 +261,9 @@ def _load_state_dict_from_keys(
Rank 0 is assumed to be the coordinator rank.
Args:
- state_dict (Dict[str, Any]): The state_dict to save.
+ keys (Optional[Union[Set[str], str]]):
+ Loads any key specified in this set. If no keys are specified, the entire checkpoint
+ is loaded.
checkpoint_id (Union[str, os.PathLike, None]):
The ID of this checkpoint instance. The meaning of the checkpoint_id
depends on the storage. It can be a path to a folder or to a file.
@@ -283,12 +285,26 @@ def _load_state_dict_from_keys(
"torch.distributed.checkpoint._load_state_dict_from_keys"
)
+ no_dist = not (dist.is_available() and dist.is_initialized())
+ if no_dist:
+ warnings.warn(
+ "torch.distributed is unavailable or uninitialized, assuming the intent is to load in a single process."
+ )
+
+ storage_reader = cast(
+ StorageReader, _storage_setup(storage_reader, checkpoint_id, reader=True)
+ )
+
+ if isinstance(keys, str):
+ keys = {keys}
+
sd: Dict[str, Any] = {}
- load(
- sd,
+ _load_state_dict(
+ state_dict=sd,
storage_reader=storage_reader,
- planner=_EmptyStateDictLoadPlanner(keys=keys or set()),
process_group=process_group,
+ no_dist=no_dist,
+ planner=_EmptyStateDictLoadPlanner(keys=keys or set()),
)
return sd
diff --git a/torch/distributed/checkpoint/state_dict_saver.py b/torch/distributed/checkpoint/state_dict_saver.py
index 3a2656c9fc..b79ce3a496 100644
--- a/torch/distributed/checkpoint/state_dict_saver.py
+++ b/torch/distributed/checkpoint/state_dict_saver.py
@@ -163,7 +163,7 @@ def async_save(
planner: Optional[SavePlanner] = None,
process_group: Optional[dist.ProcessGroup] = None,
) -> Future:
- """Asynchronous version of ``save_state_dict``. This code first de-stages the state_dict on CPU, and then calls
+ """Asynchronous version of ``save``. This code first de-stages the state_dict on CPU, and then calls
`save` in a separate thread.
.. warning::
diff --git a/torch/testing/_internal/distributed/common_state_dict.py b/torch/testing/_internal/distributed/common_state_dict.py
index aa05792dc1..b1cfda3e1c 100644
--- a/torch/testing/_internal/distributed/common_state_dict.py
+++ b/torch/testing/_internal/distributed/common_state_dict.py
@@ -21,9 +21,13 @@ from torch.distributed.checkpoint.state_dict import (
class VerifyStateDictMixin:
- def _compare_tensor(self, orig_tensor, dist_tensor):
+ def _compare_tensor(self, orig_tensor, dist_tensor, offload_to_cpu=False):
if isinstance(dist_tensor, (DTensor, ShardedTensor)):
dist_tensor = _gather_state_dict({"mykey": dist_tensor}).pop("mykey")
+
+ if offload_to_cpu:
+ orig_tensor = orig_tensor.cpu()
+ dist_tensor = dist_tensor.cpu()
self.assertTrue(isinstance(dist_tensor, torch.Tensor))
self.assertTrue(torch.allclose(orig_tensor, dist_tensor))
@@ -32,6 +36,7 @@ class VerifyStateDictMixin:
msd: Dict[str, Any],
dist_msd: Dict[str, Any],
options: StateDictOptions = StateDictOptions(),
+ offload_to_cpu=False,
) -> None:
if not options.ignore_frozen_params:
self.assertEqual(len(msd), len(dist_msd))
@@ -39,7 +44,7 @@ class VerifyStateDictMixin:
dist_param = dist_msd.get(fqn, None)
if not options.ignore_frozen_params:
self.assertIsNotNone(dist_param, f"{fqn=}")
- self._compare_tensor(param, dist_param)
+ self._compare_tensor(param, dist_param, offload_to_cpu)
elif dist_param is None:
self.assertFalse(param.requires_grad, f"{fqn=}")
| 2.41.0 |
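A hedged usage sketch of the nested-key loading added above; the checkpoint path is hypothetical and `_load_state_dict_from_keys` remains a private API:

```python
from torch.distributed.checkpoint.state_dict_loader import _load_state_dict_from_keys

# Load only the entries stored under "optimizer.state" from an existing
# checkpoint; without an initialized process group this warns and loads in a
# single process, mirroring the updated end-to-end test above.
partial = _load_state_dict_from_keys(
    "optimizer.state", checkpoint_id="/path/to/checkpoint"  # hypothetical path
)
optim_state = partial["optimizer"]["state"]
```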
3070e2753cfab9b09449f50aad9154bc92eaef5 | Tue, 9 Apr 2024 11:23:37 -0700 | [PATCH 0038/1000] [DCP] Adds better handling in logging of specific kwargs (#123658) | Adds additional signpost integrations to DCP Logger, to add support for MLU and metric collection. Differential Revision: [D55803461](https://our.internmc.facebook.com/intern/diff/D55803461/) Pull Request resolved: https://github.com/pytorch/pytorch/pull/123658 Approved by: https://github.com/fegin | diff --git a/torch/distributed/c10d_logger.py b/torch/distributed/c10d_logger.py
index 3d59bdcd76..5d2aa9b629 100644
--- a/torch/distributed/c10d_logger.py
+++ b/torch/distributed/c10d_logger.py
@@ -47,15 +47,16 @@ _c10d_logger = _get_or_create_logger()
def _get_msg_dict(func_name, *args, **kwargs) -> Dict[str, Any]:
if dist.is_initialized():
+ group = kwargs.get("group") or kwargs.get("process_group")
msg_dict = {
"func_name": f"{func_name}",
"args": f"{args}, {kwargs}",
"pg_name": f"{dist._get_process_group_name(kwargs.get('pg'))}", # type: ignore[arg-type]
- "backend": f"{dist.get_backend(kwargs.get('group') or kwargs.get('process_group'))}",
+ "backend": f"{dist.get_backend(group)}",
"world_size": f"{dist.get_world_size()}",
- "group_size": f"{dist.get_world_size(kwargs.get('group'))}",
+ "group_size": f"{dist.get_world_size(group)}",
"global_rank": f"{dist.get_rank()}",
- "local_rank": f"{dist.get_rank(kwargs.get('group'))}",
+ "local_rank": f"{dist.get_rank(group)}",
}
if msg_dict["backend"] == "nccl":
nccl_version = torch.cuda.nccl.version()
diff --git a/torch/distributed/checkpoint/logger.py b/torch/distributed/checkpoint/logger.py
index 9d5ae88230..99030db864 100644
--- a/torch/distributed/checkpoint/logger.py
+++ b/torch/distributed/checkpoint/logger.py
@@ -25,10 +25,11 @@ def _msg_dict_from_dcp_method_args(*args, **kwargs) -> Dict[str, Any]:
# checkpoint ID can be passed in through the serializer or through the checkpoint id directly
storage_writer = kwargs.get("storage_writer", None)
storage_reader = kwargs.get("storage_reader", None)
- if kwargs.get("checkpoint_id") is None and (
- serializer := storage_writer or storage_reader
- ):
- msg_dict["checkpoint_id"] = getattr(serializer, "checkpoint_id", None)
+ checkpoint_id = kwargs.get("checkpoint_id", None)
+ if not checkpoint_id and (serializer := storage_writer or storage_reader):
+ checkpoint_id = getattr(serializer, "checkpoint_id", None)
+
+ msg_dict["checkpoint_id"] = str(checkpoint_id)
return msg_dict
| 2.41.0 |
75a32b9f91d8d3abe215c69dbf62c2f995d183f | Thu, 11 Apr 2024 10:52:47 -0700 | [PATCH 0039/1000] [FSDP2] Fixed `is_last_backward` for 1f1b (#123857) | `FSDPState` only uses `TrainingState.PRE_BACKWARD` as a backward training state, not `TrainingState.POST_BACKWARD`, because the FSDP state itself does not run post-backward (only its `FSDPParamGroup`, which may not exist if the state does not manage any parameters). This meant that when `is_last_backward=False`, the `FSDPState` was incorrectly still in `TrainingState.PRE_BACKWARD`, and the next `_pre_forward` would not run due to the early return logic for activation checkpointing: https://github.com/pytorch/pytorch/blob/7c451798cc5a7882e95b01600aa643b042b11b1e/torch/distributed/_composable/fsdp/_fsdp_state.py#L148-L151 We fix this by always transitioning to `TrainingState.IDLE` at the end of the current backward task, regardless of `is_last_backward`. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123857 Approved by: https://github.com/weifengpy | diff --git a/test/distributed/_composable/fsdp/test_fully_shard_training.py b/test/distributed/_composable/fsdp/test_fully_shard_training.py
index 3ab1b0a546..3b9406b3ef 100644
--- a/test/distributed/_composable/fsdp/test_fully_shard_training.py
+++ b/test/distributed/_composable/fsdp/test_fully_shard_training.py
@@ -708,6 +708,51 @@ class TestFullyShardGradientAccumulation(FSDPTest):
# gradient accumulation with and without communication
_optim.zero_grad(set_to_none=(iter_idx % 2))
+ @skip_if_lt_x_gpu(2)
+ def test_1f1b_microbatching(self):
+ torch.manual_seed(42)
+ model_args = ModelArgs(dropout_p=0.0)
+ model = Transformer(model_args)
+ ref_model = copy.deepcopy(model).cuda()
+ ref_optim = torch.optim.AdamW(ref_model.parameters(), lr=1e-2)
+ for module in model.modules():
+ if isinstance(module, TransformerBlock):
+ fully_shard(module, reshard_after_forward=False)
+ fully_shard(model, reshard_after_forward=False)
+ optim = torch.optim.AdamW(model.parameters(), lr=1e-2)
+
+ num_microbatches = 3
+ local_batch_size = 2
+ torch.manual_seed(42 + self.rank + 1)
+ inps = [
+ torch.randint(
+ 0, model_args.vocab_size, (local_batch_size, 16), device="cuda"
+ )
+ for _ in range(num_microbatches)
+ ]
+
+ # Emulate the 1f1b pipeline schedule and only reduce gradients on the
+ # last microbatch
+ losses: List[torch.Tensor] = []
+ ref_losses: List[torch.Tensor] = []
+ for inp_idx, inp in enumerate(inps):
+ is_last_microbatch = inp_idx == num_microbatches - 1
+ model.set_requires_gradient_sync(is_last_microbatch)
+ model.set_is_last_backward(is_last_microbatch)
+ losses.append(model(inp).sum())
+ losses[-1].backward()
+ ref_losses.append(ref_model(inp).sum())
+ ref_losses[-1].backward()
+ for param in ref_model.parameters():
+ dist.all_reduce(param.grad)
+ param.grad.detach().div_(self.world_size)
+
+ for loss, ref_loss in zip(losses, ref_losses):
+ self.assertEqual(loss, ref_loss)
+ optim.step()
+ ref_optim.step()
+ check_sharded_parity(self, ref_model, model)
+
class TestFullyShard2DTraining(FSDPTest):
@property
diff --git a/torch/distributed/_composable/fsdp/_fsdp_param_group.py b/torch/distributed/_composable/fsdp/_fsdp_param_group.py
index 6670a3e9e3..8b6a844f47 100644
--- a/torch/distributed/_composable/fsdp/_fsdp_param_group.py
+++ b/torch/distributed/_composable/fsdp/_fsdp_param_group.py
@@ -342,7 +342,6 @@ class FSDPParamGroup:
if self._post_reduce_view_out_event is not None:
torch.cuda.current_stream().wait_event(self._post_reduce_view_out_event)
self._post_reduce_view_out_event = None
- self._training_state = TrainingState.IDLE
self._post_forward_indices.clear()
self.all_forward_output_grad_fns.clear()
diff --git a/torch/distributed/_composable/fsdp/_fsdp_state.py b/torch/distributed/_composable/fsdp/_fsdp_state.py
index b2aba4182f..087c61dd5f 100644
--- a/torch/distributed/_composable/fsdp/_fsdp_state.py
+++ b/torch/distributed/_composable/fsdp/_fsdp_state.py
@@ -201,6 +201,9 @@ class FSDPState(_State):
# Run post-backward in case forward inputs did not require
# gradient so the autograd backward did not run
state._fsdp_param_group.post_backward()
+ state._training_state = TrainingState.IDLE
+ if state._fsdp_param_group:
+ state._fsdp_param_group._training_state = TrainingState.IDLE
if self._state_ctx.is_last_backward:
state._finalize_backward()
if self._state_ctx.is_last_backward:
@@ -208,7 +211,6 @@ class FSDPState(_State):
self._state_ctx.post_backward_final_callback_queued = False
def _finalize_backward(self) -> None:
- self._training_state = TrainingState.IDLE
for handle in self._pre_backward_hook_handles:
handle.remove()
self._pre_backward_hook_handles.clear() | 2.41.0 |
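The new `test_1f1b_microbatching` exercises the pattern a pipeline schedule needs: only the last microbatch reduce-scatters gradients and finalizes backward state. A condensed sketch of that loop, assuming torch.distributed and a device mesh are already initialized and using a toy module in place of a real pipeline stage:

```python
import torch
from torch.distributed._composable.fsdp import fully_shard

model = fully_shard(torch.nn.Linear(16, 16).cuda())
optim = torch.optim.AdamW(model.parameters(), lr=1e-2)
microbatches = [torch.randn(2, 16, device="cuda") for _ in range(3)]

for idx, inp in enumerate(microbatches):
    is_last = idx == len(microbatches) - 1
    model.set_requires_gradient_sync(is_last)  # reduce-scatter only on the last microbatch
    model.set_is_last_backward(is_last)        # defer backward finalization until then
    model(inp).sum().backward()
optim.step()
optim.zero_grad()
```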
d225189f15c8d33ae47a8f3c3879a408b792aad | Mon, 8 Apr 2024 22:44:37 +0100 | [PATCH 0041/1000] [inductor] Change OverridesData to take callables instead of strings (#123397) | Pull Request resolved: https://github.com/pytorch/pytorch/pull/123397 Approved by: https://github.com/lezcano | diff --git a/torch/_inductor/codegen/common.py b/torch/_inductor/codegen/common.py
index 4caa4dc367..b061e3ad1f 100644
--- a/torch/_inductor/codegen/common.py
+++ b/torch/_inductor/codegen/common.py
@@ -582,42 +582,21 @@ class OpOverrides:
def _initialize_pointwise_overrides(cls, target):
assert target in {"triton", "cpp", "cppvec"}, target
- def pointwise_factory_1(impl):
- def func(x):
- return impl.format(x=x)
-
- return func
-
- def pointwise_factory_2(impl):
- def func(x, y):
- return impl.format(x=x, y=y)
-
- return func
-
for funcname, data in pointwise_overrides_data.items():
impl = getattr(data, target)
if impl is None:
continue
-
- if isinstance(impl, str):
- nof_args = 2 if "{y}" in impl else 1
- # extend the following dictionary with factory
- # functions for a specific number of arguments as
- # needed:
- factory = {1: pointwise_factory_1, 2: pointwise_factory_2}[nof_args]
- impl = factory(impl)
-
setattr(cls, funcname, staticmethod(impl))
@dataclasses.dataclass
class OverridesData:
name: str
- cpp: Union[str, Callable[..., str]]
+ cpp: Callable[..., str]
# None when not impl in libdevice/triton
- triton: Union[Optional[str], Callable[..., str]] = None
+ triton: Optional[Callable[..., str]] = None
# None when not impl in aten/.../vec
- cppvec: Union[Optional[str], Callable[..., str]] = None
+ cppvec: Optional[Callable[..., str]] = None
type_promotion_kind: ELEMENTWISE_TYPE_PROMOTION_KIND = (
ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT
)
@@ -626,45 +605,45 @@ class OverridesData:
pointwise_overrides_data: Dict[str, OverridesData] = dict(
airy_ai=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="airy_ai_forward({x})",
+ cpp=lambda x: f"airy_ai_forward({x})",
name="special_airy_ai",
),
bessel_j0=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="bessel_j0_forward({x})",
- triton="libdevice.j0({x})",
+ cpp=lambda x: f"bessel_j0_forward({x})",
+ triton=lambda x: f"libdevice.j0({x})",
name="special_bessel_j0",
),
bessel_j1=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="bessel_j1_forward({x})",
- triton="libdevice.j1({x})",
+ cpp=lambda x: f"bessel_j1_forward({x})",
+ triton=lambda x: f"libdevice.j1({x})",
name="special_bessel_j1",
),
bessel_y0=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="bessel_y0_forward({x})",
- triton="libdevice.y0({x})",
+ cpp=lambda x: f"bessel_y0_forward({x})",
+ triton=lambda x: f"libdevice.y0({x})",
name="special_bessel_y0",
),
bessel_y1=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="bessel_y1_forward({x})",
- triton="libdevice.y1({x})",
+ cpp=lambda x: f"bessel_y1_forward({x})",
+ triton=lambda x: f"libdevice.y1({x})",
name="special_bessel_y1",
),
digamma=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="calc_digamma({x})",
- cppvec="{x}.digamma()",
+ cpp=lambda x: f"calc_digamma({x})",
+ cppvec=lambda x: f"{x}.digamma()",
name="digamma",
),
# no cpp nor triton implementation for entr, it is defined as decomposition
# erf, erfc
erfcx=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="calc_erfcx({x})",
- triton="libdevice.erfcx({x})",
+ cpp=lambda x: f"calc_erfcx({x})",
+ triton=lambda x: f"libdevice.erfcx({x})",
name="special_erfcx",
),
fma=OverridesData(
@@ -677,173 +656,173 @@ pointwise_overrides_data: Dict[str, OverridesData] = dict(
# erfinv, exp2, expit, gammaln
igamma=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="calc_igamma({x}, {y})",
+ cpp=lambda x, y: f"calc_igamma({x}, {y})",
name="igamma",
),
igammac=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="calc_igammac({x}, {y})",
+ cpp=lambda x, y: f"calc_igammac({x}, {y})",
name="igammac",
),
gammainc=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="calc_igamma({x}, {y})",
+ cpp=lambda x, y: f"calc_igamma({x}, {y})",
name="special_gammainc",
),
gammaincc=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="calc_igammac({x}, {y})",
+ cpp=lambda x, y: f"calc_igammac({x}, {y})",
name="special_gammaincc",
),
i0=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="calc_i0({x})",
- triton="libdevice.cyl_bessel_i0({x})",
- cppvec="{x}.i0()",
+ cpp=lambda x: f"calc_i0({x})",
+ triton=lambda x: f"libdevice.cyl_bessel_i0({x})",
+ cppvec=lambda x: f"{x}.i0()",
name="i0",
),
i0e=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="calc_i0e({x})",
- cppvec="{x}.i0e()",
+ cpp=lambda x: f"calc_i0e({x})",
+ cppvec=lambda x: f"{x}.i0e()",
name="special_i0e",
),
i1=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="calc_i1({x})",
- triton="libdevice.cyl_bessel_i1({x})",
+ cpp=lambda x: f"calc_i1({x})",
+ triton=lambda x: f"libdevice.cyl_bessel_i1({x})",
name="special_i1",
),
i1e=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="calc_i1e({x})",
+ cpp=lambda x: f"calc_i1e({x})",
name="special_i1e",
),
log_ndtr=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="calc_log_ndtr({x})",
+ cpp=lambda x: f"calc_log_ndtr({x})",
name="special_log_ndtr",
),
# logit
modified_bessel_i0=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="modified_bessel_i0_forward({x})",
- triton="libdevice.cyl_bessel_i0({x})",
+ cpp=lambda x: f"modified_bessel_i0_forward({x})",
+ triton=lambda x: f"libdevice.cyl_bessel_i0({x})",
name="special_modified_bessel_i0",
),
modified_bessel_i1=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="modified_bessel_i1_forward({x})",
- triton="libdevice.cyl_bessel_i1({x})",
+ cpp=lambda x: f"modified_bessel_i1_forward({x})",
+ triton=lambda x: f"libdevice.cyl_bessel_i1({x})",
name="special_modified_bessel_i1",
),
modified_bessel_k0=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="modified_bessel_k0_forward({x})",
+ cpp=lambda x: f"modified_bessel_k0_forward({x})",
name="special_modified_bessel_k0",
),
modified_bessel_k1=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="modified_bessel_k1_forward({x})",
+ cpp=lambda x: f"modified_bessel_k1_forward({x})",
name="special_modified_bessel_k1",
),
# multigamma
ndtr=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="calc_ndtr({x})",
+ cpp=lambda x: f"calc_ndtr({x})",
name="special_ndtr",
),
ndtri=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="calc_ndtri({x})",
+ cpp=lambda x: f"calc_ndtri({x})",
name="special_ndtri",
),
polygamma=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="calc_polygamma({y}, {x})",
+ cpp=lambda x, y: f"calc_polygamma({y}, {x})",
name="polygamma",
),
# psi - alias to digamma
# round
scaled_modified_bessel_k0=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="scaled_modified_bessel_k0_forward({x})",
+ cpp=lambda x: f"scaled_modified_bessel_k0_forward({x})",
name="special_scaled_modified_bessel_k0",
),
scaled_modified_bessel_k1=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="scaled_modified_bessel_k1_forward({x})",
+ cpp=lambda x: f"scaled_modified_bessel_k1_forward({x})",
name="special_scaled_modified_bessel_k1",
),
# sinc
spherical_bessel_j0=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="spherical_bessel_j0_forward({x})",
+ cpp=lambda x: f"spherical_bessel_j0_forward({x})",
name="special_spherical_bessel_j0",
),
zeta=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="zeta({x}, {y})",
+ cpp=lambda x, y: f"zeta({x}, {y})",
name="special_zeta",
),
chebyshev_polynomial_t=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="chebyshev_polynomial_t_forward({x}, {y})",
+ cpp=lambda x, y: f"chebyshev_polynomial_t_forward({x}, {y})",
name="special_chebyshev_polynomial_t",
),
chebyshev_polynomial_u=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="chebyshev_polynomial_u_forward({x}, {y})",
+ cpp=lambda x, y: f"chebyshev_polynomial_u_forward({x}, {y})",
name="special_chebyshev_polynomial_u",
),
chebyshev_polynomial_v=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="chebyshev_polynomial_v_forward({x}, {y})",
+ cpp=lambda x, y: f"chebyshev_polynomial_v_forward({x}, {y})",
name="special_chebyshev_polynomial_v",
),
chebyshev_polynomial_w=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="chebyshev_polynomial_w_forward({x}, {y})",
+ cpp=lambda x, y: f"chebyshev_polynomial_w_forward({x}, {y})",
name="special_chebyshev_polynomial_w",
),
legendre_polynomial_p=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="legendre_polynomial_p_forward({x}, {y})",
+ cpp=lambda x, y: f"legendre_polynomial_p_forward({x}, {y})",
name="special_legendre_polynomial_p",
),
shifted_chebyshev_polynomial_t=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="shifted_chebyshev_polynomial_t_forward({x}, {y})",
+ cpp=lambda x, y: f"shifted_chebyshev_polynomial_t_forward({x}, {y})",
name="special_shifted_chebyshev_polynomial_t",
),
shifted_chebyshev_polynomial_u=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="shifted_chebyshev_polynomial_u_forward({x}, {y})",
+ cpp=lambda x, y: f"shifted_chebyshev_polynomial_u_forward({x}, {y})",
name="special_shifted_chebyshev_polynomial_u",
),
shifted_chebyshev_polynomial_v=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="shifted_chebyshev_polynomial_v_forward({x}, {y})",
+ cpp=lambda x, y: f"shifted_chebyshev_polynomial_v_forward({x}, {y})",
name="special_shifted_chebyshev_polynomial_v",
),
shifted_chebyshev_polynomial_w=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="shifted_chebyshev_polynomial_w_forward({x}, {y})",
+ cpp=lambda x, y: f"shifted_chebyshev_polynomial_w_forward({x}, {y})",
name="special_shifted_chebyshev_polynomial_w",
),
hermite_polynomial_h=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="hermite_polynomial_h_forward({x}, {y})",
+ cpp=lambda x, y: f"hermite_polynomial_h_forward({x}, {y})",
name="special_hermite_polynomial_h",
),
hermite_polynomial_he=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="hermite_polynomial_he_forward({x}, {y})",
+ cpp=lambda x, y: f"hermite_polynomial_he_forward({x}, {y})",
name="special_hermite_polynomial_he",
),
laguerre_polynomial_l=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
- cpp="laguerre_polynomial_l_forward({x}, {y})",
+ cpp=lambda x, y: f"laguerre_polynomial_l_forward({x}, {y})",
name="special_laguerre_polynomial_l",
),
) | 2.41.0 |
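The practical effect of the change above is that a codegen override is now called directly with the argument expressions instead of going through `str.format`; a small self-contained sketch of the new shape of an entry (reduced fields only, not the full inductor dataclass):

```python
from dataclasses import dataclass
from typing import Callable, Optional

@dataclass
class OverridesData:
    name: str
    cpp: Callable[..., str]
    triton: Optional[Callable[..., str]] = None

zeta = OverridesData(name="special_zeta", cpp=lambda x, y: f"zeta({x}, {y})")
# Callers simply invoke the callable with the argument expressions:
print(zeta.cpp("in_ptr0[i0]", "in_ptr1[i0]"))  # -> zeta(in_ptr0[i0], in_ptr1[i0])
```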
0b7aa201caae3b99ce03f65ac4e0893f412c4e5 | Wed, 10 Apr 2024 21:32:40 -0700 | [PATCH 0042/1000] [dynamo][cpp-guards] Introduce DictSubclassGuardManager (#123773) | Pull Request resolved: https://github.com/pytorch/pytorch/pull/123773 Approved by: https://github.com/jansel | diff --git a/test/dynamo/test_misc.py b/test/dynamo/test_misc.py
index ce49e49631..79835450be 100644
--- a/test/dynamo/test_misc.py
+++ b/test/dynamo/test_misc.py
@@ -10167,6 +10167,23 @@ fn
res = opt_m(x)
self.assertEqual(ref, res)
+ def test_ordered_dict_move_to_end(self):
+ d = {
+ "foo": 1,
+ "bar": 2,
+ }
+
+ d = collections.OrderedDict(d)
+ d.move_to_end("foo")
+
+ @torch.compile(backend="eager")
+ def fn(x, d):
+ return x * d["foo"] * d["bar"]
+
+ fn(torch.randn(4), d)
+ with unittest.mock.patch("torch._dynamo.config.error_on_recompile", True):
+ fn(torch.randn(4), d)
+
class TestTracer(JitTestCase):
def test_jit_save(self):
diff --git a/torch/_dynamo/guards.py b/torch/_dynamo/guards.py
index 2f9354f22f..e775d63112 100644
--- a/torch/_dynamo/guards.py
+++ b/torch/_dynamo/guards.py
@@ -162,7 +162,8 @@ class GuardManager:
for guard in mgr.get_leaf_guards():
body.writelines(self.get_guard_lines(guard))
- if istype(mgr, DictGuardManager):
+ # This works for both DictGuardManager and SubclassedDictGuardManager
+ if isinstance(mgr, DictGuardManager):
self.construct_dict_manager_string(mgr, body)
# General case of GuardManager/RootGuardManager
diff --git a/torch/csrc/dynamo/guards.cpp b/torch/csrc/dynamo/guards.cpp
index d3af547313..9f1cfd793d 100644
--- a/torch/csrc/dynamo/guards.cpp
+++ b/torch/csrc/dynamo/guards.cpp
@@ -2104,7 +2104,7 @@ class DictGuardManager : public GuardManager {
return _key_value_managers[index];
}
- private:
+ protected: // also used by DictSubclassGuardManager
Py_ssize_t _size;
// DictGuardManager supports both exact dict type and non-exact dict type.
// Therefore, we have to compare the type to early exit.
@@ -2114,13 +2114,155 @@ class DictGuardManager : public GuardManager {
std::unordered_map<Py_ssize_t, KeyValueManager> _key_value_managers;
};
+/**
+ * The DictSubclassGuardManager is designed to work with dict subclasses,
+ * specifically focusing on OrderedDicts. Standard dictionaries leverage the
+ * PyDict_Next function to iterate over keys, values, and items. OrderedDicts,
+ * on the other hand, rely on an additional linked list structure to maintain
+ * keys order. Although PyDict_Next and OrderedDict generally yield the same
+ * order, discrepancies arise when using OrderedDict's move_to_end method (used
+ * in Pytorch hooks). `move_to_end` method only updates the linked list, leaving
+ * PyDict_Next unaffected. Therefore, to accurately capture key ordering in such
+ * cases, DictSubclassGuardManager directly invoke the .keys() method.
+ */
+
+class DictSubclassGuardManager : public DictGuardManager {
+ public:
+ DictSubclassGuardManager(
+ RootGuardManager* root,
+ std::string source,
+ py::handle example_value)
+ : DictGuardManager(root, std::move(source), example_value) {}
+
+ public:
+ bool check_nopybind(PyObject* obj) override { // borrowed ref
+ // TODO(janimesh) - Implement a fast-path using dict versions.
+
+ if (Py_TYPE(obj) != _expected_type) {
+ _fail_count += 1;
+ return false;
+ }
+
+ if (PyDict_Size(obj) != _size) {
+ _fail_count += 1;
+ return false;
+ }
+
+ if (!GuardManager::check_nopybind(obj)) { // NOLINT
+ _fail_count += 1;
+ // No need to shuffle the child guards, just return.
+ return false;
+ }
+
+ // Points to an element in the _indices vector.
+ size_t index_pointer = 0;
+ // Points to the key index in the dict
+ Py_ssize_t dict_pointer = 0;
+
+ // Use iter(obj) to iterate over the keys
+ PyObject* iterator = PyObject_GetIter(obj); // new reference
+ PyObject* key = nullptr;
+
+ while (index_pointer < _indices.size() &&
+ (key = PyIter_Next(iterator))) { // new reference
+ if (dict_pointer == _indices[index_pointer]) {
+ KeyValueManager& key_value_manager = _key_value_managers[dict_pointer];
+ std::unique_ptr<GuardManager>& key_manager = key_value_manager.first;
+ if (key_manager && !key_manager->check_nopybind(key)) {
+ return false;
+ }
+
+ PyObject* value = PyDict_GetItem(obj, key); // borrowed ref
+ std::unique_ptr<GuardManager>& value_manager = key_value_manager.second;
+ if (value_manager && !value_manager->check_nopybind(value)) {
+ return false;
+ }
+
+ index_pointer++;
+ }
+ dict_pointer++;
+ Py_DECREF(key);
+ }
+
+ Py_DECREF(iterator);
+ return true;
+ }
+
+ GuardDebugInfo check_verbose_nopybind(
+ PyObject* obj) override { // borrowed ref
+ if (Py_TYPE(obj) != _expected_type) {
+ return GuardDebugInfo(false, "TYPE_MISMATCH(" + get_source() + ")", 0);
+ }
+
+ if (PyDict_Size(obj) != _size) {
+ return GuardDebugInfo(
+ false, "len(" + get_source() + ") != " + std::to_string(_size), 0);
+ }
+
+ GuardDebugInfo debug_info =
+ GuardManager::check_verbose_nopybind(obj); // NOLINT
+ if (!debug_info.result) {
+ return debug_info;
+ }
+
+ // Points to an element in the _indices vector.
+ size_t index_pointer = 0;
+ // Points to the key index in the dict
+ Py_ssize_t dict_pointer = 0;
+
+ int num_guards_executed = 0;
+
+ // Use iter(obj) to iterate over the keys
+ PyObject* iterator = PyObject_GetIter(obj); // new reference
+ PyObject* key = nullptr;
+
+ while (index_pointer < _indices.size() &&
+ (key = PyIter_Next(iterator))) { // new reference
+ if (dict_pointer == _indices[index_pointer]) {
+ KeyValueManager& key_value_manager = _key_value_managers[dict_pointer];
+ std::unique_ptr<GuardManager>& key_manager = key_value_manager.first;
+ if (key_manager) {
+ GuardDebugInfo debug_info = key_manager->check_verbose_nopybind(key);
+ num_guards_executed += debug_info.num_guards_executed;
+ if (!debug_info.result) {
+ return GuardDebugInfo(
+ false, debug_info.verbose_code_parts, num_guards_executed);
+ }
+ }
+
+ PyObject* value = PyDict_GetItem(obj, key); // borrowed ref
+ std::unique_ptr<GuardManager>& value_manager = key_value_manager.second;
+ if (value_manager) {
+ GuardDebugInfo debug_info =
+ value_manager->check_verbose_nopybind(value);
+ num_guards_executed += debug_info.num_guards_executed;
+ if (!debug_info.result) {
+ return GuardDebugInfo(
+ false, debug_info.verbose_code_parts, num_guards_executed);
+ }
+ }
+ index_pointer++;
+ }
+ Py_DECREF(key);
+ dict_pointer++;
+ }
+
+ Py_DECREF(iterator);
+ return GuardDebugInfo(true, num_guards_executed);
+ }
+};
+
std::unique_ptr<GuardManager> make_guard_manager(
RootGuardManager* root,
std::string source,
py::handle example_value) {
// Check if example_value is a dict
if (py::isinstance<py::dict>(example_value)) {
- return std::make_unique<DictGuardManager>(
+ if (PyDict_CheckExact(example_value.ptr())) {
+ return std::make_unique<DictGuardManager>(
+ root, std::move(source), example_value);
+ }
+ return std::make_unique<DictSubclassGuardManager>(
root, std::move(source), example_value);
}
return std::make_unique<GuardManager>(root, std::move(source));
@@ -3542,6 +3684,13 @@ PyObject* torch_c_dynamo_guards_init() {
py::arg("example_value"),
py::return_value_policy::reference);
+ // Dict Guard Manager
+ py::class_< // NOLINT
+ DictSubclassGuardManager,
+ DictGuardManager,
+ std::unique_ptr<DictSubclassGuardManager>>(
+ py_m, "DictSubclassGuardManager"); // NOLINT
+
py_m.def("install_tensor_aliasing_guard", install_tensor_aliasing_guard);
py_m.def(
"install_no_tensor_aliasing_guard", install_no_tensor_aliasing_guard); | 2.41.0 |
0ccf599ccafb379346101591bf76c6653a03e7f | Thu, 11 Apr 2024 22:40:46 +0000 | [PATCH 0045/1000] [export] Restore original placeholder names (part 2: higher-order-op subgraph naming) (#123587) | Summary: note: breaking the original diff [D55225818](https://www.internalfb.com/diff/D55225818) into 3 parts (top-level renaming, higher-order-op subgraphs, constant input de/serialization) because of its size. Stacked PR to restore original names to placeholder nodes, replacing the default names arg0_1, arg1_1, ... This PR propagates node names to higher-order-op subgraph placeholders, retaining the top-level names and handling naming collisions by suffixing other non-placeholder nodes in the subgraph with an index. This is the same handling as in fx.Graph/fx.Node, but implemented separately as a pass. Since the input schemas of HOO subgraphs are very different, they are enumerated in _name_hoo_subgraph_placeholders(). Currently cond, map_impl, and wrap_with_set_grad_enabled are handled, but other ops can be easily added. Test Plan: verification checks on placeholder names for all export() calls, unit test in test/export/test_export.py Differential Revision: D55456749 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123587 Approved by: https://github.com/angelayi | diff --git a/test/export/test_export.py b/test/export/test_export.py
index 5ec2deb050..6aa4f04a42 100644
--- a/test/export/test_export.py
+++ b/test/export/test_export.py
@@ -3910,19 +3910,19 @@ def forward(self, b_pred, b_t, x, y):
self.assertExpectedInline(
str(exported_program.graph_module.true_graph_0.code.strip()),
"""\
-def forward(self, arg1_1, arg0_1, arg2_1):
+def forward(self, b_t, x, y):
submod_3 = self.submod_1
- add_1 = torch._higher_order_ops.wrap.wrap_with_set_grad_enabled(True, submod_3, arg1_1, arg0_1, arg2_1); submod_3 = arg1_1 = arg0_1 = arg2_1 = None
+ add_1 = torch._higher_order_ops.wrap.wrap_with_set_grad_enabled(True, submod_3, b_t, x, y); submod_3 = b_t = x = y = None
return (add_1,)""",
)
self.assertExpectedInline(
str(exported_program.graph_module.true_graph_0.submod_1.code.strip()),
"""\
-def forward(self, arg1_1, arg0_1, arg2_1):
- sub = torch.ops.aten.sub.Tensor(arg1_1, 1); arg1_1 = None
- add = torch.ops.aten.add.Tensor(sub, arg0_1); sub = arg0_1 = None
- add_1 = torch.ops.aten.add.Tensor(add, arg2_1); add = arg2_1 = None
+def forward(self, b_t, x, y):
+ sub = torch.ops.aten.sub.Tensor(b_t, 1); b_t = None
+ add = torch.ops.aten.add.Tensor(sub, x); sub = x = None
+ add_1 = torch.ops.aten.add.Tensor(add, y); add = y = None
return add_1""",
)
@@ -4256,6 +4256,83 @@ def forward(self, x):
real_names_and_ops = [(node.name, node.op) for node in ep.graph.nodes]
self.assertEqual(expected_names_and_ops, real_names_and_ops)
+ @testing.expectedFailureRetraceability
+ def test_placeholder_naming_collisions_hoo_subgraphs(self):
+ # test collisions between user inputs, top-level nodes, and HOO subgraph nodes
+ class Foo(torch.nn.Module):
+ def forward(self, x, mul, mul_1):
+ _mul = x * x
+ y = cond(
+ _mul.sum() > 0,
+ lambda x, y, z: x * y * z,
+ lambda x, y, z: x + y + z,
+ [_mul, mul, mul_1],
+ )
+ with torch.enable_grad():
+ y = y * y
+ return y
+
+ with torch.no_grad():
+ ep = torch.export._trace._export(
+ Foo(),
+ (torch.randn(4), torch.randn(4), torch.randn(4)),
+ pre_dispatch=True,
+ )
+ # test cond subgraph
+ expected_names_and_ops = [
+ ("mul_2", "placeholder"),
+ ("mul", "placeholder"),
+ ("mul_1", "placeholder"),
+ ("mul_3", "call_function"),
+ ("mul_4", "call_function"),
+ ("output", "output"),
+ ]
+ real_names_and_ops = [
+ (node.name, node.op) for node in ep.graph_module.true_graph_0.graph.nodes
+ ]
+ self.assertEqual(expected_names_and_ops, real_names_and_ops)
+ # test set_grad_enabled subgraph
+ expected_names_and_ops = [
+ ("getitem", "placeholder"),
+ ("mul_1", "call_function"),
+ ("output", "output"),
+ ]
+ real_names_and_ops = [
+ (node.name, node.op) for node in ep.graph_module.submod_1.graph.nodes
+ ]
+ self.assertEqual(expected_names_and_ops, real_names_and_ops)
+
+ # test collisions between user inputs & higher order op subgraphs
+ # (please never do this)
+ class Foo(torch.nn.Module):
+ def forward(self, input, true_graph, body_graph):
+ def map_body(x, y):
+ return x + y
+
+ x = map(map_body, input, body_graph[0])
+ x = x + true_graph[0] + true_graph[1]
+ x = cond(x.sum() > 0, lambda x: x * 2.0, lambda x: x + 2.0, [x])
+ x = cond(x.sum() > 0, lambda x: x * 2.0, lambda x: x + 2.0, [x])
+ return x
+
+ inputs = (
+ torch.randn(10, 4),
+ (torch.randn(4), torch.randn(4)),
+ (torch.randn(4),),
+ )
+ ep = export(Foo(), inputs)
+ expected_getattr_names = [
+ "body_graph_1",
+ "true_graph_2",
+ "false_graph_0",
+ "true_graph_3",
+ "false_graph_1",
+ ]
+ real_getattr_names = [
+ node.name for node in ep.graph.nodes if node.op == "get_attr"
+ ]
+ self.assertEqual(expected_getattr_names, real_getattr_names)
+
@unittest.skipIf(not torchdynamo.is_dynamo_supported(), "dynamo isn't support")
class TestOneOffModelExportResult(TestCase):
diff --git a/torch/_export/utils.py b/torch/_export/utils.py
index 557788558d..844af54e77 100644
--- a/torch/_export/utils.py
+++ b/torch/_export/utils.py
@@ -9,7 +9,10 @@ import torch
from torch._subclasses.fake_tensor import FakeTensor
from torch.export import ExportedProgram
-from torch.export.exported_program import _rename_without_collisions
+from torch.export.exported_program import (
+ _name_hoo_subgraph_placeholders,
+ _rename_without_collisions,
+)
from torch.export.graph_signature import ConstantArgument, InputKind, OutputKind
from torch.utils._pytree import (
_register_pytree_node,
@@ -527,9 +530,8 @@ def placeholder_naming_pass(
elif node.name in name_map:
node.name = name_map[node.name]
- # TODO(pianpwk), in immediate follow-up PR
# propagate names to higher order op subgraphs
- # name_hoo_subgraph_placeholders(gm)
+ _name_hoo_subgraph_placeholders(gm)
# re-generate graph module code
gm.recompile()
diff --git a/torch/export/_trace.py b/torch/export/_trace.py
index 96f7e3605b..2c7249c3f6 100644
--- a/torch/export/_trace.py
+++ b/torch/export/_trace.py
@@ -832,23 +832,26 @@ def _verify_placeholder_names(gm: torch.fx.GraphModule, sig: ExportGraphSignatur
"""
Performs a sanity check on the placeholder node names.
- User input nodes: no restrictions, should match the original forward() signature
- - Params/buffers/constants/custom_obj nodes: should start with "p", "b", "c", "obj"
+ - Params/buffers/constants/custom_obj/token nodes: should start with prefixes defined in <placeholder_prefixes>
"""
name_to_kind = {
spec.arg.name: spec.kind
for spec in sig.input_specs
if not isinstance(spec.arg, ConstantArgument)
}
- for node in gm.graph.nodes:
- if node.op == "placeholder":
- if node.name not in name_to_kind:
- continue
- node_kind = name_to_kind[node.name]
- prefix = placeholder_prefixes[node_kind]
- if not node.name.startswith(prefix):
- raise SpecViolationError(
- f"Placeholder node name {node.name} does not follow spec for {node_kind}, name should have prefix: {prefix}"
- )
+ for mod in gm.modules():
+ if not isinstance(mod, torch.fx.GraphModule):
+ continue
+ for node in mod.graph.nodes:
+ if node.op == "placeholder":
+ if node.name not in name_to_kind:
+ continue
+ node_kind = name_to_kind[node.name]
+ prefix = placeholder_prefixes[node_kind]
+ if not node.name.startswith(prefix):
+ raise SpecViolationError(
+ f"Placeholder node name {node.name} does not follow spec for {node_kind}, name should have prefix: {prefix}"
+ )
def get_ep_stats(ep: ExportedProgram) -> Dict[str, Any]:
diff --git a/torch/export/exported_program.py b/torch/export/exported_program.py
index 37af592d32..5c18bbdeda 100644
--- a/torch/export/exported_program.py
+++ b/torch/export/exported_program.py
@@ -141,6 +141,48 @@ def _rename_without_collisions(
return name_map[orig_name]
+def _name_hoo_subgraph_placeholders(gm: torch.fx.GraphModule) -> None:
+ """
+ Propagate placeholder names from the top-level graph into HigherOrderOp subgraphs,
+ and handle collisions with non-placeholders by count suffixing.
+ Different HOO subgraph types have different input schemas, so we first enumerate them
+ and gather the top-level named placeholder nodes.
+ """
+ # gather all HOO subgraphs and their top-level named placeholder nodes
+ subgraph_ph_tuples: List[Tuple[torch.fx.GraphModule, List[torch.fx.Node]]] = []
+ for node in gm.graph.nodes:
+ if node.op == "call_function" and isinstance(
+ node.target, torch._ops.HigherOrderOperator
+ ):
+ # HOO subgraphs have varying input schemas, so we enumerate them there
+ if node.target._name == "cond":
+ _, true_graph, false_graph, cond_args = node._args
+ subgraph_ph_tuples.append((getattr(gm, true_graph.target), cond_args))
+ subgraph_ph_tuples.append((getattr(gm, false_graph.target), cond_args))
+ elif node.target._name == "wrap_with_set_grad_enabled":
+ subgraph, phs = node._args[1], node._args[2:]
+ subgraph_ph_tuples.append((getattr(gm, subgraph.target), phs))
+ elif node.target._name == "map_impl":
+ body_graph, array, args = node._args
+ subgraph_ph_tuples.append(
+ (getattr(gm, body_graph.target), array + args)
+ )
+
+ # propagate names
+ for subgraph, hoo_phs in subgraph_ph_tuples:
+ name_map: Dict[str, str] = {}
+ for i, node in enumerate(subgraph.graph.nodes):
+ if i < len(hoo_phs): # placeholder, retain name
+ name_map[node.name] = hoo_phs[i].name
+ node.name = node.target = hoo_phs[i].name
+ else: # non-placeholder, check for collisions
+ node.name = _rename_without_collisions(name_map, node.name, node.name)
+
+ # recurse and recompile
+ _name_hoo_subgraph_placeholders(subgraph)
+ subgraph.recompile()
+
+
class ExportedProgram:
"""
Package of a program from :func:`export`. It contains
@@ -543,6 +585,9 @@ class ExportedProgram:
continue
node.name = _rename_without_collisions(name_map, node.name, node.name)
+ # propagate names to higher order op subgraphs
+ _name_hoo_subgraph_placeholders(gm)
+
# To match the output target with correct input for input mutations
# need to find the old to new placeholder map
old_new_placeholder_map = { | 2.41.0 |
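The collision handling referenced above (`_rename_without_collisions`) follows the same count-suffix convention that fx uses for node names; a minimal illustrative sketch of the idea, not the actual torch implementation:

```python
def rename_without_collisions(name_map: dict, orig_name: str, name: str) -> str:
    """Record orig_name -> name, appending _1, _2, ... if name is already taken."""
    taken = set(name_map.values())
    if name in taken:
        count = 1
        while f"{name}_{count}" in taken:
            count += 1
        name = f"{name}_{count}"
    name_map[orig_name] = name
    return name

name_map: dict = {}
print(rename_without_collisions(name_map, "mul", "mul"))      # mul
print(rename_without_collisions(name_map, "mul_new", "mul"))  # mul_1
```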
04c9c5601cbb9ae65ca10484d256c3f779d2574 | Thu, 11 Apr 2024 23:45:05 +0000 | [PATCH 0046/1000] Enable UFMT on all of `test/jit` (#123623) | Partially addresses #123062 Ran lintrunner on: - `test/jit` with command: ```bash lintrunner -a --take UFMT --all-files ``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/123623 Approved by: https://github.com/ezyang | diff --git a/.lintrunner.toml b/.lintrunner.toml
index 7112557122..d492ed12e7 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -1162,94 +1162,6 @@ exclude_patterns = [
'test/functorch/test_vmap.py',
'test/functorch/test_vmap_registrations.py',
'test/functorch/xfail_suggester.py',
- 'test/jit/__init__.py',
- 'test/jit/_imported_class_test/__init__.py',
- 'test/jit/_imported_class_test/bar.py',
- 'test/jit/_imported_class_test/foo.py',
- 'test/jit/_imported_class_test/very/__init__.py',
- 'test/jit/_imported_class_test/very/very/__init__.py',
- 'test/jit/_imported_class_test/very/very/nested.py',
- 'test/jit/fixtures_srcs/__init__.py',
- 'test/jit/fixtures_srcs/fixtures_src.py',
- 'test/jit/fixtures_srcs/generate_models.py',
- 'test/jit/fixtures_srcs/test_upgrader_models_generation.py',
- 'test/jit/myexception.py',
- 'test/jit/test_alias_analysis.py',
- 'test/jit/test_async.py',
- 'test/jit/test_aten_pow.py',
- 'test/jit/test_attr.py',
- 'test/jit/test_autodiff.py',
- 'test/jit/test_autodiff_subgraph_slicing.py',
- 'test/jit/test_await.py',
- 'test/jit/test_backend_nnapi.py',
- 'test/jit/test_backends.py',
- 'test/jit/test_batch_mm.py',
- 'test/jit/test_builtins.py',
- 'test/jit/test_class_type.py',
- 'test/jit/test_complex.py',
- 'test/jit/test_complexity.py',
- 'test/jit/test_convert_activation.py',
- 'test/jit/test_cuda.py',
- 'test/jit/test_custom_operators.py',
- 'test/jit/test_data_parallel.py',
- 'test/jit/test_dataclasses.py',
- 'test/jit/test_dce.py',
- 'test/jit/test_device_analysis.py',
- 'test/jit/test_dtype_analysis.py',
- 'test/jit/test_enum.py',
- 'test/jit/test_exception.py',
- 'test/jit/test_freezing.py',
- 'test/jit/test_functional_blocks.py',
- 'test/jit/test_fuser_common.py',
- 'test/jit/test_graph_rewrite_passes.py',
- 'test/jit/test_hash.py',
- 'test/jit/test_hooks.py',
- 'test/jit/test_hooks_modules.py',
- 'test/jit/test_ignorable_args.py',
- 'test/jit/test_ignore_context_manager.py',
- 'test/jit/test_isinstance.py',
- 'test/jit/test_jit_utils.py',
- 'test/jit/test_list_dict.py',
- 'test/jit/test_logging.py',
- 'test/jit/test_misc.py',
- 'test/jit/test_models.py',
- 'test/jit/test_module_apis.py',
- 'test/jit/test_module_containers.py',
- 'test/jit/test_module_interface.py',
- 'test/jit/test_modules.py',
- 'test/jit/test_op_decompositions.py',
- 'test/jit/test_optimize_for_mobile_preserve_debug_info.py',
- 'test/jit/test_parametrization.py',
- 'test/jit/test_pdt.py',
- 'test/jit/test_peephole.py',
- 'test/jit/test_profiler.py',
- 'test/jit/test_python_bindings.py',
- 'test/jit/test_python_builtins.py',
- 'test/jit/test_python_ir.py',
- 'test/jit/test_recursive_script.py',
- 'test/jit/test_remove_mutation.py',
- 'test/jit/test_save_load.py',
- 'test/jit/test_save_load_for_op_version.py',
- 'test/jit/test_script_profile.py',
- 'test/jit/test_scriptmod_ann.py',
- 'test/jit/test_slice.py',
- 'test/jit/test_sparse.py',
- 'test/jit/test_string_formatting.py',
- 'test/jit/test_symbolic_shape_analysis.py',
- 'test/jit/test_tensor_creation_ops.py',
- 'test/jit/test_tensor_methods.py',
- 'test/jit/test_torchbind.py',
- 'test/jit/test_tracer.py',
- 'test/jit/test_type_sharing.py',
- 'test/jit/test_types.py',
- 'test/jit/test_typing.py',
- 'test/jit/test_union.py',
- 'test/jit/test_unsupported_ops.py',
- 'test/jit/test_upgraders.py',
- 'test/jit/test_warn.py',
- 'test/jit/test_with.py',
- 'test/jit/xnnpack/test_xnnpack_delegate.py',
- 'test/jit_hooks/model.py',
'test/lazy/__init__.py',
'test/lazy/test_bindings.py',
'test/lazy/test_debug_util.py',
diff --git a/test/jit/_imported_class_test/bar.py b/test/jit/_imported_class_test/bar.py
index f6bdc59310..5e4e9839d3 100644
--- a/test/jit/_imported_class_test/bar.py
+++ b/test/jit/_imported_class_test/bar.py
@@ -1,4 +1,5 @@
import torch
+
# This file contains definitions of script classes.
# They are used by test_jit.py to test ScriptClass imports
diff --git a/test/jit/_imported_class_test/foo.py b/test/jit/_imported_class_test/foo.py
index fe0123be32..c2d982ba19 100644
--- a/test/jit/_imported_class_test/foo.py
+++ b/test/jit/_imported_class_test/foo.py
@@ -1,5 +1,7 @@
import torch
+
from . import bar
+
# This file contains definitions of script classes.
# They are used by test_jit.py to test ScriptClass imports
diff --git a/test/jit/_imported_class_test/very/very/nested.py b/test/jit/_imported_class_test/very/very/nested.py
index dcf8dcb40c..af7e0d1b09 100644
--- a/test/jit/_imported_class_test/very/very/nested.py
+++ b/test/jit/_imported_class_test/very/very/nested.py
@@ -1,4 +1,5 @@
import torch
+
# This file contains definitions of script classes.
# They are used by test_jit.py to test ScriptClass imports
diff --git a/test/jit/fixtures_srcs/fixtures_src.py b/test/jit/fixtures_srcs/fixtures_src.py
index afba17800c..c15acf5a0e 100644
--- a/test/jit/fixtures_srcs/fixtures_src.py
+++ b/test/jit/fixtures_srcs/fixtures_src.py
@@ -1,6 +1,8 @@
-import torch
from typing import Union
+import torch
+
+
class TestVersionedDivTensorExampleV7(torch.nn.Module):
def forward(self, a, b):
result_0 = a / b
@@ -8,35 +10,52 @@ class TestVersionedDivTensorExampleV7(torch.nn.Module):
result_2 = a.div(b)
return result_0, result_1, result_2
+
class TestVersionedLinspaceV7(torch.nn.Module):
def forward(self, a: Union[int, float, complex], b: Union[int, float, complex]):
c = torch.linspace(a, b, steps=5)
d = torch.linspace(a, b)
return c, d
+
class TestVersionedLinspaceOutV7(torch.nn.Module):
- def forward(self, a: Union[int, float, complex], b: Union[int, float, complex], out: torch.Tensor):
+ def forward(
+ self,
+ a: Union[int, float, complex],
+ b: Union[int, float, complex],
+ out: torch.Tensor,
+ ):
return torch.linspace(a, b, out=out)
+
class TestVersionedLogspaceV8(torch.nn.Module):
def forward(self, a: Union[int, float, complex], b: Union[int, float, complex]):
c = torch.logspace(a, b, steps=5)
d = torch.logspace(a, b)
return c, d
+
class TestVersionedLogspaceOutV8(torch.nn.Module):
- def forward(self, a: Union[int, float, complex], b: Union[int, float, complex], out: torch.Tensor):
+ def forward(
+ self,
+ a: Union[int, float, complex],
+ b: Union[int, float, complex],
+ out: torch.Tensor,
+ ):
return torch.logspace(a, b, out=out)
+
class TestVersionedGeluV9(torch.nn.Module):
def forward(self, x):
return torch._C._nn.gelu(x)
+
class TestVersionedGeluOutV9(torch.nn.Module):
def forward(self, x):
out = torch.zeros_like(x)
return torch._C._nn.gelu(x, out=out)
+
class TestVersionedRandomV10(torch.nn.Module):
def forward(self, x):
out = torch.zeros_like(x)
diff --git a/test/jit/fixtures_srcs/generate_models.py b/test/jit/fixtures_srcs/generate_models.py
index f99a2cf261..973a31e3c1 100644
--- a/test/jit/fixtures_srcs/generate_models.py
+++ b/test/jit/fixtures_srcs/generate_models.py
@@ -6,9 +6,11 @@ from pathlib import Path
from typing import Set
import torch
+
# Use asterisk symbol so developer doesn't need to import here when they add tests for upgraders.
from test.jit.fixtures_srcs.fixtures_src import * # noqa: F403
-from torch.jit.mobile import _load_for_lite_interpreter, _export_operator_list
+from torch.jit.mobile import _export_operator_list, _load_for_lite_interpreter
+
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
@@ -105,28 +107,41 @@ ALL_MODULES = {
Get the path to `test/jit/fixtures`, where all test models for operator changes
(upgrader/downgrader) are stored
"""
+
+
def get_fixtures_path() -> Path:
pytorch_dir = Path(__file__).resolve().parents[3]
fixtures_path = pytorch_dir / "test" / "jit" / "fixtures"
return fixtures_path
+
"""
Get all models' name in `test/jit/fixtures`
"""
+
+
def get_all_models(model_directory_path: Path) -> Set[str]:
- files_in_fixtures = model_directory_path.glob('**/*')
- all_models_from_fixtures = [fixture.stem for fixture in files_in_fixtures if fixture.is_file()]
+ files_in_fixtures = model_directory_path.glob("**/*")
+ all_models_from_fixtures = [
+ fixture.stem for fixture in files_in_fixtures if fixture.is_file()
+ ]
return set(all_models_from_fixtures)
+
"""
Check if a given model already exist in `test/jit/fixtures`
"""
+
+
def model_exist(model_file_name: str, all_models: Set[str]) -> bool:
return model_file_name in all_models
+
"""
Get the operator list given a module
"""
+
+
def get_operator_list(script_module: torch) -> Set[str]:
buffer = io.BytesIO(script_module._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
@@ -134,21 +149,25 @@ def get_operator_list(script_module: torch) -> Set[str]:
operator_list = _export_operator_list(mobile_module)
return operator_list
+
"""
Get the output model operator version, given a module
"""
+
+
def get_output_model_version(script_module: torch.nn.Module) -> int:
buffer = io.BytesIO()
torch.jit.save(script_module, buffer)
buffer.seek(0)
zipped_model = zipfile.ZipFile(buffer)
try:
- version = int(zipped_model.read('archive/version').decode("utf-8"))
+ version = int(zipped_model.read("archive/version").decode("utf-8"))
return version
except KeyError:
- version = int(zipped_model.read('archive/.data/version').decode("utf-8"))
+ version = int(zipped_model.read("archive/.data/version").decode("utf-8"))
return version
+
"""
Loop through all test modules. If the corresponding model doesn't exist in
`test/jit/fixtures`, generate one. For the following reasons, a model won't be exported:
@@ -165,6 +184,8 @@ likely this script is running with the commit to make the change.
3. The model already exists in `test/jit/fixtures`.
"""
+
+
def generate_models(model_directory_path: Path):
all_models = get_all_models(model_directory_path)
for a_module, expect_operator in ALL_MODULES.items():
@@ -176,13 +197,17 @@ def generate_models(model_directory_path: Path):
"The module %s "
"is not a torch.nn.module instance. "
"Please ensure it's a subclass of torch.nn.module in fixtures_src.py"
- "and it's registered as an instance in ALL_MODULES in generated_models.py", torch_module_name)
-
+ "and it's registered as an instance in ALL_MODULES in generated_models.py",
+ torch_module_name,
+ )
# The corresponding model name is: test_versioned_div_tensor_example_v4
- model_name = ''.join([
- '_' + char.lower() if char.isupper() else char for char in torch_module_name
- ]).lstrip('_')
+ model_name = "".join(
+ [
+ "_" + char.lower() if char.isupper() else char
+ for char in torch_module_name
+ ]
+ ).lstrip("_")
# Some models may not compile anymore, so skip the ones
# that already has pt file for them.
@@ -199,7 +224,10 @@ def generate_models(model_directory_path: Path):
logger.error(
"Actual model version %s "
"is equal or larger than %s + 1. "
- "Please run the script before the commit to change operator.", actual_model_version, current_operator_version)
+ "Please run the script before the commit to change operator.",
+ actual_model_version,
+ current_operator_version,
+ )
continue
actual_operator_list = get_operator_list(script_module)
@@ -207,16 +235,23 @@ def generate_models(model_directory_path: Path):
logger.error(
"The model includes operator: %s, "
"however it doesn't cover the operator %s."
- "Please ensure the output model includes the tested operator.", actual_operator_list, expect_operator)
+ "Please ensure the output model includes the tested operator.",
+ actual_operator_list,
+ expect_operator,
+ )
continue
export_model_path = str(model_directory_path / (str(model_name) + ".ptl"))
script_module._save_for_lite_interpreter(export_model_path)
- logger.info("Generating model %s and it's save to %s", model_name, export_model_path)
+ logger.info(
+ "Generating model %s and it's save to %s", model_name, export_model_path
+ )
+
def main() -> None:
model_directory_path = get_fixtures_path()
generate_models(model_directory_path)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main()
diff --git a/test/jit/fixtures_srcs/test_upgrader_models_generation.py b/test/jit/fixtures_srcs/test_upgrader_models_generation.py
index 58267c1e0e..a23b95af9d 100644
--- a/test/jit/fixtures_srcs/test_upgrader_models_generation.py
+++ b/test/jit/fixtures_srcs/test_upgrader_models_generation.py
@@ -2,7 +2,7 @@
import torch
from test.jit.fixtures_srcs.generate_models import ALL_MODULES
-from torch.testing._internal.common_utils import TestCase, run_tests
+from torch.testing._internal.common_utils import run_tests, TestCase
class TestUpgraderModelGeneration(TestCase):
@@ -14,7 +14,9 @@ class TestUpgraderModelGeneration(TestCase):
f"The module {module_name} "
f"is not a torch.nn.module instance. "
f"Please ensure it's a subclass of torch.nn.module in fixtures_src.py"
- f"and it's registered as an instance in ALL_MODULES in generated_models.py")
+ f"and it's registered as an instance in ALL_MODULES in generated_models.py",
+ )
-if __name__ == '__main__':
+
+if __name__ == "__main__":
run_tests()
diff --git a/test/jit/myexception.py b/test/jit/myexception.py
index e60d30bd17..0f6d898c6e 100644
--- a/test/jit/myexception.py
+++ b/test/jit/myexception.py
@@ -3,5 +3,7 @@ Define exceptions used in test_exception.py. We define them in a
separate file on purpose to make sure the fully qualified exception class name
is captured correctly in such cases.
"""
+
+
class MyKeyError(KeyError):
pass
diff --git a/test/jit/test_alias_analysis.py b/test/jit/test_alias_analysis.py
index d7f4e679d8..54518595be 100644
--- a/test/jit/test_alias_analysis.py
+++ b/test/jit/test_alias_analysis.py
@@ -1,15 +1,18 @@
# Owner(s): ["oncall: jit"]
+import torch
+from torch._C import parse_ir
from torch.testing._internal.common_utils import TemporaryFileName
from torch.testing._internal.jit_utils import JitTestCase
-from torch._C import parse_ir
-import torch
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
+
class TestAliasAnalysis(JitTestCase):
def test_becomes_wildcard_annotations(self):
@@ -26,9 +29,13 @@ class TestAliasAnalysis(JitTestCase):
alias_db = graph.alias_db()
split_node = graph.findNode("aten::split")
        # split input enters wildcard set, list initialized as containing wildcard set
- self.assertTrue(alias_db.may_contain_alias(next(split_node.inputs()), split_node.output()))
+ self.assertTrue(
+ alias_db.may_contain_alias(next(split_node.inputs()), split_node.output())
+ )
# because %x.1 enters wildcard set, it now aliases other members of wildcard set (graph inputs)
- self.assertTrue(alias_db.may_contain_alias(next(split_node.inputs()), next(graph.inputs())))
+ self.assertTrue(
+ alias_db.may_contain_alias(next(split_node.inputs()), next(graph.inputs()))
+ )
def test_nested_list_construct_not_wildcard(self):
@torch.jit.script
@@ -42,7 +49,9 @@ class TestAliasAnalysis(JitTestCase):
ten_construct = graph.findNode("aten::rand").output()
output = next(graph.outputs())
self.assertTrue(alias_db.may_contain_alias(ten_construct, output))
- self.assertFalse(alias_db.may_contain_alias(next(graph.inputs()), ten_construct))
+ self.assertFalse(
+ alias_db.may_contain_alias(next(graph.inputs()), ten_construct)
+ )
def test_recursive_calls(self):
@torch.jit.script
@@ -108,7 +117,9 @@ class TestAliasAnalysis(JitTestCase):
class MultiTmpFile:
def __init__(self, N):
self.N = N
- self.ctxs = [TemporaryFileName(mode="w", suffix=".py") for _ in range(N)]
+ self.ctxs = [
+ TemporaryFileName(mode="w", suffix=".py") for _ in range(N)
+ ]
def __enter__(self):
return [x.__enter__() for x in self.ctxs]
diff --git a/test/jit/test_async.py b/test/jit/test_async.py
index 637b82f117..29f4fb22fb 100644
--- a/test/jit/test_async.py
+++ b/test/jit/test_async.py
@@ -3,18 +3,20 @@
import os
import sys
+from typing import Any, Tuple
+
import torch
import torch.nn as nn
-from typing import Any, Tuple
-
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
-from torch.testing._internal.jit_utils import JitTestCase, _inline_everything
from typing import List
+
from torch import Tensor
from torch.jit import Future
+from torch.testing._internal.jit_utils import _inline_everything, JitTestCase
+
class TestAsync(JitTestCase):
def test_async_python(self):
@@ -51,8 +53,7 @@ class TestAsync(JitTestCase):
futures = torch.jit.annotate(List[Future[List[Tensor]]], [])
for _ in range(3):
future = torch.jit.annotate(
- Future[List[Tensor]],
- torch.jit.fork(foo, x)
+ Future[List[Tensor]], torch.jit.fork(foo, x)
)
futures.append(future)
@@ -85,7 +86,7 @@ class TestAsync(JitTestCase):
def test_async_script_capture(self):
class Mod(torch.jit.ScriptModule):
- __constants__ = ['const']
+ __constants__ = ["const"]
def __init__(self):
super().__init__()
@@ -139,7 +140,10 @@ class TestAsync(JitTestCase):
def test_async_script_no_script_mod(self):
x = torch.rand(3, 4)
- with self.assertRaisesRegexWithHighlight(RuntimeError, 'cannot call a value', 'torch.jit._fork(x'):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "cannot call a value", "torch.jit._fork(x"
+ ):
+
@torch.jit.script
def wait_script(x):
fut = torch.jit._fork(x)
@@ -213,7 +217,7 @@ class TestAsync(JitTestCase):
lambda x1, x2: torch.jit._wait(torch.jit._fork(foo, x1, x2)),
lambda x1, x2: torch.jit._wait(torch.jit._fork(foo, x1, x2=x2)),
lambda x1, x2: torch.jit._wait(torch.jit._fork(foo, x1=x1, x2=x2)),
- lambda x1, x2: torch.jit._wait(torch.jit._fork(foo, x2=x2, x1=x1))
+ lambda x1, x2: torch.jit._wait(torch.jit._fork(foo, x2=x2, x1=x1)),
]:
for wrapper in [
func,
@@ -234,8 +238,8 @@ class TestAsync(JitTestCase):
return torch.jit._wait(torch.jit._fork(foo, x1=x1, x2=x2))
for wrapper in [
- foo_script_args,
- foo_script_kwargs,
+ foo_script_args,
+ foo_script_kwargs,
]:
self.assertEqual(wrapper(x1, x2), y_hat)
self.assertEqual(wrapper(x1, x2=x2), y_hat)
@@ -255,7 +259,9 @@ class TestAsync(JitTestCase):
self.traced = torch.jit.trace(Traced(), (x), _force_outplace=True)
@torch.jit.script_method
- def forward(self, x: Tensor) -> Tuple[List[Tensor], Tuple[Tensor, Tensor], Tensor]:
+ def forward(
+ self, x: Tensor
+ ) -> Tuple[List[Tensor], Tuple[Tensor, Tensor], Tensor]:
future1 = torch.jit._fork(self.traced, x)
future2 = torch.jit._fork(torch.neg, x)
@@ -284,10 +290,16 @@ class TestAsync(JitTestCase):
module = torch.jit.trace(TupleCl(), (x), _force_outplace=True)
# Make sure we have forks
- self.assertGraphContainsExactly(module.graph, kind='prim::fork', num_kind_nodes=2)
+ self.assertGraphContainsExactly(
+ module.graph, kind="prim::fork", num_kind_nodes=2
+ )
# Make sure 1 ::neg is in the root graph and 2 ::negs are in the subgraphs
- self.assertGraphContainsExactly(module.graph, kind='aten::neg', num_kind_nodes=1)
- self.assertGraphContainsExactly(module.graph, kind='aten::neg', num_kind_nodes=3, consider_subgraphs=True)
+ self.assertGraphContainsExactly(
+ module.graph, kind="aten::neg", num_kind_nodes=1
+ )
+ self.assertGraphContainsExactly(
+ module.graph, kind="aten::neg", num_kind_nodes=3, consider_subgraphs=True
+ )
y = torch.neg(x)
self.assertEqual(module(x), (y, y, y, y, x, x))
@@ -311,19 +323,23 @@ class TestAsync(JitTestCase):
return torch.jit._wait(fut)
# no future
- error_msg = 'The size.*must match the size of tensor'
- with self.assertRaisesRegexWithHighlight(Exception, error_msg, 'x.t() + x'):
+ error_msg = "The size.*must match the size of tensor"
+ with self.assertRaisesRegexWithHighlight(Exception, error_msg, "x.t() + x"):
foo(x)
# one future
- with self.assertRaisesRegexWithHighlight(Exception, error_msg, 'torch.jit._fork(foo, x'):
+ with self.assertRaisesRegexWithHighlight(
+ Exception, error_msg, "torch.jit._fork(foo, x"
+ ):
wait_script(x)
# two futures with a different error
x = torch.rand(3, 4, 5)
- with self.assertRaisesRegexWithHighlight(Exception,
- 'expects a tensor with <= 2 dimensions',
- 'torch.jit._fork(wait_script, x'):
+ with self.assertRaisesRegexWithHighlight(
+ Exception,
+ "expects a tensor with <= 2 dimensions",
+ "torch.jit._fork(wait_script, x",
+ ):
wait_script_nest(x)
def test_async_grad_guard_with_grad(self):
@@ -381,9 +397,15 @@ class TestAsync(JitTestCase):
x = torch.rand(3, 4)
self.assertEqual(fn(x), traced(x))
- self.assertGraphContainsExactly(traced.graph, kind='prim::fork', num_kind_nodes=1)
- self.assertGraphContainsExactly(traced.graph, kind='aten::wait', num_kind_nodes=1)
- self.assertGraphContainsExactly(traced.graph, kind='aten::neg', num_kind_nodes=2, consider_subgraphs=True)
+ self.assertGraphContainsExactly(
+ traced.graph, kind="prim::fork", num_kind_nodes=1
+ )
+ self.assertGraphContainsExactly(
+ traced.graph, kind="aten::wait", num_kind_nodes=1
+ )
+ self.assertGraphContainsExactly(
+ traced.graph, kind="aten::neg", num_kind_nodes=2, consider_subgraphs=True
+ )
def test_trace_fork_wait_leaking(self):
my_list = []
@@ -397,9 +419,13 @@ class TestAsync(JitTestCase):
val = torch.jit._wait(fut)
return my_list[0]
- with self.assertRaisesRegexWithHighlight(RuntimeError, 'did not have observable data dependence with trace inputs; '
- 'this probably indicates your program cannot be understood '
- 'by the tracer.', ''):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError,
+ "did not have observable data dependence with trace inputs; "
+ "this probably indicates your program cannot be understood "
+ "by the tracer.",
+ "",
+ ):
traced = torch.jit.trace(fn, (torch.rand(3, 4),), check_trace=False)
def test_trace_fork_wait_inline(self):
@@ -413,9 +439,15 @@ class TestAsync(JitTestCase):
traced = torch.jit.trace(fn, (torch.rand(3, 4),))
torch._C._jit_pass_inline_fork_wait(traced.graph)
- self.assertGraphContainsExactly(traced.graph, kind='prim::fork', num_kind_nodes=0)
- self.assertGraphContainsExactly(traced.graph, kind='aten::wait', num_kind_nodes=0)
- self.assertGraphContainsExactly(traced.graph, kind='aten::add', num_kind_nodes=2)
+ self.assertGraphContainsExactly(
+ traced.graph, kind="prim::fork", num_kind_nodes=0
+ )
+ self.assertGraphContainsExactly(
+ traced.graph, kind="aten::wait", num_kind_nodes=0
+ )
+ self.assertGraphContainsExactly(
+ traced.graph, kind="aten::add", num_kind_nodes=2
+ )
def test_trace_fork_wait_list_modulecalls(self):
def add_one(input):
@@ -472,7 +504,10 @@ class TestAsync(JitTestCase):
self.checkTrace(TestModule(), (torch.randn(5, 5),))
def test_no_future_subtype_message(self):
- with self.assertRaisesRegexWithHighlight(RuntimeError, 'Future without a contained type', ''):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "Future without a contained type", ""
+ ):
+
@torch.jit.script
def forward(self, x):
futs = torch.jit.annotate(List[torch.jit.Future], [])
@@ -481,6 +516,7 @@ class TestAsync(JitTestCase):
"""
Test that futures subtype each other properly.
"""
+
# Successful subtyping.
def returns_int(x: int) -> int:
return x + x + 1
@@ -495,10 +531,11 @@ class TestAsync(JitTestCase):
# Unsuccessful subtyping.
with self.assertRaisesRegexWithHighlight(
- RuntimeError,
- r"was annotated as having type Future\[float\] but is actually of type Future\[int\]",
- "fut = returns_future_float(x"
+ RuntimeError,
+ r"was annotated as having type Future\[float\] but is actually of type Future\[int\]",
+ "fut = returns_future_float(x",
):
+
def returns_future_float(x: int) -> torch.jit.Future[float]:
return torch.jit._fork(returns_int, (x))
@@ -508,8 +545,9 @@ class TestAsync(JitTestCase):
return fut.wait()
-
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
diff --git a/test/jit/test_aten_pow.py b/test/jit/test_aten_pow.py
index a287d05720..d227f25250 100644
--- a/test/jit/test_aten_pow.py
+++ b/test/jit/test_aten_pow.py
@@ -3,35 +3,40 @@
import torch
from torch.testing._internal.common_utils import TestCase
+
class TestAtenPow(TestCase):
def test_aten_pow_zero_negative_exponent(self):
- '''
+ """
1. Testing a = int, b = int
- '''
+ """
+
@torch.jit.script
def fn_int_int(a: int, b: int):
- return a ** b
+ return a**b
+
# Existing correct behaviors of aten::pow
- self.assertEqual(fn_int_int(2, 1), 2 ** 1)
- self.assertEqual(fn_int_int(2, 0), 2 ** 0)
+ self.assertEqual(fn_int_int(2, 1), 2**1)
+ self.assertEqual(fn_int_int(2, 0), 2**0)
self.assertEqual(fn_int_int(2, -2), 2 ** (-2))
self.assertEqual(fn_int_int(-2, 2), (-2) ** 2)
self.assertEqual(fn_int_int(-2, 0), (-2) ** 0)
self.assertEqual(fn_int_int(-2, -2), (-2) ** (-2))
self.assertEqual(fn_int_int(-2, -1), (-2) ** (-1))
- self.assertEqual(fn_int_int(0, 2), 0 ** 1)
- self.assertEqual(fn_int_int(0, 0), 0 ** 0)
+ self.assertEqual(fn_int_int(0, 2), 0**1)
+ self.assertEqual(fn_int_int(0, 0), 0**0)
# zero base and negative exponent case that should trigger RunTimeError
self.assertRaises(RuntimeError, fn_int_int, 0, -2)
- '''
+ """
2. Testing a = int, b = float
- '''
+ """
+
@torch.jit.script
def fn_int_float(a: int, b: float):
- return a ** b
+ return a**b
+
# Existing correct behaviors of aten::pow
- self.assertEqual(fn_int_float(2, 2.5), 2 ** 2.5)
+ self.assertEqual(fn_int_float(2, 2.5), 2**2.5)
self.assertEqual(fn_int_float(2, -2.5), 2 ** (-2.5))
self.assertEqual(fn_int_float(2, -0.0), 2 ** (-0.0))
self.assertEqual(fn_int_float(2, 0.0), 2 ** (0.0))
@@ -40,53 +45,57 @@ class TestAtenPow(TestCase):
self.assertEqual(fn_int_float(-2, -3.0), (-2) ** (-3.0))
self.assertEqual(fn_int_float(-2, -0.0), (-2) ** (-0.0))
self.assertEqual(fn_int_float(-2, 0.0), (-2) ** (0.0))
- self.assertEqual(fn_int_float(0, 2.0), 0 ** 2.0)
- self.assertEqual(fn_int_float(0, 0.5), 0 ** 0.5)
- self.assertEqual(fn_int_float(0, 0.0), 0 ** 0.0)
+ self.assertEqual(fn_int_float(0, 2.0), 0**2.0)
+ self.assertEqual(fn_int_float(0, 0.5), 0**0.5)
+ self.assertEqual(fn_int_float(0, 0.0), 0**0.0)
self.assertEqual(fn_int_float(0, -0.0), 0 ** (-0.0))
# zero base and negative exponent case that should trigger RunTimeError
self.assertRaises(RuntimeError, fn_int_float, 0, -2.5)
- '''
+ """
3. Testing a = float, b = int
- '''
+ """
+
@torch.jit.script
def fn_float_int(a: float, b: int):
- return a ** b
+ return a**b
+
# Existing correct behaviors of aten::pow
- self.assertEqual(fn_float_int(2.5, 2), 2.5 ** 2)
+ self.assertEqual(fn_float_int(2.5, 2), 2.5**2)
self.assertEqual(fn_float_int(2.5, -2), 2.5 ** (-2))
self.assertEqual(fn_float_int(2.5, -0), 2.5 ** (-0))
- self.assertEqual(fn_float_int(2.5, 0), 2.5 ** 0)
- self.assertEqual(fn_float_int(-2.5, 2), 2.5 ** 2)
+ self.assertEqual(fn_float_int(2.5, 0), 2.5**0)
+ self.assertEqual(fn_float_int(-2.5, 2), 2.5**2)
self.assertEqual(fn_float_int(-2.5, -2), (-2.5) ** (-2))
self.assertEqual(fn_float_int(-2.5, -3), (-2.5) ** (-3))
self.assertEqual(fn_float_int(-2.5, -0), (-2.5) ** (-0))
self.assertEqual(fn_float_int(-2.5, 0), (-2.5) ** 0)
- self.assertEqual(fn_float_int(0.0, 2), 0 ** 2)
- self.assertEqual(fn_float_int(0.0, 0), 0 ** 0)
+ self.assertEqual(fn_float_int(0.0, 2), 0**2)
+ self.assertEqual(fn_float_int(0.0, 0), 0**0)
self.assertEqual(fn_float_int(0.0, -0), 0 ** (-0))
# zero base and negative exponent case that should trigger RunTimeError
self.assertRaises(RuntimeError, fn_float_int, 0.0, -2)
- '''
+ """
4. Testing a = float, b = float
- '''
+ """
+
@torch.jit.script
def fn_float_float(a: float, b: float):
- return a ** b
+ return a**b
+
# Existing correct behaviors of aten::pow
- self.assertEqual(fn_float_float(2.5, 2.0), 2.5 ** 2.0)
+ self.assertEqual(fn_float_float(2.5, 2.0), 2.5**2.0)
self.assertEqual(fn_float_float(2.5, -2.0), 2.5 ** (-2.0))
self.assertEqual(fn_float_float(2.5, -0.0), 2.5 ** (-0.0))
- self.assertEqual(fn_float_float(2.5, 0.0), 2.5 ** 0.0)
- self.assertEqual(fn_float_float(-2.5, 2.0), 2.5 ** 2.0)
+ self.assertEqual(fn_float_float(2.5, 0.0), 2.5**0.0)
+ self.assertEqual(fn_float_float(-2.5, 2.0), 2.5**2.0)
self.assertEqual(fn_float_float(-2.5, -2.0), (-2.5) ** (-2.0))
self.assertEqual(fn_float_float(-2.5, -3.0), (-2.5) ** (-3.0))
self.assertEqual(fn_float_float(-2.5, -0.0), (-2.5) ** (-0.0))
self.assertEqual(fn_float_float(-2.5, 0.0), (-2.5) ** 0.0)
- self.assertEqual(fn_float_float(0.0, 2.0), 0.0 ** 2.0)
- self.assertEqual(fn_float_float(0.0, 0.0), 0.0 ** 0.0)
+ self.assertEqual(fn_float_float(0.0, 2.0), 0.0**2.0)
+ self.assertEqual(fn_float_float(0.0, 0.0), 0.0**0.0)
self.assertEqual(fn_float_float(0.0, -0.0), 0.0 ** (-0.0))
# zero base and negative exponent case that should trigger RunTimeError
self.assertRaises(RuntimeError, fn_float_float, 0.0, -2.0)
diff --git a/test/jit/test_attr.py b/test/jit/test_attr.py
index 368708e687..d6edc16f5c 100644
--- a/test/jit/test_attr.py
+++ b/test/jit/test_attr.py
@@ -1,20 +1,22 @@
# Owner(s): ["oncall: jit"]
+from typing import NamedTuple, Tuple
+
+import torch
from torch.testing import FileCheck
from torch.testing._internal.jit_utils import JitTestCase
-import torch
-from typing import NamedTuple, Tuple
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
class TestGetDefaultAttr(JitTestCase):
def test_getattr_with_default(self):
-
class A(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -22,7 +24,7 @@ class TestGetDefaultAttr(JitTestCase):
def forward(self, x):
y = getattr(self, "init_attr_val") # noqa: B009
- w : list[float] = [1.0]
+ w: list[float] = [1.0]
z = getattr(self, "missing", w) # noqa: B009
z.append(y)
return z
@@ -32,7 +34,7 @@ class TestGetDefaultAttr(JitTestCase):
graph = torch.jit.script(A()).graph
# The "init_attr_val" attribute exists
- FileCheck().check("prim::GetAttr[name=\"init_attr_val\"]").run(graph)
+ FileCheck().check('prim::GetAttr[name="init_attr_val"]').run(graph)
# The "missing" attribute does not exist, so there should be no corresponding GetAttr in AST
FileCheck().check_not("missing").run(graph)
# instead the getattr call will emit the default value, which is a list with one float element
@@ -46,7 +48,11 @@ class TestGetDefaultAttr(JitTestCase):
y: torch.Tensor
def fn(x: MyTuple) -> Tuple[str, torch.Tensor, int]:
- return getattr(x, "x", "fdsa"), getattr(x, "y", torch.ones((3, 3))), getattr(x, "z", 7)
+ return (
+ getattr(x, "x", "fdsa"),
+ getattr(x, "y", torch.ones((3, 3))),
+ getattr(x, "z", 7),
+ )
inp = MyTuple(x="test", y=torch.ones(3, 3) * 2)
ref = fn(inp)
diff --git a/test/jit/test_autodiff.py b/test/jit/test_autodiff.py
index a77569fb4f..752112e74d 100644
--- a/test/jit/test_autodiff.py
+++ b/test/jit/test_autodiff.py
@@ -1,10 +1,11 @@
# Owner(s): ["oncall: jit"]
+from typing import List
+
import torch
from torch.testing._internal.common_utils import skipIfTorchDynamo
from torch.testing._internal.jit_utils import JitTestCase
-from typing import List
@skipIfTorchDynamo()
@@ -119,7 +120,6 @@ class TestAutodiffJit(JitTestCase):
self.assertEqual(y_s.requires_grad, y.requires_grad)
self.assertEqual(z_s.requires_grad, z.requires_grad)
-
def test_autodiff_requires_grad_nograd(self):
@torch.jit.ignore
def python_fn(x):
diff --git a/test/jit/test_autodiff_subgraph_slicing.py b/test/jit/test_autodiff_subgraph_slicing.py
index 1fb40b6229..20d2b46f81 100644
--- a/test/jit/test_autodiff_subgraph_slicing.py
+++ b/test/jit/test_autodiff_subgraph_slicing.py
@@ -3,26 +3,38 @@
import os
import sys
import unittest
-from torch.testing._internal.common_utils import GRAPH_EXECUTOR, ProfilingMode, \
- num_profiled_runs, enable_profiling_mode_for_profiling_tests
-from torch.testing._internal.common_jit import check_against_reference
+
import torch
+from torch.testing._internal.common_jit import check_against_reference
+from torch.testing._internal.common_utils import (
+ enable_profiling_mode_for_profiling_tests,
+ GRAPH_EXECUTOR,
+ num_profiled_runs,
+ ProfilingMode,
+)
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
-from torch.testing._internal.jit_utils import JitTestCase, disable_autodiff_subgraph_inlining
-from torch.testing import FileCheck
+from typing import List, Optional, Tuple
-from typing import List, Tuple, Optional
-
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
-
-
[email protected](GRAPH_EXECUTOR == ProfilingMode.SIMPLE, "Simple Executor doesn't support gradients")
+from torch.testing import FileCheck
+from torch.testing._internal.jit_utils import (
+ disable_autodiff_subgraph_inlining,
+ JitTestCase,
+)
+
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
+
+
[email protected](
+ GRAPH_EXECUTOR == ProfilingMode.SIMPLE, "Simple Executor doesn't support gradients"
+)
class TestAutodiffSubgraphSlicing(JitTestCase):
# TODO: It is better if we can test directly on graphs instead of the current
# end-to-end fashion.
@@ -35,11 +47,17 @@ class TestAutodiffSubgraphSlicing(JitTestCase):
return ge.graph_for(*inputs)
def assertGraphSize(self, graph, size):
- nodes = list(filter(lambda n: (n.kind() != "prim::BailOut" and
- n.kind() != "prim::BailoutTemplate" and
- n.kind() != "prim::TypeCheck" and
- n.kind() != "prim::RequiresGradCheck"),
- graph.nodes()))
+ nodes = list(
+ filter(
+ lambda n: (
+ n.kind() != "prim::BailOut"
+ and n.kind() != "prim::BailoutTemplate"
+ and n.kind() != "prim::TypeCheck"
+ and n.kind() != "prim::RequiresGradCheck"
+ ),
+ graph.nodes(),
+ )
+ )
self.assertEqual(len(list(nodes)), size)
def test_chunk_constant_script_ad(self):
@@ -52,16 +70,21 @@ class TestAutodiffSubgraphSlicing(JitTestCase):
with disable_autodiff_subgraph_inlining():
with enable_profiling_mode_for_profiling_tests():
output = func(input, profile_and_replay=True)
- FileCheck().check_not("prim::DifferentiableGraph").run(func.graph_for(input))
-
- @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING, "This threshold is only valid for Profiling Executor")
+ FileCheck().check_not("prim::DifferentiableGraph").run(
+ func.graph_for(input)
+ )
+
+ @unittest.skipIf(
+ GRAPH_EXECUTOR != ProfilingMode.PROFILING,
+ "This threshold is only valid for Profiling Executor",
+ )
def test_diff_graph_inline_threshold(self):
with enable_profiling_mode_for_profiling_tests():
NUM_RUNS = 1
with num_profiled_runs(NUM_RUNS):
+
@torch.jit.script
def foo(x):
-
# two nodes should be fused
# see https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/runtime/graph_executor_impl.h#L49
return torch.sigmoid(torch.sigmoid(x))
@@ -78,12 +101,16 @@ class TestAutodiffSubgraphSlicing(JitTestCase):
bar(input)
bar(input)
- self.assertGraphContainsExactly(foo.graph_for(input), 'prim::DifferentiableGraph', 1)
- self.assertGraphContainsExactly(bar.graph_for(input), 'prim::DifferentiableGraph', 0)
+ self.assertGraphContainsExactly(
+ foo.graph_for(input), "prim::DifferentiableGraph", 1
+ )
+ self.assertGraphContainsExactly(
+ bar.graph_for(input), "prim::DifferentiableGraph", 0
+ )
def test_bias_as_module_attr(self):
-
with enable_profiling_mode_for_profiling_tests():
+
class M(torch.nn.Module):
def __init__(self, has_bias):
super().__init__()
@@ -99,19 +126,40 @@ class TestAutodiffSubgraphSlicing(JitTestCase):
scripted_no_bias(x, x)
scripted_no_bias(x, x)
has_bias = M(True)
- check_against_reference(self, scripted_no_bias, no_bias, lambda x: x, (x, x,), check_types=False)
+ check_against_reference(
+ self,
+ scripted_no_bias,
+ no_bias,
+ lambda x: x,
+ (
+ x,
+ x,
+ ),
+ check_types=False,
+ )
scripted_has_bias = torch.jit.script(has_bias)
scripted_has_bias(x, x)
scripted_has_bias(x, x)
scripted_has_bias(x, x)
- check_against_reference(self, scripted_has_bias, has_bias, lambda x: x, (x, x,), check_types=False)
+ check_against_reference(
+ self,
+ scripted_has_bias,
+ has_bias,
+ lambda x: x,
+ (
+ x,
+ x,
+ ),
+ check_types=False,
+ )
def test_constructed_bias(self):
-
with enable_profiling_mode_for_profiling_tests():
+
def method1(x, weight, b1, b2):
bias = b1 * b2
return torch.nn.functional.linear(x, weight, bias)
+
N = 10
x = torch.rand(N, N, requires_grad=True)
weight = torch.rand(N, N, requires_grad=True)
@@ -119,35 +167,58 @@ class TestAutodiffSubgraphSlicing(JitTestCase):
b2 = torch.rand(N, N, requires_grad=True)
scripted = self.checkScript(method1, (x, weight, b1, b2))
# check_types requires last_graph on scripted to be set, so we just skip it
- check_against_reference(self, scripted, method1, lambda x: x, (x, weight, b1, b2), check_types=False)
+ check_against_reference(
+ self,
+ scripted,
+ method1,
+ lambda x: x,
+ (x, weight, b1, b2),
+ check_types=False,
+ )
def test_bias_as_arg(self):
-
with enable_profiling_mode_for_profiling_tests():
+
def method1(x, weight, bias: Optional[torch.Tensor]):
return torch.nn.functional.linear(x, weight, bias).relu() + 2
+
N = 10
x = torch.rand(N, N, requires_grad=True)
weight = torch.rand(N, N, requires_grad=True)
bias = None
scripted = self.checkScript(method1, (x, weight, bias))
# check_types requires last_graph on scripted to be set, so we just skip it
- check_against_reference(self, scripted, method1, lambda x: x, (x, weight, bias), check_types=False)
+ check_against_reference(
+ self,
+ scripted,
+ method1,
+ lambda x: x,
+ (x, weight, bias),
+ check_types=False,
+ )
bias = torch.rand(N, N, requires_grad=True)
scripted = self.checkScript(method1, (x, weight, bias))
# check_types requires last_graph on scripted to be set, so we just skip it
- check_against_reference(self, scripted, method1, lambda x: x, (x, weight, bias), check_types=False)
+ check_against_reference(
+ self,
+ scripted,
+ method1,
+ lambda x: x,
+ (x, weight, bias),
+ check_types=False,
+ )
def test_requires_grad_for_tensor_list(self):
-
with enable_profiling_mode_for_profiling_tests():
-
# output & var_list[0] should have requires_grad set to True
- def func(input0: torch.Tensor, input1: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]:
+ def func(
+ input0: torch.Tensor, input1: torch.Tensor
+ ) -> Tuple[torch.Tensor, List[torch.Tensor]]:
var_list = [input0, input1]
var = torch.cat(var_list)
output = var + 1.0
return output, var_list
+
jit_f = torch.jit.script(func)
input0 = torch.randn((2,), requires_grad=True)
input1 = torch.randn((2,))
@@ -158,12 +229,14 @@ class TestAutodiffSubgraphSlicing(JitTestCase):
assert output_ref[1][0].requires_grad == output[1][0].requires_grad
assert output_ref[1][1].requires_grad == output[1][1].requires_grad
-    @unittest.skip("disable until we properly handle tensor lists with undefined gradients")
+    @unittest.skip(
+        "disable until we properly handle tensor lists with undefined gradients"
+ )
def test_differentiable_graph_ops_requires_grad(self):
x = torch.randn(8, 2, dtype=torch.float).requires_grad_()
y = torch.randn(8, 2, dtype=torch.float)
- def t(x : torch.Tensor, y : torch.Tensor, flag : bool):
+ def t(x: torch.Tensor, y: torch.Tensor, flag: bool):
o = x + 1.0
o1 = torch.relu(o)
o = y + 1.5
@@ -186,13 +259,14 @@ class TestAutodiffSubgraphSlicing(JitTestCase):
return o1, o2, o3, oo1, oo2, oo3
with enable_profiling_mode_for_profiling_tests():
-
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y, False)
jit_o = t_jit(x, y, False)
o = t(x, y, False)
- FileCheck().check("prim::DifferentiableGraph").run(t_jit.graph_for(x, y, False))
+ FileCheck().check("prim::DifferentiableGraph").run(
+ t_jit.graph_for(x, y, False)
+ )
# validate the differentiableGraphOps are marking proper requires_grad
for oo, jit_oo in zip(o, jit_o):
self.assertEqual(oo.requires_grad, jit_oo.requires_grad)
@@ -204,22 +278,28 @@ class TestAutodiffSubgraphSlicing(JitTestCase):
self.assertEqual(oo.requires_grad, jit_oo.requires_grad)
self.assertEqual(oo, jit_oo)
- @unittest.skipIf(GRAPH_EXECUTOR == ProfilingMode.PROFILING, "Simple Executor doesn't support gradients")
+ @unittest.skipIf(
+ GRAPH_EXECUTOR == ProfilingMode.PROFILING,
+ "Simple Executor doesn't support gradients",
+ )
def test_prune_grad(self):
@torch.jit.script
def t(input, bias):
return torch.nn.functional.relu(input + bias)
+
input = torch.randn(2, 8, requires_grad=True)
- bias = torch.randn(8, requires_grad=False) # bias does NOT require grad
+ bias = torch.randn(8, requires_grad=False) # bias does NOT require grad
NUM_PROFILED_RUNS = 1
with num_profiled_runs(NUM_PROFILED_RUNS):
- WARMUP = 3 # 2 runs to reach backward + 1 to optimize it
+ WARMUP = 3 # 2 runs to reach backward + 1 to optimize it
for x in range(WARMUP):
o = t(input, bias)
o.sum().backward()
fwd_plan = list(t.get_debug_state().execution_plans.values())[0]
- bwd_graph = list(fwd_plan.code.grad_executor_states()[0].execution_plans.values())[0].graph
+ bwd_graph = list(
+ fwd_plan.code.grad_executor_states()[0].execution_plans.values()
+ )[0].graph
tup = next(bwd_graph.outputs())
self.assertEqual(len(list(tup.node().inputs())), 1)
@@ -233,7 +313,7 @@ class TestAutodiffSubgraphSlicing(JitTestCase):
graph = self._perform_ad_subgraph_slicing(fn, 1, 1, 1)
self.assertGraphSize(graph, 1)
- self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 1)
+ self.assertGraphContainsExactly(graph, "prim::DifferentiableGraph", 1)
def test_simple_no_merge(self):
# o: autodiff supported. x: not autodiff supported.
@@ -245,8 +325,10 @@ class TestAutodiffSubgraphSlicing(JitTestCase):
graph = self._perform_ad_subgraph_slicing(fn, 1, 1, 1)
g_str = str(graph)
- FileCheck().check("aten::Int").check("aten::zeros").check_not("aten::mul").run(g_str[0:g_str.find("return")])
- self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 1)
+ FileCheck().check("aten::Int").check("aten::zeros").check_not("aten::mul").run(
+ g_str[0 : g_str.find("return")]
+ )
+ self.assertGraphContainsExactly(graph, "prim::DifferentiableGraph", 1)
def test_does_not_merge_unrelated(self):
# o o
@@ -258,7 +340,7 @@ class TestAutodiffSubgraphSlicing(JitTestCase):
graph = self._perform_ad_subgraph_slicing(fn, 1, 1, 1, 1)
self.assertGraphSize(graph, 3)
- self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 2)
+ self.assertGraphContainsExactly(graph, "prim::DifferentiableGraph", 2)
def test_merges_without_cycles(self):
# o --> o --> o
@@ -273,7 +355,7 @@ class TestAutodiffSubgraphSlicing(JitTestCase):
graph = self._perform_ad_subgraph_slicing(fn, 1, 1, 1)
self.assertGraphSize(graph, 1)
- self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 1)
+ self.assertGraphContainsExactly(graph, "prim::DifferentiableGraph", 1)
def test_merges_dense(self):
# o o
@@ -290,7 +372,7 @@ class TestAutodiffSubgraphSlicing(JitTestCase):
graph = self._perform_ad_subgraph_slicing(fn, 2, 2)
self.assertGraphSize(graph, 2)
- self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 1)
+ self.assertGraphContainsExactly(graph, "prim::DifferentiableGraph", 1)
def test_does_not_create_cycles(self):
# o --> x --> o
@@ -303,7 +385,7 @@ class TestAutodiffSubgraphSlicing(JitTestCase):
return c
graph = self._perform_ad_subgraph_slicing(fn, 1, 1, 1)
- self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 2)
+ self.assertGraphContainsExactly(graph, "prim::DifferentiableGraph", 2)
def test_merges_up(self):
# o --> x o
@@ -317,8 +399,8 @@ class TestAutodiffSubgraphSlicing(JitTestCase):
graph = self._perform_ad_subgraph_slicing(fn, 1, 1, 1, 1)
g_str = str(graph)
- FileCheck().check_not("aten::add").run(g_str[0:g_str.find("return")])
- self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 1)
+ FileCheck().check_not("aten::add").run(g_str[0 : g_str.find("return")])
+ self.assertGraphContainsExactly(graph, "prim::DifferentiableGraph", 1)
def test_merges_down(self):
# o x --> o
@@ -335,8 +417,8 @@ class TestAutodiffSubgraphSlicing(JitTestCase):
num_nodes = 4 if GRAPH_EXECUTOR == ProfilingMode.PROFILING else 3
# add moved down
g_str = str(graph)
- FileCheck().check_not("aten::add").run(g_str[0:g_str.find("return")])
- self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 1)
+ FileCheck().check_not("aten::add").run(g_str[0 : g_str.find("return")])
+ self.assertGraphContainsExactly(graph, "prim::DifferentiableGraph", 1)
def test_respects_lexical_scoping(self):
def fn(x, k):
@@ -346,12 +428,10 @@ class TestAutodiffSubgraphSlicing(JitTestCase):
z = y * k
return z, k
-
graph = self._perform_ad_subgraph_slicing(fn, 1, 1)
# We should not have combined the two multiplications into
# the same group; they should each be a separate DiffGraph
- self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 3)
-
+ self.assertGraphContainsExactly(graph, "prim::DifferentiableGraph", 3)
def test_merge_respects_aliasing(self):
def fn(x, k, cond):
@@ -368,15 +448,13 @@ class TestAutodiffSubgraphSlicing(JitTestCase):
graph = self._perform_ad_subgraph_slicing(fn, [2, 2], [2, 2], 1)
        # z2 did not get merged into the subgraph
- FileCheck().check("prim::If").check("aten::select").check_next("aten::select")\
- .check_next("aten::add_").check("Differentiable").run(graph)
- self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 2)
+ FileCheck().check("prim::If").check("aten::select").check_next(
+ "aten::select"
+ ).check_next("aten::add_").check("Differentiable").run(graph)
+ self.assertGraphContainsExactly(graph, "prim::DifferentiableGraph", 2)
def test_aliased_outputs(self):
-
with enable_profiling_mode_for_profiling_tests():
-
-
# Case 1: aliasing between relu and t
# is within a DifferentiableGraph. It should be valid
# to merge both split_with_sizes in relu in one graph
@@ -389,9 +467,9 @@ class TestAutodiffSubgraphSlicing(JitTestCase):
graph = torch._C.parse_ir(input_str)
torch._C._jit_pass_create_autodiff_subgraphs(graph, 1)
- FileCheck().check("with prim::DifferentiableGraph") \
- .check("aten::relu").check("aten::t") \
- .run(graph)
+ FileCheck().check("with prim::DifferentiableGraph").check(
+ "aten::relu"
+ ).check("aten::t").run(graph)
# Case 2: aliasing between relu and split_with_sizes
# are both outputs of a Diff graph. It should be invalid
@@ -410,11 +488,11 @@ class TestAutodiffSubgraphSlicing(JitTestCase):
graph = torch._C.parse_ir(input_str)
torch._C._jit_pass_create_autodiff_subgraphs(graph, 1)
- FileCheck().check("Tensor = prim::DifferentiableGraph") \
- .check("with prim::DifferentiableGraph") \
- .check("Tensor = aten::relu") \
- .check_not("aten::split_with_sizes") \
- .run(graph)
+ FileCheck().check("Tensor = prim::DifferentiableGraph").check(
+ "with prim::DifferentiableGraph"
+ ).check("Tensor = aten::relu").check_not("aten::split_with_sizes").run(
+ graph
+ )
# Case 3: two aliased nodes in a graph.
# Both `split_with_sizes` should be unfused
@@ -432,11 +510,11 @@ class TestAutodiffSubgraphSlicing(JitTestCase):
graph = torch._C.parse_ir(input_str)
torch._C._jit_pass_create_autodiff_subgraphs(graph, 1)
- FileCheck().check("Tensor = prim::DifferentiableGraph") \
- .check("with prim::DifferentiableGraph") \
- .check("Tensor = aten::relu") \
- .check_not("aten::split_with_sizes") \
- .run(graph)
+ FileCheck().check("Tensor = prim::DifferentiableGraph").check(
+ "with prim::DifferentiableGraph"
+ ).check("Tensor = aten::relu").check_not("aten::split_with_sizes").run(
+ graph
+ )
# Case 4: the aliased output has a descendant
# Both should be unfused. Note, %3 comes before %2
@@ -454,11 +532,9 @@ class TestAutodiffSubgraphSlicing(JitTestCase):
graph = torch._C.parse_ir(input_str)
torch._C._jit_pass_create_autodiff_subgraphs(graph, 1)
- FileCheck().check("Tensor = prim::DifferentiableGraph") \
- .check("with prim::DifferentiableGraph") \
- .check("Tensor = aten::relu") \
- .check_not("aten::t") \
- .run(graph)
+ FileCheck().check("Tensor = prim::DifferentiableGraph").check(
+ "with prim::DifferentiableGraph"
+ ).check("Tensor = aten::relu").check_not("aten::t").run(graph)
# Case 5: multiple aliased groups
# Both should be unfused. Note, %3 comes before %2
@@ -478,11 +554,9 @@ class TestAutodiffSubgraphSlicing(JitTestCase):
graph = torch._C.parse_ir(input_str)
torch._C._jit_pass_create_autodiff_subgraphs(graph, 1)
- FileCheck().check("Tensor = prim::DifferentiableGraph") \
- .check("with prim::DifferentiableGraph") \
- .check("Tensor = aten::relu") \
- .check_not("aten::t") \
- .run(graph)
+ FileCheck().check("Tensor = prim::DifferentiableGraph").check(
+ "with prim::DifferentiableGraph"
+ ).check("Tensor = aten::relu").check_not("aten::t").run(graph)
def test_has_profiled_info_aliasing_outputs(self):
# The expectation is that CallFunction will prevent the final profile node from
@@ -511,9 +585,6 @@ class TestAutodiffSubgraphSlicing(JitTestCase):
output = outputs[0]
self.assertEqual(False, output.requiresGrad())
- FileCheck().check("= prim::DifferentiableGraph") \
- .check("with prim::DifferentiableGraph") \
- .check(" = aten::relu") \
- .check("requires_grad=0") \
- .check("aten::relu") \
- .run(graph)
+ FileCheck().check("= prim::DifferentiableGraph").check(
+ "with prim::DifferentiableGraph"
+ ).check(" = aten::relu").check("requires_grad=0").check("aten::relu").run(graph)
diff --git a/test/jit/test_await.py b/test/jit/test_await.py
index 1500ed27b7..9d77a94698 100644
--- a/test/jit/test_await.py
+++ b/test/jit/test_await.py
@@ -1,18 +1,19 @@
# Owner(s): ["oncall: jit"]
import io
-import torch
-from torch.testing._internal.jit_utils import JitTestCase
-from torch.testing._internal.jit_utils import make_global
from typing import List, Optional, Tuple
+
+import torch
from torch import Tensor
from torch._awaits import _Await as Await
+from torch.testing._internal.jit_utils import JitTestCase, make_global
class TestAwait(JitTestCase):
def test_await_python(self):
def foo(x: int) -> int:
return x + 13
+
aw: Await[int] = torch.jit._awaitable(foo, 13)
self.assertTrue(aw.fn()(*aw.args()) == torch.jit._awaitable_wait(aw))
nw = torch.jit._awaitable_nowait(33)
@@ -22,6 +23,7 @@ class TestAwait(JitTestCase):
def test_await_type_python(self):
def foo() -> Tensor:
return torch.randn()
+
awaits = torch.jit.annotate(List[Await[Tensor]], [])
awaits.append(torch.jit._awaitable(foo))
@@ -82,9 +84,7 @@ class TestAwait(JitTestCase):
self.assertTrue(torch.allclose(torch.eye(2), script_out))
self.assertTrue(torch.allclose(script_out, out))
-
def test_await_class_arg(self):
-
class C:
def __init__(self, a: Tensor, b: Tensor):
self.__a = a
@@ -104,6 +104,7 @@ class TestAwait(JitTestCase):
_a = torch.eye(2)
c2_t = torch.jit._awaitable_wait(aw)
return _a + c2_t + x
+
inp = torch.zeros(2)
sm = torch.jit.script(fn)
@@ -120,7 +121,6 @@ class TestAwait(JitTestCase):
self._a = a
self._b = b
-
make_global(C)
# Can not stay in the class as Jit does not support Recursive annotations
@@ -143,7 +143,6 @@ class TestAwait(JitTestCase):
self.assertTrue(torch.allclose(script_out, out))
def test_await_class_return(self):
-
class C:
__slots__ = ["a", "b"]
@@ -151,7 +150,6 @@ class TestAwait(JitTestCase):
self.a = a
self.b = b
-
make_global(C)
# Can not stay in the class as Jit does not support Recursive annotations
@@ -175,7 +173,9 @@ class TestAwait(JitTestCase):
script_out = sm(inp)
self.assertTrue(torch.allclose(torch.eye(2) + 6 * torch.ones(2), script_out))
self.assertTrue(torch.allclose(script_out, out))
- self.assertGraphContainsExactly(sm.graph, kind='prim::awaitable_wait', num_kind_nodes=1)
+ self.assertGraphContainsExactly(
+ sm.graph, kind="prim::awaitable_wait", num_kind_nodes=1
+ )
def test_await_getattr_implicit_convertion(self):
class C:
@@ -186,7 +186,6 @@ class TestAwait(JitTestCase):
def b(self):
return self._b
-
make_global(C)
# Can not stay in the class as Jit does not support Recursive annotations
@@ -212,10 +211,11 @@ class TestAwait(JitTestCase):
script_out = sm(inp)
self.assertTrue(torch.allclose(torch.eye(2) + 7 * torch.ones(2), script_out))
self.assertTrue(torch.allclose(script_out, out))
- self.assertGraphContainsExactly(sm.graph, kind='prim::awaitable_wait', num_kind_nodes=2)
+ self.assertGraphContainsExactly(
+ sm.graph, kind="prim::awaitable_wait", num_kind_nodes=2
+ )
def test_await_nested(self):
-
class C:
def __init__(self, a: Tensor, b: Tensor):
self.__a = a
@@ -250,6 +250,7 @@ class TestAwait(JitTestCase):
def __init__(self, v):
self.parent = torch.jit.annotate(Optional[Tree], None)
self.v = v
+
make_global(Tree)
def delayed(t: Tree):
@@ -275,12 +276,15 @@ class TestAwait(JitTestCase):
sm = torch.jit.script(main)
out = main(inp)
script_out = sm(inp)
- self.assertTrue(torch.allclose(2 * torch.eye(2) + 2 * torch.ones(2), script_out))
+ self.assertTrue(
+ torch.allclose(2 * torch.eye(2) + 2 * torch.ones(2), script_out)
+ )
self.assertTrue(torch.allclose(script_out, out))
def test_await_eager_lazy(self):
def delayed(x: Tensor) -> Tensor:
return 2 * (x + 1)
+
t = torch.ones(2, dtype=torch.int64)
aw = torch.jit._awaitable(delayed, t)
self.assertTrue(isinstance(aw, torch._C._Await))
@@ -302,7 +306,9 @@ class TestAwait(JitTestCase):
script_out_aw = sm(inp)
script_out = torch.jit._awaitable_wait(script_out_aw)
- self.assertTrue(torch.allclose(2 * torch.eye(2) + 2 * torch.ones(2), script_out))
+ self.assertTrue(
+ torch.allclose(2 * torch.eye(2) + 2 * torch.ones(2), script_out)
+ )
self.assertTrue(torch.allclose(script_out, out))
def test_jit_trace(self):
diff --git a/test/jit/test_backend_nnapi.py b/test/jit/test_backend_nnapi.py
index 8ca4083e5f..289827ecd3 100644
--- a/test/jit/test_backend_nnapi.py
+++ b/test/jit/test_backend_nnapi.py
@@ -3,10 +3,10 @@
import os
import sys
import unittest
+from pathlib import Path
import torch
import torch._C
-from pathlib import Path
from torch.testing._internal.common_utils import IS_FBCODE, skipIfTorchDynamo
# hacky way to skip these tests in fbcode:
@@ -15,9 +15,11 @@ from torch.testing._internal.common_utils import IS_FBCODE, skipIfTorchDynamo
# it sees tests but then fails when it tries to actually run them.
if not IS_FBCODE:
from test_nnapi import TestNNAPI
+
HAS_TEST_NNAPI = True
else:
from torch.testing._internal.common_utils import TestCase as TestNNAPI
+
HAS_TEST_NNAPI = False
@@ -39,10 +41,14 @@ without the delegate API.
"""
# First skip is needed for IS_WINDOWS or IS_MACOS to skip the tests.
torch_root = Path(__file__).resolve().parent.parent.parent
-lib_path = torch_root / 'build' / 'lib' / 'libnnapi_backend.so'
+lib_path = torch_root / "build" / "lib" / "libnnapi_backend.so"
+
+
@skipIfTorchDynamo("weird py38 failures")
[email protected](not os.path.exists(lib_path),
- "Skipping the test as libnnapi_backend.so was not found")
[email protected](
+ not os.path.exists(lib_path),
+ "Skipping the test as libnnapi_backend.so was not found",
+)
@unittest.skipIf(IS_FBCODE, "test_nnapi.py not found")
class TestNnapiBackend(TestNNAPI):
def setUp(self):
@@ -89,35 +95,44 @@ method_compile_spec must use the following format:
# No forward key
compile_spec = {"backward": {"inputs": args}}
- with self.assertRaisesRegex(RuntimeError, "method_compile_spec does not contain the \"forward\" key." + errorMsgTail):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ 'method_compile_spec does not contain the "forward" key.' + errorMsgTail,
+ ):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No dictionary under the forward key
compile_spec = {"forward": 1}
- with self.assertRaisesRegex(RuntimeError,
- "method_compile_spec does not contain a dictionary with an \"inputs\" key, "
- "under it's \"forward\" key."
- + errorMsgTail):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ 'method_compile_spec does not contain a dictionary with an "inputs" key, '
+ 'under it\'s "forward" key.' + errorMsgTail,
+ ):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No inputs key (in the dictionary under the forward key)
compile_spec = {"forward": {"not inputs": args}}
- with self.assertRaisesRegex(RuntimeError,
- "method_compile_spec does not contain a dictionary with an \"inputs\" key, "
- "under it's \"forward\" key."
- + errorMsgTail):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ 'method_compile_spec does not contain a dictionary with an "inputs" key, '
+ 'under it\'s "forward" key.' + errorMsgTail,
+ ):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No Tensor or TensorList under the inputs key
compile_spec = {"forward": {"inputs": 1}}
- with self.assertRaisesRegex(RuntimeError,
- "method_compile_spec does not contain either a Tensor or TensorList, under it's \"inputs\" key."
- + errorMsgTail):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ 'method_compile_spec does not contain either a Tensor or TensorList, under it\'s "inputs" key.'
+ + errorMsgTail,
+ ):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
compile_spec = {"forward": {"inputs": [1]}}
- with self.assertRaisesRegex(RuntimeError,
- "method_compile_spec does not contain either a Tensor or TensorList, under it's \"inputs\" key."
- + errorMsgTail):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ 'method_compile_spec does not contain either a Tensor or TensorList, under it\'s "inputs" key.'
+ + errorMsgTail,
+ ):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
def tearDown(self):
diff --git a/test/jit/test_backends.py b/test/jit/test_backends.py
index afd59e3e94..493d34f3eb 100644
--- a/test/jit/test_backends.py
+++ b/test/jit/test_backends.py
@@ -1,6 +1,5 @@
# Owner(s): ["oncall: jit"]
-from torch.testing._internal.jit_utils import JitTestCase
import io
import os
import sys
@@ -8,18 +7,20 @@ import unittest
import torch
import torch._C
-from torch.testing import FileCheck
from torch.jit.mobile import _load_for_lite_interpreter
+from torch.testing import FileCheck
from torch.testing._internal.common_utils import (
+ find_library_location,
IS_FBCODE,
IS_MACOS,
IS_SANDCASTLE,
IS_WINDOWS,
- TEST_WITH_ROCM,
skipIfRocm,
- find_library_location,
+ TEST_WITH_ROCM,
)
+from torch.testing._internal.jit_utils import JitTestCase
+
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
@@ -33,7 +34,9 @@ if __name__ == "__main__":
def to_test_backend(module, method_compile_spec):
- return torch._C._jit_to_backend("test_backend", module, {"forward": method_compile_spec})
+ return torch._C._jit_to_backend(
+ "test_backend", module, {"forward": method_compile_spec}
+ )
def to_test_backend_multi(module, method_compile_spec):
@@ -63,8 +66,10 @@ class BasicModule(torch.nn.Module):
# This is ignored in IS_WINDOWS or IS_MACOS cases. Hence we need the one in TestBackends.
[email protected](TEST_WITH_ROCM or IS_SANDCASTLE or IS_WINDOWS or IS_MACOS or IS_FBCODE,
- "Non-portable load_library call used in test")
[email protected](
+ TEST_WITH_ROCM or IS_SANDCASTLE or IS_WINDOWS or IS_MACOS or IS_FBCODE,
+ "Non-portable load_library call used in test",
+)
class JitBackendTestCase(JitTestCase):
"""
A common base class for JIT backend tests that contains common utility
@@ -73,7 +78,7 @@ class JitBackendTestCase(JitTestCase):
def setUp(self):
super().setUp()
- lib_file_path = find_library_location('libjitbackend_test.so')
+ lib_file_path = find_library_location("libjitbackend_test.so")
torch.ops.load_library(str(lib_file_path))
# Subclasses are expected to set up three variables in their setUp methods:
# module - a regular, Python version of the module being tested
@@ -154,13 +159,17 @@ class BasicModuleTest(JitBackendTestCase):
self.test_execution()
# Save the compile spec to compare against the version retrieved after loading.
- pre_compile_spec = self.lowered_module.__getattr__("__loweredModule__").__getattr__("__method_compile_spec")
+ pre_compile_spec = self.lowered_module.__getattr__(
+ "__loweredModule__"
+ ).__getattr__("__method_compile_spec")
# Save and load the lowered module.
self.save_load()
# Get the compile spec after loading.
- post_compile_spec = self.lowered_module.__getattr__("__loweredModule__").__getattr__("__method_compile_spec")
+ post_compile_spec = self.lowered_module.__getattr__(
+ "__loweredModule__"
+ ).__getattr__("__method_compile_spec")
# Compile specs should match.
self.assertEqual(pre_compile_spec, post_compile_spec)
@@ -195,9 +204,11 @@ class BasicModuleUnavailableTest(JitBackendTestCase):
input = torch.randn(5)
# Test exception is thrown.
- with self.assertRaisesRegexWithHighlight(Exception,
- r"Backend is not available.",
- "raise Exception(\"Backend is not available.\""):
+ with self.assertRaisesRegexWithHighlight(
+ Exception,
+ r"Backend is not available.",
+ 'raise Exception("Backend is not available."',
+ ):
backend_method = self.lowered_module.__getattr__("forward")
backend_output = backend_method(*(input, input))
@@ -207,9 +218,11 @@ class BasicModuleUnavailableTest(JitBackendTestCase):
buffer = io.BytesIO()
torch.jit.save(self.lowered_module, buffer)
buffer.seek(0)
- with self.assertRaisesRegexWithHighlight(Exception,
- r"Backend is not available.",
- "raise Exception(\"Backend is not available.\""):
+ with self.assertRaisesRegexWithHighlight(
+ Exception,
+ r"Backend is not available.",
+ 'raise Exception("Backend is not available."',
+ ):
imported = torch.jit.load(buffer)
@@ -218,6 +231,7 @@ class NestedModuleTest(JitBackendTestCase):
Tests for NestedModule that check that a module lowered to a backend can be used
as a submodule.
"""
+
class NestedModule(torch.nn.Module):
"""
A Module with one submodule that is used to test that lowered Modules
@@ -237,7 +251,9 @@ class NestedModuleTest(JitBackendTestCase):
# Both modules in self.module are regular Python modules.
self.module = NestedModuleTest.NestedModule(BasicModule())
# Both modules in self.scripted_module are ScriptModules.
- self.scripted_module = torch.jit.script(NestedModuleTest.NestedModule(BasicModule()))
+ self.scripted_module = torch.jit.script(
+ NestedModuleTest.NestedModule(BasicModule())
+ )
# First, script another instance of NestedModule with share_types=False so that it can be
# selectively lowered without modifying the type of self.scripted_module.
@@ -246,7 +262,9 @@ class NestedModuleTest(JitBackendTestCase):
{"accum": {"": ""}, "sub_accum": {"": ""}, "forward": {"": ""}},
)
# self.lowered_module is a ScriptModule, but its submodule is a lowered module.
- self.lowered_module = torch.jit.script(NestedModuleTest.NestedModule(lowered_module))
+ self.lowered_module = torch.jit.script(
+ NestedModuleTest.NestedModule(lowered_module)
+ )
def test_execution(self):
# Test execution with backend against Python and JIT.
@@ -270,6 +288,7 @@ class SelectiveLoweringTest(JitBackendTestCase):
"""
Tests for the selective lowering API.
"""
+
class OuterModule(torch.nn.Module):
def __init__(self, sub1, sub2, other):
super().__init__()
@@ -299,7 +318,10 @@ class SelectiveLoweringTest(JitBackendTestCase):
MiddleModule = SelectiveLoweringTest.MiddleModule
def script_without_type_sharing(mod):
- return torch.jit._recursive.create_script_module(mod, torch.jit._recursive.infer_methods_to_compile, share_types=False)
+ return torch.jit._recursive.create_script_module(
+ mod, torch.jit._recursive.infer_methods_to_compile, share_types=False
+ )
+
# Create Python, JIT and backend versions of a hierarchy that looks like this:
# --------- OuterModule --------
# | | |
@@ -308,13 +330,28 @@ class SelectiveLoweringTest(JitBackendTestCase):
# BasicModule BasicModule BasicModule
#
# Two BasicModules will be lowered and the third will not.
- self.module = OuterModule(MiddleModule(BasicModule()), MiddleModule(BasicModule()), MiddleModule(BasicModule()))
- self.scripted_module = script_without_type_sharing(OuterModule(MiddleModule(
- BasicModule()), MiddleModule(BasicModule()), MiddleModule(BasicModule())))
- self.lowered_module = script_without_type_sharing(OuterModule(MiddleModule(
- BasicModule()), MiddleModule(BasicModule()), MiddleModule(BasicModule())))
- self.lowered_module = to_test_backend_selective(self.lowered_module, {"forward": ""}, [
- "sub1.submodule", "sub2.submodule"])
+ self.module = OuterModule(
+ MiddleModule(BasicModule()),
+ MiddleModule(BasicModule()),
+ MiddleModule(BasicModule()),
+ )
+ self.scripted_module = script_without_type_sharing(
+ OuterModule(
+ MiddleModule(BasicModule()),
+ MiddleModule(BasicModule()),
+ MiddleModule(BasicModule()),
+ )
+ )
+ self.lowered_module = script_without_type_sharing(
+ OuterModule(
+ MiddleModule(BasicModule()),
+ MiddleModule(BasicModule()),
+ MiddleModule(BasicModule()),
+ )
+ )
+ self.lowered_module = to_test_backend_selective(
+ self.lowered_module, {"forward": ""}, ["sub1.submodule", "sub2.submodule"]
+ )
def test_execution(self):
input = torch.randn(5)
@@ -335,93 +372,93 @@ class SelectiveLoweringTest(JitBackendTestCase):
"""
# Check that self.lowered_module was not lowered, but that it does contain test_backendLoweredModule due to it
# calling the lowered module directly.
- FileCheck() \
- .check("OuterModule") \
- .check("BasicModule") \
- .run(self.scripted_module.graph)
- FileCheck() \
- .check("OuterModule") \
- .check_not("__torch__.torch.classes.__backends__.test_backend") \
- .check("LoweredWrapper.test_backend") \
- .run(self.lowered_module.graph)
+ FileCheck().check("OuterModule").check("BasicModule").run(
+ self.scripted_module.graph
+ )
+ FileCheck().check("OuterModule").check_not(
+ "__torch__.torch.classes.__backends__.test_backend"
+ ).check("LoweredWrapper.test_backend").run(self.lowered_module.graph)
# Check that self.lowered_module.sub1/sub2 were not lowered but that BasicModule has been replaced in their graphs.
- FileCheck() \
- .check("MiddleModule") \
- .check("BasicModule") \
- .check_not("LoweredWrapper.test_backend") \
- .run(self.scripted_module.sub1.graph)
- FileCheck() \
- .check("MiddleModule") \
- .check_not("__torch__.torch.classes.__backends__.test_backend") \
- .check("LoweredWrapper.test_backend") \
- .run(self.lowered_module.sub1.graph)
-
- FileCheck() \
- .check("MiddleModule") \
- .check("BasicModule") \
- .check_not("LoweredWrapper.test_backend") \
- .run(self.scripted_module.sub2.graph)
- FileCheck() \
- .check("MiddleModule") \
- .check_not("__torch__.torch.classes.__backends__.test_backend") \
- .check("LoweredWrapper.test_backend") \
- .run(self.lowered_module.sub2.graph)
+ FileCheck().check("MiddleModule").check("BasicModule").check_not(
+ "LoweredWrapper.test_backend"
+ ).run(self.scripted_module.sub1.graph)
+ FileCheck().check("MiddleModule").check_not(
+ "__torch__.torch.classes.__backends__.test_backend"
+ ).check("LoweredWrapper.test_backend").run(self.lowered_module.sub1.graph)
+
+ FileCheck().check("MiddleModule").check("BasicModule").check_not(
+ "LoweredWrapper.test_backend"
+ ).run(self.scripted_module.sub2.graph)
+ FileCheck().check("MiddleModule").check_not(
+ "__torch__.torch.classes.__backends__.test_backend"
+ ).check("LoweredWrapper.test_backend").run(self.lowered_module.sub2.graph)
# Check that self.lowered_module.sub1/sub2.submodule were lowered. They should have a new attribute
# __loweredModule__ whose graph should mention __torch__.torch.classes.__backends__.test_backend,
# the TorchBind class for executing functions on the test JIT backend.
- FileCheck() \
- .check("LoweredModule.test_backend") \
- .check("__torch__.torch.classes.__backends__.test_backend") \
- .run(self.lowered_module.sub1.submodule.__loweredModule__.graph)
+ FileCheck().check("LoweredModule.test_backend").check(
+ "__torch__.torch.classes.__backends__.test_backend"
+ ).run(self.lowered_module.sub1.submodule.__loweredModule__.graph)
- FileCheck() \
- .check("LoweredModule.test_backend") \
- .check("__torch__.torch.classes.__backends__.test_backend") \
- .run(self.lowered_module.sub2.submodule.__loweredModule__.graph)
+ FileCheck().check("LoweredModule.test_backend").check(
+ "__torch__.torch.classes.__backends__.test_backend"
+ ).run(self.lowered_module.sub2.submodule.__loweredModule__.graph)
# Check that self.other and self.other.submodule have been left untouched by the selective lowering process.
- FileCheck() \
- .check("MiddleModule") \
- .check("BasicModule") \
- .check_not("__torch__.torch.classes.__backends__.test_backend") \
- .check_not("LoweredWrapper.test_backend") \
- .run(self.scripted_module.other.graph)
- FileCheck() \
- .check("BasicModule") \
- .check_not("__torch__.torch.classes.__backends__.test_backend") \
- .check_not("LoweredModule.test_backend") \
- .run(self.scripted_module.other.submodule.graph)
+ FileCheck().check("MiddleModule").check("BasicModule").check_not(
+ "__torch__.torch.classes.__backends__.test_backend"
+ ).check_not("LoweredWrapper.test_backend").run(self.scripted_module.other.graph)
+ FileCheck().check("BasicModule").check_not(
+ "__torch__.torch.classes.__backends__.test_backend"
+ ).check_not("LoweredModule.test_backend").run(
+ self.scripted_module.other.submodule.graph
+ )
def test_errors(self):
"""
Check errors associated with selective lowering.
"""
# Check error messages thrown when attempting to lower something that is not a ScriptModule.
- with self.assertRaisesRegexWithHighlight(RuntimeError, r"Object .* is not a ScriptModule", ""):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, r"Object .* is not a ScriptModule", ""
+ ):
to_test_backend_selective(torch.nn.ReLU(), {"forward": ""}, ["submodule"])
MiddleModule = SelectiveLoweringTest.MiddleModule
mod = MiddleModule(BasicModule())
mod.new_attr = 3
- with self.assertRaisesRegexWithHighlight(RuntimeError, r"Attribute named new_attr is not a Module", ""):
- to_test_backend_selective(torch.jit.script(mod), {"forward": ""}, ["new_attr"])
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, r"Attribute named new_attr is not a Module", ""
+ ):
+ to_test_backend_selective(
+ torch.jit.script(mod), {"forward": ""}, ["new_attr"]
+ )
# Check error message thrown when module hierarchy doesn't have unique types.
OuterModule = SelectiveLoweringTest.OuterModule
- mod = OuterModule(MiddleModule(BasicModule()), MiddleModule(BasicModule()), MiddleModule(BasicModule()))
+ mod = OuterModule(
+ MiddleModule(BasicModule()),
+ MiddleModule(BasicModule()),
+ MiddleModule(BasicModule()),
+ )
- with self.assertRaisesRegexWithHighlight(RuntimeError,
- r"Selective lowering is only supported for module hierarchies with unique types",
- ""):
- to_test_backend_selective(torch.jit.script(mod), {"forward": ""}, ["sub1.submodule"])
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError,
+ r"Selective lowering is only supported for module hierarchies with unique types",
+ "",
+ ):
+ to_test_backend_selective(
+ torch.jit.script(mod), {"forward": ""}, ["sub1.submodule"]
+ )
# This is needed for IS_WINDOWS or IS_MACOS to skip the tests.
-@unittest.skipIf(TEST_WITH_ROCM or IS_SANDCASTLE or IS_WINDOWS or IS_MACOS or IS_FBCODE,
- "Non-portable load_library call used in test")
+@unittest.skipIf(
+ TEST_WITH_ROCM or IS_SANDCASTLE or IS_WINDOWS or IS_MACOS or IS_FBCODE,
+ "Non-portable load_library call used in test",
+)
class TestBackends(JitTestCase):
"""
This class wraps and invokes all subclasses of JitBackendTestCase so that each one
@@ -461,6 +498,7 @@ class TestBackends(JitTestCase):
def test_errors(self):
self.selective_lowering_test.test_errors()
+
"""
Unit Tests for backend with compiler
This test case and the existing TestBackends are separate because they cover different aspects.
@@ -468,6 +506,8 @@ The actual backend implementation in this test is different.
It has a simple demo compiler to test the end-to-end flow in mobile.
However, this test cannot cover the selective_lowering for now, which is covered in TestBackends.
"""
+
+
class BasicModuleAdd(torch.nn.Module):
"""
A simple add Module used to test to_backend lowering machinery.
@@ -476,9 +516,12 @@ class BasicModuleAdd(torch.nn.Module):
def forward(self, x, h):
return x + h
+
# This is ignored in IS_WINDOWS or IS_MACOS cases. Hence we need the one in TestBackends.
-@unittest.skipIf(TEST_WITH_ROCM or IS_SANDCASTLE or IS_WINDOWS or IS_MACOS or IS_FBCODE,
- "Non-portable load_library call used in test")
+@unittest.skipIf(
+ TEST_WITH_ROCM or IS_SANDCASTLE or IS_WINDOWS or IS_MACOS or IS_FBCODE,
+ "Non-portable load_library call used in test",
+)
class JitBackendTestCaseWithCompiler(JitTestCase):
"""
A common base class for JIT backend tests with compilers that contains common utility
@@ -487,7 +530,7 @@ class JitBackendTestCaseWithCompiler(JitTestCase):
def setUp(self):
super().setUp()
- lib_file_path = find_library_location('libbackend_with_compiler.so')
+ lib_file_path = find_library_location("libbackend_with_compiler.so")
torch.ops.load_library(str(lib_file_path))
# Subclasses are expected to set up four variables in their setUp methods:
# module - a regular, Python version of the module being tested
@@ -524,6 +567,7 @@ class JitBackendTestCaseWithCompiler(JitTestCase):
"""
pass
+
class BasicModuleTestWithCompiler(JitBackendTestCaseWithCompiler):
"""
Tests for BasicModuleAdd.
@@ -541,7 +585,8 @@ class BasicModuleTestWithCompiler(JitBackendTestCaseWithCompiler):
},
}
self.lowered_module = torch._C._jit_to_backend(
- "backend_with_compiler_demo", self.scripted_module, compile_spec)
+ "backend_with_compiler_demo", self.scripted_module, compile_spec
+ )
# Create mobile version of BasicModuleAdd
buffer = io.BytesIO(self.lowered_module._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
@@ -552,6 +597,7 @@ class BasicModuleTestWithCompiler(JitBackendTestCaseWithCompiler):
input = torch.ones(1, dtype=torch.float)
self.check_forward((input, input))
+
class ErrorMessagesWithCompiler(JitBackendTestCase):
"""
Tests for errors that occur with compiler, specifically:
@@ -562,22 +608,31 @@ class ErrorMessagesWithCompiler(JitBackendTestCase):
"""
A module with an operator that is not supported.
"""
+
def forward(self, x, h):
return x * h
self._loweredmodule.forward()
def test_errors(self):
- scripted_module_n = torch.jit.script(ErrorMessagesWithCompiler.ModuleNotSupported())
+ scripted_module_n = torch.jit.script(
+ ErrorMessagesWithCompiler.ModuleNotSupported()
+ )
# Test exception is thrown when lowering a module with an unsupported operator
- with self.assertRaisesRegexWithHighlight(RuntimeError,
- # Special escape characters are replaced with '.'
- r"""The node of aten::mul is not supported in this compiler. .*
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError,
+ # Special escape characters are replaced with '.'
+ r"""The node of aten::mul is not supported in this compiler. .*
def forward.self, x, h.:
return x . h
~~~~~ <--- HERE
self._loweredmodule.forward..
-""", ""):
- lowered_module_n = torch._C._jit_to_backend("backend_with_compiler_demo", scripted_module_n, {"forward": {"": ""}})
+""",
+ "",
+ ):
+ lowered_module_n = torch._C._jit_to_backend(
+ "backend_with_compiler_demo", scripted_module_n, {"forward": {"": ""}}
+ )
+
class CompModuleTestWithCompiler(JitBackendTestCase):
"""
@@ -588,6 +643,7 @@ class CompModuleTestWithCompiler(JitBackendTestCase):
"""
A simple subtraction Module to be used in CompModule.
"""
+
def forward(self, x, h):
return x - h
@@ -617,14 +673,19 @@ class CompModuleTestWithCompiler(JitBackendTestCase):
},
}
lowered_add = torch._C._jit_to_backend(
- "backend_with_compiler_demo", torch.jit.script(BasicModuleAdd()), compile_spec)
+ "backend_with_compiler_demo",
+ torch.jit.script(BasicModuleAdd()),
+ compile_spec,
+ )
lowered_sub = torch._C._jit_to_backend(
"backend_with_compiler_demo",
torch.jit.script(CompModuleTestWithCompiler.BasicModuleSub()),
- {"forward": {"": ""}}
+ {"forward": {"": ""}},
)
self.module = CompModuleTestWithCompiler.CompModule(lowered_add, lowered_sub)
- self.scripted_module = torch.jit.script(CompModuleTestWithCompiler.CompModule(lowered_add, lowered_sub))
+ self.scripted_module = torch.jit.script(
+ CompModuleTestWithCompiler.CompModule(lowered_add, lowered_sub)
+ )
# No backend version of CompModule currently, so this is filler.
self.lowered_module = self.scripted_module
# Create a mobile version of CompModule from JIT version
@@ -640,9 +701,12 @@ class CompModuleTestWithCompiler(JitBackendTestCase):
# Test forward.
self.check_function("forward", (input1, input2, input2))
+
# This is needed for IS_WINDOWS or IS_MACOS to skip the tests.
-@unittest.skipIf(IS_SANDCASTLE or IS_WINDOWS or IS_MACOS or IS_FBCODE,
- "Non-portable load_library call used in test")
+@unittest.skipIf(
+ IS_SANDCASTLE or IS_WINDOWS or IS_MACOS or IS_FBCODE,
+ "Non-portable load_library call used in test",
+)
class TestBackendsWithCompiler(JitTestCase):
"""
This class wraps and invokes all subclasses of JitBackendTestCaseWithCompiler
@@ -711,7 +775,6 @@ class CompModuleTestSameNameWithCompiler(JitBackendTestCase):
y = s * (c * d)
return y
-
def setUp(self):
super().setUp()
@@ -728,6 +791,7 @@ class CompModuleTestSameNameWithCompiler(JitBackendTestCase):
# Test forward.
self.check_function("forward", (a, b, s))
+
class AddedAttributesTest(JitBackendTestCase):
"""
Tests for adding attributes to a model after lowering.
@@ -747,11 +811,19 @@ class AddedAttributesTest(JitBackendTestCase):
input = [(torch.ones(5),)]
pre_bundled = self.lowered_module(*input[0])
# Attach bundled inputs which adds several attributes and functions to the model
- self.lowered_module = torch.utils.bundled_inputs.augment_model_with_bundled_inputs(lowered_module, input) # noqa: F821
- post_bundled = self.lowered_module(*self.lowered_module.get_all_bundled_inputs()[0])
+ self.lowered_module = (
+ torch.utils.bundled_inputs.augment_model_with_bundled_inputs(
+ lowered_module, input # noqa: F821
+ )
+ )
+ post_bundled = self.lowered_module(
+ *self.lowered_module.get_all_bundled_inputs()[0]
+ )
# Save and load the lowered module.
self.save_load()
# Use bundled after save and load to prove its preserved
- post_load = self.lowered_module(*self.lowered_module.get_all_bundled_inputs()[0])
+ post_load = self.lowered_module(
+ *self.lowered_module.get_all_bundled_inputs()[0]
+ )
self.assertEqual(pre_bundled, post_bundled)
self.assertEqual(post_bundled, post_load)
diff --git a/test/jit/test_batch_mm.py b/test/jit/test_batch_mm.py
index 517a05e132..5605531b47 100644
--- a/test/jit/test_batch_mm.py
+++ b/test/jit/test_batch_mm.py
@@ -56,7 +56,6 @@ class TestBatchMM(JitTestCase):
actual = test_batch_mm_scripted(*tensors)
self.assertEqual(expected, actual, atol=1e-9, rtol=1e-9)
-
def test_batch_mm_permitted_mutation(self):
def test_batch_mm(
T1: torch.Tensor,
diff --git a/test/jit/test_builtins.py b/test/jit/test_builtins.py
index f68642ac3b..887ab39d0f 100644
--- a/test/jit/test_builtins.py
+++ b/test/jit/test_builtins.py
@@ -1,8 +1,8 @@
# Owner(s): ["oncall: jit"]
+import inspect
import os
import sys
-import inspect
import unittest
from typing import Dict, List
@@ -14,10 +14,12 @@ pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, RUN_CUDA
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
class TestBuiltins(JitTestCase):
@@ -86,24 +88,27 @@ class TestBuiltins(JitTestCase):
self.checkScript(fn, ([1, 2, 3],))
with self.assertRaisesRegexWithHighlight(RuntimeError, "undefined value", "a"):
+
@torch.jit.script
def fn(x):
- a = x ** 2
+ a = x**2
del a
return a # noqa: F821
with self.assertRaisesRegexWithHighlight(RuntimeError, "undefined value", "a"):
+
@torch.jit.script
def fn(x):
- a = x ** 2
+ a = x**2
if a:
del a
return a
with self.assertRaisesRegexWithHighlight(RuntimeError, "undefined value", "b"):
+
@torch.jit.script
def fn(x):
- a = x ** 2
+ a = x**2
del b # noqa: F821
return a
@@ -124,7 +129,7 @@ class TestBuiltins(JitTestCase):
self.assertEqual(py_out, jit_out)
def del_dict_multiple_operands(x: Dict[str, int]) -> Dict[str, int]:
- del x['hi'], x['there']
+ del x["hi"], x["there"]
return x
py_out = del_dict_multiple_operands({"hi": 5, "there": 6})
@@ -137,7 +142,7 @@ class TestTensorBuiltins(JitTestCase):
def should_keep(tensor, name):
if inspect.isroutine(getattr(tensor, name)):
return False
- if name.startswith('_'):
+ if name.startswith("_"):
return False
return True
@@ -145,8 +150,8 @@ class TestTensorBuiltins(JitTestCase):
keys = dir(tensor)
# real and imag are only implemented for complex tensors.
- self.assertRaises(RuntimeError, lambda: should_keep(tensor, 'imag'))
- keys.remove('imag')
+ self.assertRaises(RuntimeError, lambda: should_keep(tensor, "imag"))
+ keys.remove("imag")
properties = [p for p in keys if should_keep(tensor, p)]
@@ -158,16 +163,16 @@ class TestTensorBuiltins(JitTestCase):
EQUALITY_MISMATCH = {
# TorchScript doesn't have real enums so they return an int instead
# of the actual value
- 'dtype',
- 'layout',
+ "dtype",
+ "layout",
}
MISSING_PROPERTIES = {
- 'grad_fn',
+ "grad_fn",
# This is an undocumented property so it's not included
"output_nr",
# This has a longer implementation, maybe not worth copying to
# TorchScript if named tensors don't work there anyways
- 'names',
+ "names",
}
for p in properties:
@@ -232,7 +237,8 @@ class TestTensorBuiltins(JitTestCase):
def func():
c = 1
return c.add(1)
- with self.assertRaisesRegex(RuntimeError, 'object has no attribute or method'):
+
+ with self.assertRaisesRegex(RuntimeError, "object has no attribute or method"):
torch.jit.script(func)
# testing implicit conversion of tensors to scalars to match function arguments
@@ -265,10 +271,12 @@ class TestTensorBuiltins(JitTestCase):
x = torch.zeros(10)
# float tensor, float tensor with grad, int tensor (can't set grad on int tensor)
- tensors = [torch.tensor(1.1),
- torch.tensor(1.1, requires_grad=True),
- torch.tensor(0),
- torch.tensor([2])]
+ tensors = [
+ torch.tensor(1.1),
+ torch.tensor(1.1, requires_grad=True),
+ torch.tensor(0),
+ torch.tensor([2]),
+ ]
script_funs = [tensor_to_int_script, tensor_to_float_script]
funs = [tensor_to_int, tensor_to_float]
@@ -286,4 +294,6 @@ class TestTensorBuiltins(JitTestCase):
# assert result or exception equal for each (function, inputs)
for tensor in tensors:
for i in range(len(script_funs)):
- self.assertEqual(test_func(script_funs[i], x, tensor), test_func(funs[i], x, tensor))
+ self.assertEqual(
+ test_func(script_funs[i], x, tensor), test_func(funs[i], x, tensor)
+ )
diff --git a/test/jit/test_class_type.py b/test/jit/test_class_type.py
index 80829795d0..0aa3087471 100644
--- a/test/jit/test_class_type.py
+++ b/test/jit/test_class_type.py
@@ -4,24 +4,28 @@ import io
import os
import sys
import unittest
+from typing import Any
import torch
import torch.nn as nn
from torch.testing import FileCheck
-from typing import Any
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
-from torch.testing._internal.jit_utils import JitTestCase, make_global
+from typing import Dict, Iterable, List, Optional, Tuple
+
import torch.testing._internal.jit_utils
from torch.testing._internal.common_utils import IS_SANDCASTLE, skipIfTorchDynamo
-from typing import List, Tuple, Iterable, Optional, Dict
+from torch.testing._internal.jit_utils import JitTestCase, make_global
+
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
class TestClassType(JitTestCase):
def test_reference_semantics(self):
@@ -29,6 +33,7 @@ class TestClassType(JitTestCase):
Test that modifications made to a class instance in TorchScript
are visible in eager.
"""
+
class Foo:
def __init__(self, a: int):
self.a = a
@@ -92,12 +97,12 @@ class TestClassType(JitTestCase):
pass
def __contains__(self, key: str) -> bool:
- return key == 'hi'
+ return key == "hi"
@torch.jit.script
def fn():
foo = FooTest()
- return 'hi' in foo, 'no' in foo
+ return "hi" in foo, "no" in foo
self.assertEqual(fn(), (True, False))
@@ -118,7 +123,10 @@ class TestClassType(JitTestCase):
self.assertEqual(fn(1), 3)
def test_set_attr_type_mismatch(self):
- with self.assertRaisesRegexWithHighlight(RuntimeError, "Wrong type for attribute assignment", "self.foo = 10"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "Wrong type for attribute assignment", "self.foo = 10"
+ ):
+
@torch.jit.script
class FooTest:
def __init__(self, x):
@@ -126,7 +134,10 @@ class TestClassType(JitTestCase):
self.foo = 10 # should error since int != Tensor
def test_get_attr_not_initialized(self):
- with self.assertRaisesRegexWithHighlight(RuntimeError, "object has no attribute or method", "self.asdf"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "object has no attribute or method", "self.asdf"
+ ):
+
@torch.jit.script
class FooTest:
def __init__(self, x):
@@ -136,7 +147,10 @@ class TestClassType(JitTestCase):
return self.asdf # asdf isn't an attr
def test_set_attr_non_initialized(self):
- with self.assertRaisesRegexWithHighlight(RuntimeError, "Tried to set nonexistent attribute", "self.bar = y"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "Tried to set nonexistent attribute", "self.bar = y"
+ ):
+
@torch.jit.script
class FooTest:
def __init__(self, x):
@@ -153,12 +167,16 @@ class TestClassType(JitTestCase):
Expected a value of type 'Optional[int]' for argument 'size' but instead found type 'Tensor'.
"""
with self.assertRaisesRegexWithHighlight(RuntimeError, "nearest", ""):
+
@torch.jit.script
def FooTest(x):
- return torch.nn.functional.interpolate(x, 'bad')
+ return torch.nn.functional.interpolate(x, "bad")
def test_type_annotations(self):
- with self.assertRaisesRegexWithHighlight(RuntimeError, "Expected a value of type \'bool", ""):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "Expected a value of type 'bool", ""
+ ):
+
@torch.jit.script # noqa: B903
class FooTest: # noqa: B903
def __init__(self, x: bool) -> None:
@@ -171,7 +189,10 @@ class TestClassType(JitTestCase):
fn(2)
def test_conditional_set_attr(self):
- with self.assertRaisesRegexWithHighlight(RuntimeError, "assignment cannot be in a control-flow block", ""):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "assignment cannot be in a control-flow block", ""
+ ):
+
@torch.jit.script
class FooTest:
def __init__(self, x):
@@ -236,7 +257,6 @@ class TestClassType(JitTestCase):
# classes are globally registered for now, so we need to clear the JIT
# registry to simulate loading a new model
-
buffer.seek(0)
m_loaded = torch.jit.load(buffer)
@@ -320,7 +340,7 @@ class TestClassType(JitTestCase):
self.x = x
self.y = y
- make_global(Foo) # see [local resolution in python]
+ make_global(Foo) # see [local resolution in python]
@torch.jit.script
def use_foo(foo: Foo) -> Foo:
@@ -419,15 +439,22 @@ class TestClassType(JitTestCase):
self.assertEqual(test_nested_inside_tuple(), [(1, 11), (1, 12)])
- with self.assertRaisesRegexWithHighlight(RuntimeError, "bool\' for argument \'reverse", ""):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "bool' for argument 'reverse", ""
+ ):
+
@torch.jit.script
def test():
li = [Foo(1)]
li.sort(li)
return li
+
test()
- with self.assertRaisesRegexWithHighlight(RuntimeError, "must define a __lt__", ""):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "must define a __lt__", ""
+ ):
+
@torch.jit.script
class NoMethod:
def __init__(self):
@@ -438,6 +465,7 @@ class TestClassType(JitTestCase):
li = [NoMethod(), NoMethod()]
li.sort()
return li
+
test()
@torch.jit.script
@@ -449,12 +477,16 @@ class TestClassType(JitTestCase):
def __lt__(self, other):
pass
- with self.assertRaisesRegexWithHighlight(RuntimeError, "must define a __lt__", ""):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "must define a __lt__", ""
+ ):
+
@torch.jit.script
def test():
li = [WrongLt(), WrongLt()]
li.sort()
return li
+
test()
def test_class_inheritance(self):
@@ -466,18 +498,21 @@ class TestClassType(JitTestCase):
def two(self, x):
return x + self.b
- with self.assertRaisesRegexWithHighlight(RuntimeError, "does not support inheritance", ""):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "does not support inheritance", ""
+ ):
+
@torch.jit.script
class Derived(Base):
def two(self, x):
return x + self.b + 2
-
def test_class_inheritance_implicit(self):
"""
Test that inheritance is detected in
implicit scripting codepaths (e.g. try_ann_to_type).
"""
+
class A:
def __init__(self, t):
self.t = t
@@ -502,14 +537,16 @@ class TestClassType(JitTestCase):
else:
return B.f(x.t)
- with self.assertRaisesRegexWithHighlight(RuntimeError, "object has no attribute or method", ""):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "object has no attribute or method", ""
+ ):
sc = torch.jit.script(fun)
@skipIfTorchDynamo("Test does not work with TorchDynamo")
@unittest.skipIf(IS_SANDCASTLE, "Importing like this doesn't work in fbcode")
def test_imported_classes(self):
- import jit._imported_class_test.foo
import jit._imported_class_test.bar
+ import jit._imported_class_test.foo
import jit._imported_class_test.very.very.nested
class MyMod(torch.jit.ScriptModule):
@@ -593,6 +630,7 @@ class TestClassType(JitTestCase):
def one(self, x, y):
return x + y
+
# missing two
@torch.jit.script
@@ -616,6 +654,7 @@ class TestClassType(JitTestCase):
x = c[i].one(x, x)
x = c[i].two(x)
return x
+
self.checkScript(use_them, (torch.rand(3, 4),))
@torch.jit.script
@@ -626,22 +665,33 @@ class TestClassType(JitTestCase):
def inherit(x: OneTwoThree) -> OneTwo:
return as_interface(x)
- with self.assertRaisesRegexWithHighlight(RuntimeError, "does not have method", ""):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "does not have method", ""
+ ):
+
@torch.jit.script
def wrong1():
return as_interface(NotMember())
- with self.assertRaisesRegexWithHighlight(RuntimeError, "is not compatible with interface", ""):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "is not compatible with interface", ""
+ ):
+
@torch.jit.script
def wrong2():
return as_interface(NotMember2())
- with self.assertRaisesRegexWithHighlight(RuntimeError, "does not have method", ""):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "does not have method", ""
+ ):
+
@torch.jit.script
def wrong3():
return inherit(as_interface(Foo()))
- with self.assertRaisesRegexWithHighlight(RuntimeError, "is not compatible with interface", ""):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "is not compatible with interface", ""
+ ):
@torch.jit.script
def wrong4(x: OneTwoWrong) -> int:
@@ -656,7 +706,7 @@ class TestClassType(JitTestCase):
def forward(self, x):
return self.proxy_mod.two(x)
- TestPyAssign.__annotations__ = {'proxy_mod': OneTwo}
+ TestPyAssign.__annotations__ = {"proxy_mod": OneTwo}
input = torch.rand(3, 4)
scripted_pyassign_mod = torch.jit.script(TestPyAssign())
@@ -671,10 +721,11 @@ class TestClassType(JitTestCase):
def forward(self, x):
return self.proxy_mod.two(x)
- TestPyAssignError.__annotations__ = {'proxy_mod': OneTwoThree}
+ TestPyAssignError.__annotations__ = {"proxy_mod": OneTwoThree}
- with self.assertRaisesRegexWithHighlight(RuntimeError,
- "is not compatible with interface __torch__", ""):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "is not compatible with interface __torch__", ""
+ ):
torch.jit.script(TestPyAssignError(Foo()))
# test pure python object assignment to interface fails
@@ -682,8 +733,9 @@ class TestClassType(JitTestCase):
def __init__(self):
pass
- with self.assertRaisesRegexWithHighlight(RuntimeError,
- "the value is not a TorchScript compatible type", ""):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "the value is not a TorchScript compatible type", ""
+ ):
torch.jit.script(TestPyAssignError(PyClass()))
# TODO test: interface-interface class-interface inheritance errors,
# NamedTuple inheritance errors
@@ -729,7 +781,7 @@ class TestClassType(JitTestCase):
return self.x * other
def __pow__(self, other: int) -> int:
- return int(self.x ** other)
+ return int(self.x**other)
def __truediv__(self, other: int) -> float:
return self.x / other
@@ -773,54 +825,89 @@ class TestClassType(JitTestCase):
def __call__(self, val: int) -> int:
return self.x * val * 3
-
make_global(Foo) # see [local resolution in python]
def add():
return MyClass(4) + 3
+
def sub(): # noqa: E306
return MyClass(4) - 3
+
def mul(): # noqa: E306
return MyClass(4) * 3
+
def pow(): # noqa: E306
return MyClass(4) ** 3
+
def truediv(): # noqa: E306
return MyClass(4) / 3
+
def ne(): # noqa: E306
return MyClass(4) != 3
+
def eq(): # noqa: E306
return MyClass(4) == 3
+
def lt(): # noqa: E306
return MyClass(4) < 3
+
def gt(): # noqa: E306
return MyClass(4) > 3
+
def le(): # noqa: E306
return MyClass(4) <= 3
+
def ge(): # noqa: E306
return MyClass(4) >= 3
+
def _and(): # noqa: E306
return MyClass(4) & 3
+
def _or(): # noqa: E306
return MyClass(4) | 3
+
def _xor(): # noqa: E306
return MyClass(4) ^ 3
+
def getitem(): # noqa: E306
return MyClass(4)[1]
+
def setitem(): # noqa: E306
a = MyClass(4)
a[1] = 5
return a.x
+
def call(): # noqa: E306
a = MyClass(5)
return a(2)
- ops = [add, sub, mul, pow, ne, eq, lt, gt, le, ge, _and, _or, _xor, getitem, setitem, call]
+ ops = [
+ add,
+ sub,
+ mul,
+ pow,
+ ne,
+ eq,
+ lt,
+ gt,
+ le,
+ ge,
+ _and,
+ _or,
+ _xor,
+ getitem,
+ setitem,
+ call,
+ ]
ops.append(truediv)
for func in ops:
self.checkScript(func, ())
- with self.assertRaisesRegexWithHighlight(RuntimeError, "object has no attribute or method", ""):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "object has no attribute or method", ""
+ ):
+
@torch.jit.script
def test():
return Foo(torch.tensor(1)) + Foo(torch.tensor(1))
@@ -852,7 +939,7 @@ class TestClassType(JitTestCase):
fn = torch.jit.script(test)
self.assertEqual(fn(Foo(0.5)), test(0.5))
- self.assertEqual(fn(Foo(0.)), test(0.0))
+ self.assertEqual(fn(Foo(0.0)), test(0.0))
# str has slightly different formatting
self.assertTrue("0.5" in (str(Foo(0.5))))
self.assertTrue("0." in (str(Foo(0.0))))
@@ -865,7 +952,10 @@ class TestClassType(JitTestCase):
def __bool__(self):
return (1, 2)
- with self.assertRaisesRegexWithHighlight(RuntimeError, "expected a bool expression for condition", ""):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "expected a bool expression for condition", ""
+ ):
+
@torch.jit.script
def test():
if BadBool():
@@ -921,6 +1011,7 @@ class TestClassType(JitTestCase):
Recursive class types not yet supported. We should give a good error message.
"""
with self.assertRaises(RuntimeError):
+
@torch.jit.script # noqa: B903
class Tree: # noqa: B903
def __init__(self):
@@ -940,7 +1031,7 @@ class TestClassType(JitTestCase):
return x, y
# Test serialization/deserialization of class constant
- for c in (2, 1.0, None, True, 'str', (2, 3), [5.9, 7.3]):
+ for c in (2, 1.0, None, True, "str", (2, 3), [5.9, 7.3]):
m = torch.jit.script(M(c))
buffer = io.BytesIO()
torch.jit.save(m, buffer)
@@ -954,28 +1045,31 @@ class TestClassType(JitTestCase):
def test_py_class_to_ivalue_missing_attribute(self):
class Foo:
- i : int
- f : float
+ i: int
+ f: float
- def __init__(self, i : int, f : float):
+ def __init__(self, i: int, f: float):
self.i = i
self.f = f
make_global(Foo) # see [local resolution in python]
@torch.jit.script
- def test_fn(x : Foo) -> float:
+ def test_fn(x: Foo) -> float:
return x.i + x.f
test_fn(Foo(3, 4.0))
- with self.assertRaisesRegexWithHighlight(RuntimeError, 'missing attribute i', ""):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "missing attribute i", ""
+ ):
test_fn(torch.rand(3, 4))
def test_unused_method(self):
"""
Test unused methods on scripted classes.
"""
+
@torch.jit.script
class Unused:
def __init__(self):
@@ -1028,12 +1122,13 @@ class TestClassType(JitTestCase):
Test that a scripted class can have a method that refers to the class itself
in its type annotations.
"""
+
@torch.jit.script
class Meta:
def __init__(self, a: int):
self.a = a
- def method(self, other: List['Meta']) -> 'Meta':
+ def method(self, other: List["Meta"]) -> "Meta":
return Meta(len(other))
class ModuleWithMeta(torch.nn.Module):
@@ -1051,19 +1146,20 @@ class TestClassType(JitTestCase):
"""
Test that annotating container attributes with types works correctly
"""
+
@torch.jit.script
class CompetitiveLinkingTokenReplacementUtils:
def __init__(self):
- self.my_list : List[Tuple[float, int, int]] = []
- self.my_dict : Dict[int, int] = {}
+ self.my_list: List[Tuple[float, int, int]] = []
+ self.my_dict: Dict[int, int] = {}
@torch.jit.script
def foo():
y = CompetitiveLinkingTokenReplacementUtils()
- new_dict : Dict[int, int] = {1: 1, 2: 2}
+ new_dict: Dict[int, int] = {1: 1, 2: 2}
y.my_dict = new_dict
- new_list : List[Tuple[float, int, int]] = [(1.0, 1, 1)]
+ new_list: List[Tuple[float, int, int]] = [(1.0, 1, 1)]
y.my_list = new_list
return y
@@ -1071,6 +1167,7 @@ class TestClassType(JitTestCase):
"""
Test that methods on class types can have default arguments.
"""
+
@torch.jit.script
class ClassWithDefaultArgs:
def __init__(
@@ -1105,7 +1202,9 @@ class TestClassType(JitTestCase):
return obj.int + obj.list[2] + obj.dict[1]
def override_defaults() -> int:
- obj: ClassWithDefaultArgs = ClassWithDefaultArgs(3, [9, 10, 11], (12, 13, 14), {3: 4}, "str")
+ obj: ClassWithDefaultArgs = ClassWithDefaultArgs(
+ 3, [9, 10, 11], (12, 13, 14), {3: 4}, "str"
+ )
s: int = obj.int
for x in obj.list:
@@ -1154,7 +1253,7 @@ class TestClassType(JitTestCase):
# The constructor of this class below has mutable arguments. This should throw
# an error.
- class ClassWithMutableArgs: # noqa: B903
+ class ClassWithMutableArgs: # noqa: B903
def __init__(
self,
a: List[int] = [1, 2, 3], # noqa: B006
@@ -1164,13 +1263,16 @@ class TestClassType(JitTestCase):
def should_fail():
obj: ClassWithMutableArgs = ClassWithMutableArgs()
- with self.assertRaisesRegexWithHighlight(RuntimeError, "Mutable default parameters are not supported", ""):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "Mutable default parameters are not supported", ""
+ ):
torch.jit.script(should_fail)
def test_staticmethod(self):
"""
Test static methods on class types.
"""
+
@torch.jit.script
class ClassWithStaticMethod:
def __init__(self, a: int, b: int):
@@ -1183,22 +1285,22 @@ class TestClassType(JitTestCase):
def get_b(self):
return self.b
- def __eq__(self, other: 'ClassWithStaticMethod'):
+ def __eq__(self, other: "ClassWithStaticMethod"):
return self.a == other.a and self.b == other.b
# staticmethod that calls constructor.
@staticmethod
- def create(args: List['ClassWithStaticMethod']) -> 'ClassWithStaticMethod':
+ def create(args: List["ClassWithStaticMethod"]) -> "ClassWithStaticMethod":
return ClassWithStaticMethod(args[0].a, args[0].b)
# staticmethod that calls another staticmethod.
@staticmethod
- def create_from(a: int, b: int) -> 'ClassWithStaticMethod':
+ def create_from(a: int, b: int) -> "ClassWithStaticMethod":
a = ClassWithStaticMethod(a, b)
return ClassWithStaticMethod.create([a])
# Script function that calls staticmethod.
- def test_function(a: int, b: int) -> 'ClassWithStaticMethod':
+ def test_function(a: int, b: int) -> "ClassWithStaticMethod":
return ClassWithStaticMethod.create_from(a, b)
make_global(ClassWithStaticMethod)
@@ -1209,21 +1311,22 @@ class TestClassType(JitTestCase):
"""
Test classmethods on class types.
"""
+
@torch.jit.script
class ClassWithClassMethod:
def __init__(self, a: int):
self.a: int = a
- def __eq__(self, other: 'ClassWithClassMethod'):
+ def __eq__(self, other: "ClassWithClassMethod"):
return self.a == other.a
@classmethod
- def create(cls, a: int) -> 'ClassWithClassMethod':
+ def create(cls, a: int) -> "ClassWithClassMethod":
return cls(a)
make_global(ClassWithClassMethod)
- def test_function(a: int) -> 'ClassWithClassMethod':
+ def test_function(a: int) -> "ClassWithClassMethod":
x = ClassWithClassMethod(a)
# Support calling classmethod with an instance
# Calling with the class is not supported.
@@ -1236,6 +1339,7 @@ class TestClassType(JitTestCase):
"""
Test that a scripted class can make use of the @property decorator.
"""
+
def free_function(x: int) -> int:
return x + 1
@@ -1308,13 +1412,22 @@ class TestClassType(JitTestCase):
return self.props.attr + no_setter.attr + method_uses_property.forward()
- self.checkModule(ModuleWithProperties(5), (5, 6, 7, 8,))
+ self.checkModule(
+ ModuleWithProperties(5),
+ (
+ 5,
+ 6,
+ 7,
+ 8,
+ ),
+ )
def test_custom_delete(self):
"""
Test that del can be called on an instance of a class that
overrides __delitem__.
"""
+
class Example:
def __init__(self):
self._data: Dict[str, torch.Tensor] = {"1": torch.tensor(1.0)}
@@ -1346,7 +1459,9 @@ class TestClassType(JitTestCase):
del example[key]
return example.check(key)
- with self.assertRaisesRegexWithHighlight(RuntimeError, r"Class does not define __delitem__", "example[key]"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, r"Class does not define __delitem__", "example[key]"
+ ):
self.checkScript(fn, ())
def test_recursive_script_builtin_type_resolution(self):
@@ -1369,7 +1484,7 @@ class TestClassType(JitTestCase):
def g(self, x: device_t) -> device_ty:
return x
- def h(self, a: 'A') -> 'A':
+ def h(self, a: "A") -> "A":
return A()
def i(self, a: List[int]) -> int:
@@ -1404,14 +1519,14 @@ class TestClassType(JitTestCase):
Test resolution of built-in torch types(e.g. torch.Tensor, torch.device) when a class is recursively compiled
when compiling a module.
"""
- class Wrapper():
+
+ class Wrapper:
def __init__(self, t):
self.t = t
def to(self, l: List[torch.device], device: Optional[torch.device] = None):
return self.t.to(device=device)
-
class A(nn.Module):
def forward(self):
return Wrapper(torch.rand(4, 4))
@@ -1424,6 +1539,7 @@ class TestClassType(JitTestCase):
Test that the error message displayed when convering a class type
to an IValue that has an attribute of the wrong type.
"""
+
@torch.jit.script # noqa: B903
class ValHolder: # noqa: B903
def __init__(self, val):
@@ -1442,7 +1558,9 @@ class TestClassType(JitTestCase):
mod = self.mod2
return mod.val
- with self.assertRaisesRegexWithHighlight(RuntimeError, "Could not cast attribute 'val' to type Tensor", ""):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "Could not cast attribute 'val' to type Tensor", ""
+ ):
torch.jit.script(Mod())
def test_recursive_scripting(self):
@@ -1450,6 +1568,7 @@ class TestClassType(JitTestCase):
Test that class types are recursively scripted when an Python instance of one
is encountered as a module attribute.
"""
+
class Class:
def __init__(self, a: int):
self.a = a
@@ -1473,6 +1592,7 @@ class TestClassType(JitTestCase):
are added as failed attributes and do not cause compilation itself
to fail unless they are used in scripted code.
"""
+
class UnscriptableClass:
def __init__(self, a: int):
self.a = a
@@ -1490,7 +1610,9 @@ class TestClassType(JitTestCase):
def forward(self) -> bool:
return self.obj.get_a()
- with self.assertRaisesRegexWithHighlight(RuntimeError, "failed to convert Python type", ""):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "failed to convert Python type", ""
+ ):
torch.jit.script(ShouldNotCompile(UnscriptableClass(4)))
# This Module has an attribute of type UnscriptableClass
@@ -1509,7 +1631,6 @@ class TestClassType(JitTestCase):
self.checkModule(ShouldCompile(UnscriptableClass(4)), (4,))
-
def test_unresolved_class_attributes(self):
class UnresolvedAttrClass:
def __init__(self):
@@ -1538,7 +1659,9 @@ class TestClassType(JitTestCase):
u = UnresolvedAttrClass()
return u.attr_e
- error_message_regex = "object has no attribute or method.*is defined as a class attribute"
+ error_message_regex = (
+ "object has no attribute or method.*is defined as a class attribute"
+ )
for fn in (fn_a, fn_b, fn_c, fn_d, fn_e):
with self.assertRaisesRegex(RuntimeError, error_message_regex):
torch.jit.script(fn)
diff --git a/test/jit/test_complex.py b/test/jit/test_complex.py
index 3b7d344271..de09dd0a7c 100644
--- a/test/jit/test_complex.py
+++ b/test/jit/test_complex.py
@@ -1,19 +1,21 @@
# Owner(s): ["oncall: jit"]
-import torch
+import cmath
import os
import sys
-from torch.testing._internal.jit_utils import JitTestCase, execWrapper
-from torch.testing._internal.common_utils import IS_MACOS
-from typing import List, Dict
from itertools import product
from textwrap import dedent
-import cmath
+from typing import Dict, List
+
+import torch
+from torch.testing._internal.common_utils import IS_MACOS
+from torch.testing._internal.jit_utils import execWrapper, JitTestCase
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
+
class TestComplex(JitTestCase):
def test_script(self):
def fn(a: complex):
@@ -32,7 +34,7 @@ class TestComplex(JitTestCase):
def fn(a: Dict[complex, complex], key: complex) -> complex:
return a[key]
- input = {2 + 3j : 2 - 3j, -4.3 - 2j: 3j}
+ input = {2 + 3j: 2 - 3j, -4.3 - 2j: 3j}
self.checkScript(fn, (input, -4.3 - 2j))
def test_pickle(self):
@@ -41,7 +43,7 @@ class TestComplex(JitTestCase):
super().__init__()
self.a = 3 + 5j
self.b = [2 + 3j, 3 + 4j, 0 - 3j, -4 + 0j]
- self.c = {2 + 3j : 2 - 3j, -4.3 - 2j: 3j}
+ self.c = {2 + 3j: 2 - 3j, -4.3 - 2j: 3j}
@torch.jit.script_method
def forward(self, b: int):
@@ -50,7 +52,7 @@ class TestComplex(JitTestCase):
loaded = self.getExportImportCopy(ComplexModule())
self.assertEqual(loaded.a, 3 + 5j)
self.assertEqual(loaded.b, [2 + 3j, 3 + 4j, -3j, -4])
- self.assertEqual(loaded.c, {2 + 3j : 2 - 3j, -4.3 - 2j: 3j})
+ self.assertEqual(loaded.c, {2 + 3j: 2 - 3j, -4.3 - 2j: 3j})
self.assertEqual(loaded(2), 2 + 2j)
def test_complex_parse(self):
@@ -65,14 +67,19 @@ class TestComplex(JitTestCase):
self.checkScript(fn, (t1, t2, 2))
def test_complex_constants_and_ops(self):
- vals = ([0.0, 1.0, 2.2, -1.0, -0.0, -2.2, 1, 0, 2]
- + [10.0 ** i for i in range(2)] + [-(10.0 ** i) for i in range(2)])
+ vals = (
+ [0.0, 1.0, 2.2, -1.0, -0.0, -2.2, 1, 0, 2]
+ + [10.0**i for i in range(2)]
+ + [-(10.0**i) for i in range(2)]
+ )
complex_vals = tuple(complex(x, y) for x, y in product(vals, vals))
- funcs_template = dedent('''
+ funcs_template = dedent(
+ """
def func(a: complex):
return cmath.{func_or_const}(a)
- ''')
+ """
+ )
def checkCmath(func_name, funcs_template=funcs_template):
funcs_str = funcs_template.format(func_or_const=func_name)
@@ -80,11 +87,13 @@ class TestComplex(JitTestCase):
execWrapper(funcs_str, globals(), scope)
cu = torch.jit.CompilationUnit(funcs_str)
f_script = cu.func
- f = scope['func']
+ f = scope["func"]
- if func_name in ['isinf', 'isnan', 'isfinite']:
- new_vals = vals + ([float('inf'), float('nan'), -1 * float('inf')])
- final_vals = tuple(complex(x, y) for x, y in product(new_vals, new_vals))
+ if func_name in ["isinf", "isnan", "isfinite"]:
+ new_vals = vals + ([float("inf"), float("nan"), -1 * float("inf")])
+ final_vals = tuple(
+ complex(x, y) for x, y in product(new_vals, new_vals)
+ )
else:
final_vals = complex_vals
@@ -107,8 +116,27 @@ class TestComplex(JitTestCase):
msg = f"Failed on {func_name} with input {a}. Python: {res_python}, Script: {res_script}"
self.assertEqual(res_python, res_script, msg=msg)
- unary_ops = ['log', 'log10', 'sqrt', 'exp', 'sin', 'cos', 'asin', 'acos', 'atan', 'sinh', 'cosh',
- 'tanh', 'asinh', 'acosh', 'atanh', 'phase', 'isinf', 'isnan', 'isfinite']
+ unary_ops = [
+ "log",
+ "log10",
+ "sqrt",
+ "exp",
+ "sin",
+ "cos",
+ "asin",
+ "acos",
+ "atan",
+ "sinh",
+ "cosh",
+ "tanh",
+ "asinh",
+ "acosh",
+ "atanh",
+ "phase",
+ "isinf",
+ "isnan",
+ "isfinite",
+ ]
# --- Unary ops ---
for op in unary_ops:
@@ -118,7 +146,7 @@ class TestComplex(JitTestCase):
return abs(x)
for val in complex_vals:
- self.checkScript(fn, (val, ))
+ self.checkScript(fn, (val,))
def pow_complex_float(x: complex, y: float):
return pow(x, y)
@@ -126,7 +154,6 @@ class TestComplex(JitTestCase):
def pow_float_complex(x: float, y: complex):
return pow(x, y)
-
self.checkScript(pow_float_complex, (2, 3j))
self.checkScript(pow_complex_float, (3j, 2))
@@ -135,7 +162,7 @@ class TestComplex(JitTestCase):
for x, y in zip(complex_vals, complex_vals):
# Reference: https://github.com/pytorch/pytorch/issues/54622
- if (x == 0):
+ if x == 0:
continue
self.checkScript(pow_complex_complex, (x, y))
@@ -143,16 +170,25 @@ class TestComplex(JitTestCase):
# --- Binary op ---
def rect_fn(x: float, y: float):
return cmath.rect(x, y)
- for x, y in product(vals, vals):
- self.checkScript(rect_fn, (x, y, ))
- func_constants_template = dedent('''
+ for x, y in product(vals, vals):
+ self.checkScript(
+ rect_fn,
+ (
+ x,
+ y,
+ ),
+ )
+
+ func_constants_template = dedent(
+ """
def func():
return cmath.{func_or_const}
- ''')
- float_consts = ['pi', 'e', 'tau', 'inf', 'nan']
- complex_consts = ['infj', 'nanj']
- for x in (float_consts + complex_consts):
+ """
+ )
+ float_consts = ["pi", "e", "tau", "inf", "nan"]
+ complex_consts = ["infj", "nanj"]
+ for x in float_consts + complex_consts:
checkCmath(x, funcs_template=func_constants_template)
def test_infj_nanj_pickle(self):
@@ -177,77 +213,293 @@ class TestComplex(JitTestCase):
def fn_int(real: int, img: int):
return complex(real, img)
- self.checkScript(fn_int, (0, 0, ))
- self.checkScript(fn_int, (-1234, 0, ))
- self.checkScript(fn_int, (0, -1256, ))
- self.checkScript(fn_int, (-167, -1256, ))
+ self.checkScript(
+ fn_int,
+ (
+ 0,
+ 0,
+ ),
+ )
+ self.checkScript(
+ fn_int,
+ (
+ -1234,
+ 0,
+ ),
+ )
+ self.checkScript(
+ fn_int,
+ (
+ 0,
+ -1256,
+ ),
+ )
+ self.checkScript(
+ fn_int,
+ (
+ -167,
+ -1256,
+ ),
+ )
def fn_float(real: float, img: float):
return complex(real, img)
- self.checkScript(fn_float, (0.0, 0.0, ))
- self.checkScript(fn_float, (-1234.78, 0, ))
- self.checkScript(fn_float, (0, 56.18, ))
- self.checkScript(fn_float, (-1.9, -19.8, ))
+ self.checkScript(
+ fn_float,
+ (
+ 0.0,
+ 0.0,
+ ),
+ )
+ self.checkScript(
+ fn_float,
+ (
+ -1234.78,
+ 0,
+ ),
+ )
+ self.checkScript(
+ fn_float,
+ (
+ 0,
+ 56.18,
+ ),
+ )
+ self.checkScript(
+ fn_float,
+ (
+ -1.9,
+ -19.8,
+ ),
+ )
def fn_bool(real: bool, img: bool):
return complex(real, img)
- self.checkScript(fn_bool, (True, True, ))
- self.checkScript(fn_bool, (False, False, ))
- self.checkScript(fn_bool, (False, True, ))
- self.checkScript(fn_bool, (True, False, ))
+ self.checkScript(
+ fn_bool,
+ (
+ True,
+ True,
+ ),
+ )
+ self.checkScript(
+ fn_bool,
+ (
+ False,
+ False,
+ ),
+ )
+ self.checkScript(
+ fn_bool,
+ (
+ False,
+ True,
+ ),
+ )
+ self.checkScript(
+ fn_bool,
+ (
+ True,
+ False,
+ ),
+ )
def fn_bool_int(real: bool, img: int):
return complex(real, img)
- self.checkScript(fn_bool_int, (True, 0, ))
- self.checkScript(fn_bool_int, (False, 0, ))
- self.checkScript(fn_bool_int, (False, -1, ))
- self.checkScript(fn_bool_int, (True, 3, ))
+ self.checkScript(
+ fn_bool_int,
+ (
+ True,
+ 0,
+ ),
+ )
+ self.checkScript(
+ fn_bool_int,
+ (
+ False,
+ 0,
+ ),
+ )
+ self.checkScript(
+ fn_bool_int,
+ (
+ False,
+ -1,
+ ),
+ )
+ self.checkScript(
+ fn_bool_int,
+ (
+ True,
+ 3,
+ ),
+ )
def fn_int_bool(real: int, img: bool):
return complex(real, img)
- self.checkScript(fn_int_bool, (0, True, ))
- self.checkScript(fn_int_bool, (0, False, ))
- self.checkScript(fn_int_bool, (-3, True, ))
- self.checkScript(fn_int_bool, (6, False, ))
+ self.checkScript(
+ fn_int_bool,
+ (
+ 0,
+ True,
+ ),
+ )
+ self.checkScript(
+ fn_int_bool,
+ (
+ 0,
+ False,
+ ),
+ )
+ self.checkScript(
+ fn_int_bool,
+ (
+ -3,
+ True,
+ ),
+ )
+ self.checkScript(
+ fn_int_bool,
+ (
+ 6,
+ False,
+ ),
+ )
def fn_bool_float(real: bool, img: float):
return complex(real, img)
- self.checkScript(fn_bool_float, (True, 0.0, ))
- self.checkScript(fn_bool_float, (False, 0.0, ))
- self.checkScript(fn_bool_float, (False, -1.0, ))
- self.checkScript(fn_bool_float, (True, 3.0, ))
+ self.checkScript(
+ fn_bool_float,
+ (
+ True,
+ 0.0,
+ ),
+ )
+ self.checkScript(
+ fn_bool_float,
+ (
+ False,
+ 0.0,
+ ),
+ )
+ self.checkScript(
+ fn_bool_float,
+ (
+ False,
+ -1.0,
+ ),
+ )
+ self.checkScript(
+ fn_bool_float,
+ (
+ True,
+ 3.0,
+ ),
+ )
def fn_float_bool(real: float, img: bool):
return complex(real, img)
- self.checkScript(fn_float_bool, (0.0, True, ))
- self.checkScript(fn_float_bool, (0.0, False, ))
- self.checkScript(fn_float_bool, (-3.0, True, ))
- self.checkScript(fn_float_bool, (6.0, False, ))
+ self.checkScript(
+ fn_float_bool,
+ (
+ 0.0,
+ True,
+ ),
+ )
+ self.checkScript(
+ fn_float_bool,
+ (
+ 0.0,
+ False,
+ ),
+ )
+ self.checkScript(
+ fn_float_bool,
+ (
+ -3.0,
+ True,
+ ),
+ )
+ self.checkScript(
+ fn_float_bool,
+ (
+ 6.0,
+ False,
+ ),
+ )
def fn_float_int(real: float, img: int):
return complex(real, img)
- self.checkScript(fn_float_int, (0.0, 1, ))
- self.checkScript(fn_float_int, (0.0, -1, ))
- self.checkScript(fn_float_int, (1.8, -3, ))
- self.checkScript(fn_float_int, (2.7, 8, ))
+ self.checkScript(
+ fn_float_int,
+ (
+ 0.0,
+ 1,
+ ),
+ )
+ self.checkScript(
+ fn_float_int,
+ (
+ 0.0,
+ -1,
+ ),
+ )
+ self.checkScript(
+ fn_float_int,
+ (
+ 1.8,
+ -3,
+ ),
+ )
+ self.checkScript(
+ fn_float_int,
+ (
+ 2.7,
+ 8,
+ ),
+ )
def fn_int_float(real: int, img: float):
return complex(real, img)
- self.checkScript(fn_int_float, (1, 0.0, ))
- self.checkScript(fn_int_float, (-1, 1.7, ))
- self.checkScript(fn_int_float, (-3, 0.0, ))
- self.checkScript(fn_int_float, (2, -8.9, ))
+ self.checkScript(
+ fn_int_float,
+ (
+ 1,
+ 0.0,
+ ),
+ )
+ self.checkScript(
+ fn_int_float,
+ (
+ -1,
+ 1.7,
+ ),
+ )
+ self.checkScript(
+ fn_int_float,
+ (
+ -3,
+ 0.0,
+ ),
+ )
+ self.checkScript(
+ fn_int_float,
+ (
+ 2,
+ -8.9,
+ ),
+ )
def test_torch_complex_constructor_with_tensor(self):
- tensors = ([torch.rand(1), torch.randint(-5, 5, (1, )), torch.tensor([False])])
+ tensors = [torch.rand(1), torch.randint(-5, 5, (1,)), torch.tensor([False])]
def fn_tensor_float(real, img: float):
return complex(real, img)
@@ -280,7 +532,13 @@ class TestComplex(JitTestCase):
return complex(real, img) + complex(2)
for x, y in product(tensors, tensors):
- self.checkScript(fn_tensor_tensor, (x, y, ))
+ self.checkScript(
+ fn_tensor_tensor,
+ (
+ x,
+ y,
+ ),
+ )
def test_comparison_ops(self):
def fn1(a: complex, b: complex):
@@ -316,7 +574,7 @@ class TestComplex(JitTestCase):
def fn(x: List[complex]):
return sum(x)
- self.checkScript(fn, (torch.randn(4, dtype=torch.cdouble).tolist(), ))
+ self.checkScript(fn, (torch.randn(4, dtype=torch.cdouble).tolist(),))
def test_tensor_attributes(self):
def tensor_real(x):
@@ -326,8 +584,8 @@ class TestComplex(JitTestCase):
return x.imag
t = torch.randn(2, 3, dtype=torch.cdouble)
- self.checkScript(tensor_real, (t, ))
- self.checkScript(tensor_imag, (t, ))
+ self.checkScript(tensor_real, (t,))
+ self.checkScript(tensor_imag, (t,))
def test_binary_op_complex_tensor(self):
def mul(x: complex, y: torch.Tensor):
@@ -350,7 +608,7 @@ class TestComplex(JitTestCase):
ops = [mul, add, eq, ne, sub, div]
- for shape in [(1, ), (2, 2)]:
+ for shape in [(1,), (2, 2)]:
x = 0.71 + 0.71j
y = torch.randn(shape, dtype=torch.cfloat)
for op in ops:
diff --git a/test/jit/test_complexity.py b/test/jit/test_complexity.py
index ca1bf612d0..f58887b012 100644
--- a/test/jit/test_complexity.py
+++ b/test/jit/test_complexity.py
@@ -10,18 +10,29 @@ import torch
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
-from torch.testing._internal.jit_utils import JitTestCase, enable_profiling_mode
-from torch.testing._internal.jit_metaprogramming_utils import try_get_nn_module_compiled_mod_and_inputs, \
- get_nn_mod_test_name, get_all_nn_module_tests, nn_functional_tests, get_nn_functional_compiled_fn_and_inputs
-from torch.testing._internal.common_utils import run_tests, set_default_dtype, suppress_warnings, IS_FBCODE
+from torch.testing._internal.common_utils import (
+ IS_FBCODE,
+ run_tests,
+ set_default_dtype,
+ suppress_warnings,
+)
+from torch.testing._internal.jit_metaprogramming_utils import (
+ get_all_nn_module_tests,
+ get_nn_functional_compiled_fn_and_inputs,
+ get_nn_mod_test_name,
+ nn_functional_tests,
+ try_get_nn_module_compiled_mod_and_inputs,
+)
+from torch.testing._internal.jit_utils import enable_profiling_mode, JitTestCase
def num_ifs_loops(graph):
graph_str = str(graph)
# only look at body of graph
- graph_body = graph_str[0:graph_str.find("return")]
+ graph_body = graph_str[0 : graph_str.find("return")]
return graph_body.count("prim::Loop") + graph_body.count("prim::If")
+
def num_non_tensor_nodes(block):
num_non_tensor = 0
for node in block.nodes():
@@ -40,6 +51,7 @@ def num_non_tensor_nodes(block):
num_non_tensor += int(not tensor_out)
return num_non_tensor
+
class TestComplexity(JitTestCase):
def setUp(self):
super().setUp()
@@ -90,5 +102,6 @@ class TestComplexity(JitTestCase):
for line in stats:
print(line)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
run_tests()
diff --git a/test/jit/test_convert_activation.py b/test/jit/test_convert_activation.py
index f414459ece..6e8f4a0547 100644
--- a/test/jit/test_convert_activation.py
+++ b/test/jit/test_convert_activation.py
@@ -2,16 +2,18 @@
import os
import sys
+import unittest
from itertools import product
+
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing import FileCheck
-import unittest
try:
import torchvision
+
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
@@ -22,10 +24,12 @@ pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
activations = [
F.celu,
@@ -41,6 +45,7 @@ activations = [
F.silu,
]
+
class TestFunctionalToInplaceActivation(JitTestCase):
def test_check_no_type_promotion(self):
dtypes = [
@@ -67,6 +72,7 @@ class TestFunctionalToInplaceActivation(JitTestCase):
def test_functional_to_inplace_activation(self):
for activation in activations:
+
def test_basic(x):
y = x + 1
z = activation(y)
@@ -76,7 +82,7 @@ class TestFunctionalToInplaceActivation(JitTestCase):
self.run_pass("inline", fn.graph)
self.run_pass("constant_propagation", fn.graph)
FileCheck().check(f"aten::{activation.__name__}(").run(fn.graph)
- self.run_pass('functional_to_inplace_activation', fn.graph)
+ self.run_pass("functional_to_inplace_activation", fn.graph)
FileCheck().check_not(f"aten::{activation.__name__}(").run(fn.graph)
FileCheck().check(f"aten::{activation.__name__}_").run(fn.graph)
inp = torch.rand([2, 2])
@@ -91,7 +97,7 @@ class TestFunctionalToInplaceActivation(JitTestCase):
return z
fn = torch.jit.script(test1)
- self.run_pass('functional_to_inplace_activation', fn.graph)
+ self.run_pass("functional_to_inplace_activation", fn.graph)
FileCheck().check_not("aten::sigmoid_").run(fn.graph)
# inplace conversion should not happen because y is alias
@@ -102,7 +108,7 @@ class TestFunctionalToInplaceActivation(JitTestCase):
return z
fn = torch.jit.script(test2)
- self.run_pass('functional_to_inplace_activation', fn.graph)
+ self.run_pass("functional_to_inplace_activation", fn.graph)
FileCheck().check_not("aten::relu_").run(fn.graph)
# inplace conversion should not happen because self.x is
@@ -117,22 +123,33 @@ class TestFunctionalToInplaceActivation(JitTestCase):
return y
fn = torch.jit.script(Test3(torch.rand([2, 2])).eval())
- self.run_pass('functional_to_inplace_activation', fn.graph)
+ self.run_pass("functional_to_inplace_activation", fn.graph)
FileCheck().check_not("aten::relu_").run(fn.graph)
@skipIfNoTorchVision
def test_resnet18_correctness(self):
model = torchvision.models.resnet18()
frozen_model = torch.jit.freeze(torch.jit.script(model.eval()))
- N, C, H, W, = 10, 3, 224, 224
+ (
+ N,
+ C,
+ H,
+ W,
+ ) = (
+ 10,
+ 3,
+ 224,
+ 224,
+ )
inp = torch.randn(N, C, H, W)
- self.run_pass('functional_to_inplace_activation', frozen_model.graph)
+ self.run_pass("functional_to_inplace_activation", frozen_model.graph)
self.assertEqual(model(inp), frozen_model(inp))
class TestInplaceToFunctionalActivation(JitTestCase):
def test_inplace_to_functional_activation(self):
for activation in activations:
+
def test_basic(x):
y = x + 1
activation(y, inplace=True)
@@ -142,7 +159,7 @@ class TestInplaceToFunctionalActivation(JitTestCase):
self.run_pass("inline", fn.graph)
self.run_pass("constant_propagation", fn.graph)
FileCheck().check(f"aten::{activation.__name__}_").run(fn.graph)
- self.run_pass('inplace_to_functional_activation', fn.graph)
+ self.run_pass("inplace_to_functional_activation", fn.graph)
FileCheck().check_not(f"aten::{activation.__name__}_").run(fn.graph)
FileCheck().check(f"aten::{activation.__name__}(").run(fn.graph)
@@ -151,6 +168,7 @@ class TestInplaceToFunctionalActivation(JitTestCase):
torch.sigmoid_,
torch.tanh_,
]:
+
def test_basic(x):
y = x + 1
activation(y)
@@ -160,7 +178,7 @@ class TestInplaceToFunctionalActivation(JitTestCase):
self.run_pass("inline", fn.graph)
self.run_pass("constant_propagation", fn.graph)
FileCheck().check(f"aten::{activation.__name__}").run(fn.graph)
- self.run_pass('inplace_to_functional_activation', fn.graph)
+ self.run_pass("inplace_to_functional_activation", fn.graph)
FileCheck().check_not(f"aten::{activation.__name__}").run(fn.graph)
FileCheck().check(f"aten::{activation.__name__[:-1]}(").run(fn.graph)
@@ -171,7 +189,17 @@ class TestInplaceToFunctionalActivation(JitTestCase):
def test_resnet18_correctness(self):
model = torchvision.models.resnet18()
frozen_model = torch.jit.freeze(torch.jit.script(model.eval()))
- N, C, H, W, = 10, 3, 224, 224
+ (
+ N,
+ C,
+ H,
+ W,
+ ) = (
+ 10,
+ 3,
+ 224,
+ 224,
+ )
inp = torch.randn(N, C, H, W)
- self.run_pass('inplace_to_functional_activation', frozen_model.graph)
+ self.run_pass("inplace_to_functional_activation", frozen_model.graph)
self.assertEqual(model(inp), frozen_model(inp))
diff --git a/test/jit/test_cuda.py b/test/jit/test_cuda.py
index bc0e36a677..cb73c65b7a 100644
--- a/test/jit/test_cuda.py
+++ b/test/jit/test_cuda.py
@@ -1,16 +1,21 @@
# Owner(s): ["oncall: jit"]
+import gc
import os
import sys
-import gc
import unittest
+from typing import NamedTuple
import torch
-from typing import NamedTuple
from torch.testing import FileCheck
-from torch.testing._internal.jit_utils import JitTestCase
-from torch.testing._internal.common_utils import skipIfRocm, skipCUDANonDefaultStreamIf, NoTest, TEST_CUDA
from torch.testing._internal.common_cuda import TEST_MULTIGPU
+from torch.testing._internal.common_utils import (
+ NoTest,
+ skipCUDANonDefaultStreamIf,
+ skipIfRocm,
+ TEST_CUDA,
+)
+from torch.testing._internal.jit_utils import JitTestCase
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
@@ -18,7 +23,7 @@ sys.path.append(pytorch_test_dir)
# If GPU is not available, then do not run the tests
if not TEST_CUDA:
- print('CUDA not available, skipping tests', file=sys.stderr)
+ print("CUDA not available, skipping tests", file=sys.stderr)
JitTestCase = NoTest # noqa: F811
TEST_LARGE_TENSOR = TEST_CUDA
@@ -36,10 +41,12 @@ if __name__ == "__main__":
"instead."
)
+
class TestCUDA(JitTestCase):
"""
A suite of tests for the CUDA API in TorchScript.
"""
+
def tearDown(self):
gc.collect()
torch.cuda.empty_cache()
@@ -54,10 +61,10 @@ class TestCUDA(JitTestCase):
def test_device_synchronize():
prev_current_device_index = torch.cuda.current_device()
torch.cuda.synchronize()
- torch.cuda.synchronize('cuda')
- torch.cuda.synchronize('cuda:0')
+ torch.cuda.synchronize("cuda")
+ torch.cuda.synchronize("cuda:0")
torch.cuda.synchronize(0)
- torch.cuda.synchronize(torch.device('cuda:1'))
+ torch.cuda.synchronize(torch.device("cuda:1"))
after_current_device_index = torch.cuda.current_device()
# Check if the current device index is same as the device index before
@@ -66,7 +73,7 @@ class TestCUDA(JitTestCase):
@torch.jit.script
def test_multi_device_synchronize():
- torch.cuda.synchronize(torch.device('cuda:0'))
+ torch.cuda.synchronize(torch.device("cuda:0"))
prev_current_device_index = torch.cuda.current_device()
torch.cuda.synchronize(1)
after_current_device_index = torch.cuda.current_device()
@@ -76,11 +83,9 @@ class TestCUDA(JitTestCase):
return prev_current_device_index == after_current_device_index
self.assertTrue(test_device_synchronize)
- FileCheck().check("cuda::synchronize(") \
- .run(test_device_synchronize.graph)
+ FileCheck().check("cuda::synchronize(").run(test_device_synchronize.graph)
self.assertTrue(test_multi_device_synchronize)
- FileCheck().check("cuda::synchronize(") \
- .run(test_multi_device_synchronize.graph)
+ FileCheck().check("cuda::synchronize(").run(test_multi_device_synchronize.graph)
def test_stream_args(self):
# Test stream creation with default arguments
@@ -165,7 +170,6 @@ class TestCUDA(JitTestCase):
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
@skipCUDANonDefaultStreamIf(True)
def test_streams_and_events(self):
-
# Test default_stream API by passing device ID as an argument and
# and check if the stream device index matches with the device ID
@torch.jit.script
@@ -182,14 +186,14 @@ class TestCUDA(JitTestCase):
# This test checks for the default stream ID is set to 0 on the device
@torch.jit.script
def test_default_streams():
- s0 = torch.cuda.default_stream(torch.device('cuda:0'))
- s1 = torch.cuda.default_stream(torch.device('cuda:1'))
+ s0 = torch.cuda.default_stream(torch.device("cuda:0"))
+ s1 = torch.cuda.default_stream(torch.device("cuda:1"))
- d = torch.device('cuda:1')
+ d = torch.device("cuda:1")
# Check the current stream id and default id are same
# on the current device. The current device id by default is 0
- s2 = torch.cuda.current_stream(torch.device('cuda:0'))
+ s2 = torch.cuda.current_stream(torch.device("cuda:0"))
check_s2 = s2.id() == s0.id()
check_d0 = torch.cuda.current_device() == s2.device_index()
@@ -203,9 +207,25 @@ class TestCUDA(JitTestCase):
# Check if the current device was reset to 0
is_device_d0 = torch.cuda.current_device() == s2.device_index()
- return s0.device_index(), s1.device_index(), check_s2, check_s3, check_d0, check_d1, is_device_d0
-
- d0, d1, check_s2, check_s3, check_d0, check_d1, is_device_d0 = test_default_streams()
+ return (
+ s0.device_index(),
+ s1.device_index(),
+ check_s2,
+ check_s3,
+ check_d0,
+ check_d1,
+ is_device_d0,
+ )
+
+ (
+ d0,
+ d1,
+ check_s2,
+ check_s3,
+ check_d0,
+ check_d1,
+ is_device_d0,
+ ) = test_default_streams()
self.assertEqual(d0, 0)
self.assertEqual(d1, 1)
@@ -228,12 +248,21 @@ class TestCUDA(JitTestCase):
with torch.cuda.stream(None):
cur_device_index = torch.cuda.current_device()
is_device_index_same = cur_device_index == device_index
- is_current_stream_same = torch.cuda.current_stream(device).id() == current_stream.id()
- is_default_stream_same = torch.cuda.default_stream(device).id() == default_stream.id()
+ is_current_stream_same = (
+ torch.cuda.current_stream(device).id() == current_stream.id()
+ )
+ is_default_stream_same = (
+ torch.cuda.default_stream(device).id() == default_stream.id()
+ )
# Check if the device index, current stream and default streams have not changed
- are_streams_same = is_device_index_same and is_current_stream_same and is_default_stream_same
+ are_streams_same = (
+ is_device_index_same
+ and is_current_stream_same
+ and is_default_stream_same
+ )
return are_streams_same
+
self.assertTrue(test_set_none_stream())
# This test checks if the Device Context manager is a no op
@@ -246,6 +275,7 @@ class TestCUDA(JitTestCase):
# Check if the current device is the same
is_device_same = torch.cuda.current_device() == device_index
return is_device_same
+
self.assertTrue(test_set_device_none())
# Check if a CUDA JIT stream is created
@@ -260,15 +290,15 @@ class TestCUDA(JitTestCase):
# Class used to store results for the test: test_get_stream.
class Result(NamedTuple):
- t1 : torch.Tensor
- t2 : torch.Tensor
- is_current_and_default_stream_same : bool
- is_default_and_user_stream_not_same : bool
- is_stream_set : bool
- is_stream_reset : bool
- default_stream_query : bool
- default_stream_id : int
- user_stream_id : int
+ t1: torch.Tensor
+ t2: torch.Tensor
+ is_current_and_default_stream_same: bool
+ is_default_and_user_stream_not_same: bool
+ is_stream_set: bool
+ is_stream_reset: bool
+ default_stream_query: bool
+ default_stream_id: int
+ user_stream_id: int
# The test aims at checking different stream proporties.
@torch.jit.script
@@ -280,15 +310,23 @@ class TestCUDA(JitTestCase):
user_stream = torch.cuda.Stream()
# Check if the current and default streams are the same on the device
- is_current_and_default_stream_same = current_stream.id() == default_stream.id()
+ is_current_and_default_stream_same = (
+ current_stream.id() == default_stream.id()
+ )
# Check if user stream and default stream are not the same on the device
- is_default_and_user_stream_not_same = default_stream.id() != user_stream.id()
+ is_default_and_user_stream_not_same = (
+ default_stream.id() != user_stream.id()
+ )
with torch.cuda.stream(user_stream):
- is_stream_set = torch.cuda.current_stream(device).id() == user_stream.id()
+ is_stream_set = (
+ torch.cuda.current_stream(device).id() == user_stream.id()
+ )
# Check if the stream was reset to current_stream
- is_stream_reset = torch.cuda.current_stream(device).id() == current_stream.id()
+ is_stream_reset = (
+ torch.cuda.current_stream(device).id() == current_stream.id()
+ )
tensor1 = torch.rand(10000, 10000, device="cuda")
tensor2 = torch.mm(tensor1, tensor1).to("cuda")
@@ -297,9 +335,16 @@ class TestCUDA(JitTestCase):
# Capture all the results in the class Result
res = Result(
- tensor1, tensor2, is_current_and_default_stream_same,
- is_default_and_user_stream_not_same, is_stream_set,
- is_stream_reset, default_stream_query, default_stream.id(), user_stream.id())
+ tensor1,
+ tensor2,
+ is_current_and_default_stream_same,
+ is_default_and_user_stream_not_same,
+ is_stream_set,
+ is_stream_reset,
+ default_stream_query,
+ default_stream.id(),
+ user_stream.id(),
+ )
return res
result = test_get_stream()
@@ -310,8 +355,12 @@ class TestCUDA(JitTestCase):
self.assertTrue(result.is_stream_set)
self.assertTrue(result.is_stream_reset)
self.assertTrue(result.default_stream_query)
- self.assertEqual(result.default_stream_id, 0) # Check if the default stream ID is always 0
- self.assertNotEqual(result.user_stream_id, 0) # Check if the user stream is always non zero
+ self.assertEqual(
+ result.default_stream_id, 0
+ ) # Check if the default stream ID is always 0
+ self.assertNotEqual(
+ result.user_stream_id, 0
+ ) # Check if the user stream is always non zero
# Test the stream context manager. This test checks if the stream is switched
# to the user stream on using the stream context manager.
@@ -329,14 +378,20 @@ class TestCUDA(JitTestCase):
# Wait for B to be computed
user_stream.synchronize()
# Check if the stream has been reset on the current device
- is_stream_reset = torch.cuda.current_stream(device).id() == current_stream.id()
+ is_stream_reset = (
+ torch.cuda.current_stream(device).id() == current_stream.id()
+ )
return A, B, check, is_stream_reset
A, B, is_stream_set, is_stream_reset = test_stream_context()
self.assertEqual(torch.matmul(A, A), B)
- self.assertTrue(is_stream_set, "Error: Current stream was not set to user stream!")
- self.assertTrue(is_stream_reset, "Error: The stream was not restored to previous stream!")
+ self.assertTrue(
+ is_stream_set, "Error: Current stream was not set to user stream!"
+ )
+ self.assertTrue(
+ is_stream_reset, "Error: The stream was not restored to previous stream!"
+ )
# Test multiple nested streams. Check if the operations are computed as expected on the streams
# This test has been adapted from the eager mode tests available at test/test_cuda.py
@@ -372,11 +427,24 @@ class TestCUDA(JitTestCase):
# Check if the stream and device has been restored to previous stream and device
is_device_current = torch.cuda.current_device() == prev_device_index
- is_stream_current = torch.cuda.current_stream(device).id() == prev_current_stream.id()
-
- check_stream = is_stream_s1 and is_stream_s2 and is_stream_s1_after and is_stream_current
- check_device = is_device_s1 and is_device_s2 and is_device_s1_after and is_device_current
+ is_stream_current = (
+ torch.cuda.current_stream(device).id() == prev_current_stream.id()
+ )
+
+ check_stream = (
+ is_stream_s1
+ and is_stream_s2
+ and is_stream_s1_after
+ and is_stream_current
+ )
+ check_device = (
+ is_device_s1
+ and is_device_s2
+ and is_device_s1_after
+ and is_device_current
+ )
return A, B, C, D, check_stream, check_device
+
A, B, C, D, check_stream, check_device = test_multiple_stream()
self.assertEqual(torch.matmul(A, A), C)
@@ -401,7 +469,9 @@ class TestCUDA(JitTestCase):
B = torch.mm(A, A).to("cuda")
s1.record_event(event)
# Check if the current_stream is reset
- is_current_stream_1 = torch.cuda.current_stream(device).id() == prev_current_stream.id()
+ is_current_stream_1 = (
+ torch.cuda.current_stream(device).id() == prev_current_stream.id()
+ )
# Wait for ops on s1 to be computed
s2.wait_event(event)
with torch.cuda.stream(s2):
@@ -410,9 +480,16 @@ class TestCUDA(JitTestCase):
# Wait for C to be computed
s2.synchronize()
# Check if the current_stream is reset
- is_current_stream_2 = torch.cuda.current_stream(device).id() == prev_current_stream.id()
-
- check_stream = is_current_stream_1 and is_current_stream_2 and is_stream_s1 and is_stream_s2
+ is_current_stream_2 = (
+ torch.cuda.current_stream(device).id() == prev_current_stream.id()
+ )
+
+ check_stream = (
+ is_current_stream_1
+ and is_current_stream_2
+ and is_stream_s1
+ and is_stream_s2
+ )
return A, B, C, check_stream
A, B, C, check_stream = test_data_dependency_between_streams()
@@ -425,6 +502,7 @@ class TestCUDA(JitTestCase):
def test_simple_event():
e = torch.cuda.Event(True, False, False)
return e is not None
+
self.assertTrue(test_simple_event(), "Could not create CUDA Event!")
# Record the CUDA event for operation torch.mm on the current stream
@@ -474,6 +552,7 @@ class TestCUDA(JitTestCase):
# not necessary to check e_tik and e_tok, as elapsed_time would throw
# exception if otherwise.
return e_tik.elapsed_time(e_tok)
+
self.assertGreater(test_stream_synchronize(), 0)
# Test event synchronization for the event that records a stream doing
@@ -536,12 +615,13 @@ class TestCUDA(JitTestCase):
# not necessary to check e_tik and e_tok, as elapsed_time would throw
# exception if otherwise.
return e_tik.elapsed_time(e_tok)
+
self.assertGreater(test_event_wait(), 0)
# Test for stream wait_event. Checks if the stream waits on the event
@torch.jit.script
def test_wait_event():
- d1 = torch.device('cuda:1')
+ d1 = torch.device("cuda:1")
with torch.cuda.device(d1):
s0 = torch.cuda.current_stream(d1)
@@ -550,11 +630,12 @@ class TestCUDA(JitTestCase):
e0 = torch.cuda.Event(False, False, False)
s0.record_event(e0)
- s1 = torch.cuda.current_stream(torch.device('cuda:0'))
+ s1 = torch.cuda.current_stream(torch.device("cuda:0"))
s1.wait_event(e0)
s1.synchronize()
return e0.query() and s0.query() and s1.query()
+
self.assertTrue(test_wait_event())
# Test if a scripted module with cuda streams can be saved, loaded and executed
diff --git a/test/jit/test_custom_operators.py b/test/jit/test_custom_operators.py
index e3cb6393ca..34d47d6ce3 100644
--- a/test/jit/test_custom_operators.py
+++ b/test/jit/test_custom_operators.py
@@ -11,33 +11,37 @@ pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
+
def canonical(graph):
return torch._C._jit_pass_canonicalize(graph).str(False)
-class TestCustomOperators(JitTestCase):
+class TestCustomOperators(JitTestCase):
def test_dynamic_op_registry(self):
from torch._ops import _OpNamespace
- self.assertTrue(hasattr(torch, 'ops'))
- if '_test' in torch.ops.__dict__:
- torch.ops.__dict__.pop('_test')
+ self.assertTrue(hasattr(torch, "ops"))
+
+ if "_test" in torch.ops.__dict__:
+ torch.ops.__dict__.pop("_test")
# Don't use `hasattr()` because it will call `__getattr__`.
- self.assertNotIn('_test', torch.ops.__dict__)
+ self.assertNotIn("_test", torch.ops.__dict__)
torch.ops._test
- self.assertIn('_test', torch.ops.__dict__)
+ self.assertIn("_test", torch.ops.__dict__)
self.assertEqual(type(torch.ops._test), _OpNamespace)
- self.assertNotIn('leaky_relu', torch.ops._test.__dict__)
+ self.assertNotIn("leaky_relu", torch.ops._test.__dict__)
op = torch.ops._test.leaky_relu
self.assertTrue(callable(op))
- self.assertIn('leaky_relu', torch.ops._test.__dict__)
+ self.assertIn("leaky_relu", torch.ops._test.__dict__)
op2 = torch.ops._test.leaky_relu
self.assertEqual(op, op2)
@@ -46,7 +50,7 @@ class TestCustomOperators(JitTestCase):
with self.assertRaisesRegexWithHighlight(
AttributeError,
f"Invalid attribute '{attr}' for '_OpNamespace' '_test'",
- ""
+ "",
):
getattr(torch.ops._test, attr)
@@ -63,15 +67,13 @@ class TestCustomOperators(JitTestCase):
with self.assertRaisesRegexWithHighlight(
RuntimeError,
r"aten::relu\(\) expected at most 1 argument\(s\) but received 2 argument\(s\)",
- ""
+ "",
):
torch.ops.aten.relu(1, 2)
def test_passing_too_few_args(self):
with self.assertRaisesRegexWithHighlight(
- RuntimeError,
- r"aten::relu\(\) is missing value for argument 'self'.",
- ""
+ RuntimeError, r"aten::relu\(\) is missing value for argument 'self'.", ""
):
torch.ops.aten.relu()
@@ -79,7 +81,7 @@ class TestCustomOperators(JitTestCase):
with self.assertRaisesRegexWithHighlight(
RuntimeError,
r"aten::type_as\(\) is missing value for argument 'other'.",
- ""
+ "",
):
torch.ops.aten.type_as(torch.ones(5, 5))
@@ -87,7 +89,7 @@ class TestCustomOperators(JitTestCase):
with self.assertRaisesRegexWithHighlight(
RuntimeError,
"Unknown keyword argument 'foo' for operator '_test::leaky_relu'",
- ""
+ "",
):
torch.ops._test.leaky_relu(torch.ones(5), foo=torch.ones(5))
@@ -102,6 +104,7 @@ class TestCustomOperators(JitTestCase):
@torch.jit.script
def func(x):
return torch.ops.aten.relu(x)
+
input = torch.ones(5, 5)
self.assertEqual(func(input), input.relu())
@@ -110,28 +113,37 @@ class TestCustomOperators(JitTestCase):
func = torch.jit.trace(torch.ops.aten.relu, [input])
self.assertEqual(func(input), input.relu())
- @unittest.skip("Need to figure out default dtype differences between fbcode and oss")
+ @unittest.skip(
+ "Need to figure out default dtype differences between fbcode and oss"
+ )
def test_script_graph_for_custom_ops_matches_traced_graph(self):
input = torch.ones(5, 5)
trace = torch.jit.trace(torch.ops.aten.relu, [input])
- self.assertExpectedInline(canonical(trace.graph), '''\
+ self.assertExpectedInline(
+ canonical(trace.graph),
+ """\
graph(%0 : Float(5, 5)):
%1 : Float(5, 5) = aten::relu(%0)
return (%1)
-''')
+""",
+ )
def test_script_graph_contains_custom_op(self):
@torch.jit.script
def func(x):
return torch.ops.aten.relu(x)
- self.assertExpectedInline(canonical(func.graph), '''\
+
+ self.assertExpectedInline(
+ canonical(func.graph),
+ """\
graph(%x.1 : Tensor):
%1 : Tensor = aten::relu(%x.1)
return (%1)
-''')
+""",
+ )
def test_generic_list(self):
- self.assertEqual(torch.ops._test.get_first([['hello']]), 'hello')
+ self.assertEqual(torch.ops._test.get_first([["hello"]]), "hello")
# https://github.com/pytorch/pytorch/issues/80508
def test_where_no_scalar(self):
diff --git a/test/jit/test_data_parallel.py b/test/jit/test_data_parallel.py
index fd35a2681f..215d3a974a 100644
--- a/test/jit/test_data_parallel.py
+++ b/test/jit/test_data_parallel.py
@@ -13,17 +13,21 @@ pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, RUN_CUDA_MULTI_GPU
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
+
class TestDataParallel(JitTestCase):
class Mpy(torch.nn.Module):
def __init__(self):
super(TestDataParallel.Mpy, self).__init__()
- self.m = nn.Sequential(nn.Linear(2, 2), nn.BatchNorm1d(2),
- nn.ReLU(), nn.Linear(2, 2))
+ self.m = nn.Sequential(
+ nn.Linear(2, 2), nn.BatchNorm1d(2), nn.ReLU(), nn.Linear(2, 2)
+ )
@torch.jit.ignore
def forward(self, input):
@@ -50,13 +54,13 @@ class TestDataParallel(JitTestCase):
return self.m2(x)
class Msm(torch.jit.ScriptModule):
-
- __constants__ = ['m']
+ __constants__ = ["m"]
def __init__(self):
super(TestDataParallel.Msm, self).__init__()
- self.m = nn.Sequential(nn.Linear(2, 2), nn.BatchNorm1d(2),
- nn.ReLU(), nn.Linear(2, 2))
+ self.m = nn.Sequential(
+ nn.Linear(2, 2), nn.BatchNorm1d(2), nn.ReLU(), nn.Linear(2, 2)
+ )
@torch.jit.script_method
def forward(self, input):
@@ -140,7 +144,7 @@ class TestDataParallel(JitTestCase):
# Use .data here to avoid version counter bump.
# The graph created by the following forward will be wrong but
# we never backward through them so it's fine
- p.data -= 1. * p.grad
+ p.data -= 1.0 * p.grad
second_forward = module(x)
# replica which is on the same GPU has a shallow copy of the original
diff --git a/test/jit/test_dataclasses.py b/test/jit/test_dataclasses.py
index b8f68d7073..274818336a 100644
--- a/test/jit/test_dataclasses.py
+++ b/test/jit/test_dataclasses.py
@@ -1,14 +1,16 @@
# Owner(s): ["oncall: jit"]
# flake8: noqa
-from dataclasses import dataclass, field, InitVar
-from hypothesis import given, settings, strategies as st
-from torch.testing._internal.jit_utils import JitTestCase
-from typing import List, Optional
import sys
-import torch
import unittest
+from dataclasses import dataclass, field, InitVar
from enum import Enum
+from typing import List, Optional
+
+import torch
+from hypothesis import given, settings, strategies as st
+from torch.testing._internal.jit_utils import JitTestCase
+
# Example jittable dataclass
@dataclass(order=True)
@@ -20,8 +22,8 @@ class Point:
def __post_init__(self):
self.norm = (torch.tensor(self.x) ** 2 + torch.tensor(self.y) ** 2) ** 0.5
-class MixupScheme(Enum):
+class MixupScheme(Enum):
INPUT = ["input"]
MANIFOLD = [
@@ -38,6 +40,7 @@ class MixupParams:
self.alpha = alpha
self.scheme = scheme
+
class MixupScheme2(Enum):
A = 1
B = 2
@@ -49,6 +52,7 @@ class MixupParams2:
self.alpha = alpha
self.scheme = scheme
+
@dataclass
class MixupParams3:
def __init__(self, alpha: float = 0.125, scheme: MixupScheme2 = MixupScheme2.A):
@@ -59,11 +63,11 @@ class MixupParams3:
# Make sure the Meta internal tooling doesn't raise an overflow error
NonHugeFloats = st.floats(min_value=-1e4, max_value=1e4, allow_nan=False)
-class TestDataclasses(JitTestCase):
+class TestDataclasses(JitTestCase):
@classmethod
def tearDownClass(cls):
- torch._C._jit_clear_class_registry()
+ torch._C._jit_clear_class_registry()
def test_init_vars(self):
@torch.jit.script
@@ -75,7 +79,9 @@ class TestDataclasses(JitTestCase):
norm: Optional[torch.Tensor] = None
def __post_init__(self, norm_p: int):
- self.norm = (torch.tensor(self.x) ** norm_p + torch.tensor(self.y) ** norm_p) ** (1 / norm_p)
+ self.norm = (
+ torch.tensor(self.x) ** norm_p + torch.tensor(self.y) ** norm_p
+ ) ** (1 / norm_p)
def fn(x: float, y: float, p: int):
pt = Point2(x, y, p)
@@ -88,6 +94,7 @@ class TestDataclasses(JitTestCase):
@given(NonHugeFloats, NonHugeFloats)
def test__post_init__(self, x, y):
P = torch.jit.script(Point)
+
def fn(x: float, y: float):
pt = P(x, y)
return pt.norm
@@ -95,7 +102,9 @@ class TestDataclasses(JitTestCase):
self.checkScript(fn, [x, y])
@settings(deadline=None)
- @given(st.tuples(NonHugeFloats, NonHugeFloats), st.tuples(NonHugeFloats, NonHugeFloats))
+ @given(
+ st.tuples(NonHugeFloats, NonHugeFloats), st.tuples(NonHugeFloats, NonHugeFloats)
+ )
def test_comparators(self, pt1, pt2):
x1, y1 = pt1
x2, y2 = pt2
@@ -122,6 +131,7 @@ class TestDataclasses(JitTestCase):
with self.assertRaises(NotImplementedError):
torch.jit.script(Foo)
+
def fn():
foo = Foo()
return foo.x
@@ -137,7 +147,7 @@ class TestDataclasses(JitTestCase):
a: int
b: int
- def __eq__(self, other: 'CustomEq') -> bool:
+ def __eq__(self, other: "CustomEq") -> bool:
return self.a == other.a # ignore the b field
def fn(a: int, b1: int, b2: int):
@@ -154,9 +164,7 @@ class TestDataclasses(JitTestCase):
torch.jit.script(MixupParams2) # don't throw
-
def test_use_unregistered_dataclass_raises(self):
-
def f(a: MixupParams3):
return 0
diff --git a/test/jit/test_device_analysis.py b/test/jit/test_device_analysis.py
index 3ce42e171b..eadde705a2 100644
--- a/test/jit/test_device_analysis.py
+++ b/test/jit/test_device_analysis.py
@@ -1,12 +1,12 @@
# Owner(s): ["oncall: jit"]
-from itertools import product
import unittest
+from itertools import product
import torch
+from torch.jit._passes._property_propagation import apply_input_props_using_example
from torch.testing._internal.common_utils import TEST_CUDA
from torch.testing._internal.jit_utils import JitTestCase
-from torch.jit._passes._property_propagation import apply_input_props_using_example
try:
from torchvision import models
diff --git a/test/jit/test_dtype_analysis.py b/test/jit/test_dtype_analysis.py
index 5c2a587e70..2870f3a5a9 100644
--- a/test/jit/test_dtype_analysis.py
+++ b/test/jit/test_dtype_analysis.py
@@ -7,19 +7,19 @@ from unittest.case import expectedFailure
import torch
from torch import complex32, float32, float64, int32, int64
from torch.jit._passes import _property_propagation
+from torch.testing._internal.common_device_type import (
+ instantiate_device_type_tests,
+ ops,
+)
from torch.testing._internal.common_methods_invocations import (
- SampleInput,
+ op_db,
sample_inputs_adaptive_avg_pool2d,
sample_inputs_conv2d,
+ SampleInput,
)
-from torch.testing._internal.common_utils import set_default_dtype, first_sample
-from torch.testing._internal.jit_utils import JitTestCase
+from torch.testing._internal.common_utils import first_sample, set_default_dtype
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
-from torch.testing._internal.common_device_type import (
- ops,
- instantiate_device_type_tests,
-)
-from torch.testing._internal.common_methods_invocations import op_db
+from torch.testing._internal.jit_utils import JitTestCase
"""
Dtype Analysis relies on symbolic shape analysis, which is still in beta
@@ -274,7 +274,9 @@ class TestDtypeAnalysis(TestDtypeBase):
):
for dtype in (torch.int8, torch.float64):
# Gets default version for conv2d
- sample_input: SampleInput = list(inputs_fn(None, "cpu", dtype, False))[-1]
+ sample_input: SampleInput = list(inputs_fn(None, "cpu", dtype, False))[
+ -1
+ ]
input_args = [sample_input.input, *sample_input.args]
self.assert_dtype_equal_custom_args(fn, input_args)
@@ -352,7 +354,9 @@ class TestDtypeCustomRules(TestDtypeBase):
# Run the Dtype Analysis
graph = traced_fn.graph # Note this is a cached graph
input_tensors = [t for t in input_args if isinstance(t, torch.Tensor)]
- input_tensors += [v for v in sample_input.kwargs.values() if isinstance(v, torch.Tensor)]
+ input_tensors += [
+ v for v in sample_input.kwargs.values() if isinstance(v, torch.Tensor)
+ ]
self.prop_dtype_on_graph(graph, input_tensors)
self.assert_output_dtype_equal(expected_res, graph)
diff --git a/test/jit/test_enum.py b/test/jit/test_enum.py
index e2462bd1f6..00c8904122 100644
--- a/test/jit/test_enum.py
+++ b/test/jit/test_enum.py
@@ -2,21 +2,24 @@
import os
import sys
+from enum import Enum
+from typing import Any, List
import torch
from torch.testing import FileCheck
-from enum import Enum
-from typing import Any, List
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, make_global
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
+
class TestEnum(JitTestCase):
def test_enum_value_types(self):
@@ -38,11 +41,9 @@ class TestEnum(JitTestCase):
def supported_enum_types(a: IntEnum, b: FloatEnum, c: StringEnum):
return (a.name, b.name, c.name)
- FileCheck() \
- .check("IntEnum") \
- .check("FloatEnum") \
- .check("StringEnum") \
- .run(str(supported_enum_types.graph))
+ FileCheck().check("IntEnum").check("FloatEnum").check("StringEnum").run(
+ str(supported_enum_types.graph)
+ )
class TensorEnum(Enum):
FOO = torch.tensor(0)
@@ -54,7 +55,9 @@ class TestEnum(JitTestCase):
return a.name
# TODO: rewrite code so that the highlight is not empty.
- with self.assertRaisesRegexWithHighlight(RuntimeError, "Cannot create Enum with value type 'Tensor'", ""):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "Cannot create Enum with value type 'Tensor'", ""
+ ):
torch.jit.script(unsupported_enum_types)
def test_enum_comp(self):
@@ -88,11 +91,9 @@ class TestEnum(JitTestCase):
def enum_comp(x: Foo) -> bool:
return x == Bar.ITEM1
- FileCheck() \
- .check("prim::Constant") \
- .check_same("Bar.ITEM1") \
- .check("aten::eq") \
- .run(str(enum_comp.graph))
+ FileCheck().check("prim::Constant").check_same("Bar.ITEM1").check(
+ "aten::eq"
+ ).run(str(enum_comp.graph))
self.assertEqual(enum_comp(Foo.ITEM1), False)
@@ -107,7 +108,9 @@ class TestEnum(JitTestCase):
return x == y
# TODO: rewrite code so that the highlight is not empty.
- with self.assertRaisesRegexWithHighlight(RuntimeError, "Could not unify type list", ""):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "Could not unify type list", ""
+ ):
torch.jit.script(enum_comp)
def test_enum_name(self):
@@ -121,11 +124,9 @@ class TestEnum(JitTestCase):
def enum_name(x: Color) -> str:
return x.name
- FileCheck() \
- .check("Color") \
- .check_next("prim::EnumName") \
- .check_next("return") \
- .run(str(enum_name.graph))
+ FileCheck().check("Color").check_next("prim::EnumName").check_next(
+ "return"
+ ).run(str(enum_name.graph))
self.assertEqual(enum_name(Color.RED), Color.RED.name)
self.assertEqual(enum_name(Color.GREEN), Color.GREEN.name)
@@ -141,11 +142,9 @@ class TestEnum(JitTestCase):
def enum_value(x: Color) -> int:
return x.value
- FileCheck() \
- .check("Color") \
- .check_next("prim::EnumValue") \
- .check_next("return") \
- .run(str(enum_value.graph))
+ FileCheck().check("Color").check_next("prim::EnumValue").check_next(
+ "return"
+ ).run(str(enum_value.graph))
self.assertEqual(enum_value(Color.RED), Color.RED.value)
self.assertEqual(enum_value(Color.GREEN), Color.GREEN.value)
@@ -161,11 +160,9 @@ class TestEnum(JitTestCase):
def enum_const(x: Color) -> bool:
return x == Color.RED
- FileCheck() \
- .check("prim::Constant[value=__torch__.jit.test_enum.Color.RED]") \
- .check_next("aten::eq") \
- .check_next("return") \
- .run(str(enum_const.graph))
+ FileCheck().check(
+ "prim::Constant[value=__torch__.jit.test_enum.Color.RED]"
+ ).check_next("aten::eq").check_next("return").run(str(enum_const.graph))
self.assertEqual(enum_const(Color.RED), True)
self.assertEqual(enum_const(Color.GREEN), False)
@@ -183,7 +180,9 @@ class TestEnum(JitTestCase):
else:
return False
- with self.assertRaisesRegexWithHighlight(RuntimeError, "has no attribute 'PURPLE'", "Color.PURPLE"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "has no attribute 'PURPLE'", "Color.PURPLE"
+ ):
torch.jit.script(enum_const)
def test_enum_ivalue_type(self):
@@ -197,10 +196,9 @@ class TestEnum(JitTestCase):
def is_color_enum(x: Any):
return isinstance(x, Color)
- FileCheck() \
- .check("prim::isinstance[types=[Enum<__torch__.jit.test_enum.Color>]]") \
- .check_next("return") \
- .run(str(is_color_enum.graph))
+ FileCheck().check(
+ "prim::isinstance[types=[Enum<__torch__.jit.test_enum.Color>]]"
+ ).check_next("return").run(str(is_color_enum.graph))
self.assertEqual(is_color_enum(Color.RED), True)
self.assertEqual(is_color_enum(Color.GREEN), True)
@@ -217,10 +215,9 @@ class TestEnum(JitTestCase):
def closed_over_aliased_type():
return a.RED.value
- FileCheck() \
- .check("prim::Constant[value={}]".format(a.RED.value)) \
- .check_next("return") \
- .run(str(closed_over_aliased_type.graph))
+ FileCheck().check("prim::Constant[value={}]".format(a.RED.value)).check_next(
+ "return"
+ ).run(str(closed_over_aliased_type.graph))
self.assertEqual(closed_over_aliased_type(), Color.RED.value)
@@ -230,10 +227,9 @@ class TestEnum(JitTestCase):
def closed_over_aliased_value():
return b.value
- FileCheck() \
- .check("prim::Constant[value={}]".format(b.value)) \
- .check_next("return") \
- .run(str(closed_over_aliased_value.graph))
+ FileCheck().check("prim::Constant[value={}]".format(b.value)).check_next(
+ "return"
+ ).run(str(closed_over_aliased_value.graph))
self.assertEqual(closed_over_aliased_value(), Color.RED.value)
@@ -253,13 +249,9 @@ class TestEnum(JitTestCase):
m = TestModule(Color.RED)
scripted = torch.jit.script(m)
- FileCheck() \
- .check("TestModule") \
- .check_next("Color") \
- .check_same("prim::GetAttr[name=\"e\"]") \
- .check_next("prim::EnumValue") \
- .check_next("return") \
- .run(str(scripted.graph))
+ FileCheck().check("TestModule").check_next("Color").check_same(
+ 'prim::GetAttr[name="e"]'
+ ).check_next("prim::EnumValue").check_next("return").run(str(scripted.graph))
self.assertEqual(scripted(), Color.RED.value)
@@ -316,16 +308,12 @@ class TestEnum(JitTestCase):
m = TestModule(Color.RED)
scripted = torch.jit.script(m)
- FileCheck() \
- .check("TestModule") \
- .check_next("Color") \
- .check_same("prim::GetAttr[name=\"e\"]") \
- .check_next("return") \
- .run(str(scripted.graph))
+ FileCheck().check("TestModule").check_next("Color").check_same(
+ 'prim::GetAttr[name="e"]'
+ ).check_next("return").run(str(scripted.graph))
self.assertEqual(scripted(), Color.RED)
-
def test_enum_iterate(self):
class Color(Enum):
RED = 1
@@ -342,12 +330,9 @@ class TestEnum(JitTestCase):
make_global(Color)
scripted = torch.jit.script(iterate_enum)
- FileCheck() \
- .check("Enum<__torch__.jit.test_enum.Color>[]") \
- .check_same("Color.RED") \
- .check_same("Color.GREEN") \
- .check_same("Color.BLUE") \
- .run(str(scripted.graph))
+ FileCheck().check("Enum<__torch__.jit.test_enum.Color>[]").check_same(
+ "Color.RED"
+ ).check_same("Color.GREEN").check_same("Color.BLUE").run(str(scripted.graph))
# PURPLE always appears last because we follow Python's Enum definition order.
self.assertEqual(scripted(Color.RED), [Color.GREEN.value, Color.BLUE.value])
@@ -355,7 +340,6 @@ class TestEnum(JitTestCase):
# Tests that explicitly and/or repeatedly scripting an Enum class is permitted.
def test_enum_explicit_script(self):
-
@torch.jit.script
class Color(Enum):
RED = 1
diff --git a/test/jit/test_exception.py b/test/jit/test_exception.py
index 0f3aca030a..04c3294ec5 100644
--- a/test/jit/test_exception.py
+++ b/test/jit/test_exception.py
@@ -1,11 +1,13 @@
# Owner(s): ["oncall: jit"]
-from torch.testing._internal.common_utils import TestCase
import torch
from torch import nn
+from torch.testing._internal.common_utils import TestCase
r"""
Test TorchScript exception handling.
"""
+
+
class TestException(TestCase):
def test_pyop_exception_message(self):
class Foo(torch.jit.ScriptModule):
@@ -16,31 +18,40 @@ class TestException(TestCase):
@torch.jit.script_method
def forward(self, x):
return self.conv(x)
+
foo = Foo()
# testing that the correct error message propagates
- with self.assertRaisesRegex(RuntimeError, r"Expected 3D \(unbatched\) or 4D \(batched\) input to conv2d"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Expected 3D \(unbatched\) or 4D \(batched\) input to conv2d"
+ ):
foo(torch.ones([123])) # wrong size
def test_builtin_error_messsage(self):
with self.assertRaisesRegex(RuntimeError, "Arguments for call are not valid"):
+
@torch.jit.script
def close_match(x):
return x.masked_fill(True)
- with self.assertRaisesRegex(RuntimeError, "This op may not exist or may not be currently "
- "supported in TorchScript"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "This op may not exist or may not be currently " "supported in TorchScript",
+ ):
+
@torch.jit.script
def unknown_op(x):
torch.set_anomaly_enabled(True)
return x
def test_exceptions(self):
- cu = torch.jit.CompilationUnit('''
+ cu = torch.jit.CompilationUnit(
+ """
def foo(cond):
if bool(cond):
raise ValueError(3)
return 1
- ''')
+ """
+ )
cu.foo(torch.tensor(0))
with self.assertRaisesRegex(torch.jit.Error, "3"):
@@ -97,6 +108,7 @@ class TestException(TestCase):
else:
raise Exception("Hi")
return a
+
self.assertEqual(foo(), 1)
@torch.jit.script
@@ -114,11 +126,13 @@ class TestException(TestCase):
no_message()
def test_assertions(self):
- cu = torch.jit.CompilationUnit('''
+ cu = torch.jit.CompilationUnit(
+ """
def foo(cond):
assert bool(cond), "hi"
return 0
- ''')
+ """
+ )
cu.foo(torch.tensor(1))
with self.assertRaisesRegex(torch.jit.Error, "AssertionError: hi"):
@@ -142,7 +156,9 @@ class TestException(TestCase):
def fn(x):
return python_op(x)
- with self.assertRaisesRegex(RuntimeError, "operation failed in the TorchScript interpreter"):
+ with self.assertRaisesRegex(
+ RuntimeError, "operation failed in the TorchScript interpreter"
+ ):
fn(torch.tensor(4))
def test_dict_expansion_raises_error(self):
@@ -150,8 +166,9 @@ class TestException(TestCase):
d = {"foo": 1, "bar": 2, "baz": 3}
return {**d}
- with self.assertRaisesRegex(torch.jit.frontend.NotSupportedError,
- "Dict expansion "):
+ with self.assertRaisesRegex(
+ torch.jit.frontend.NotSupportedError, "Dict expansion "
+ ):
torch.jit.script(fn)
def test_custom_python_exception(self):
@@ -162,7 +179,9 @@ class TestException(TestCase):
def fn():
raise MyValueError("test custom exception")
- with self.assertRaisesRegex(torch.jit.Error, "jit.test_exception.MyValueError: test custom exception"):
+ with self.assertRaisesRegex(
+ torch.jit.Error, "jit.test_exception.MyValueError: test custom exception"
+ ):
fn()
def test_custom_python_exception_defined_elsewhere(self):
@@ -171,5 +190,9 @@ class TestException(TestCase):
@torch.jit.script
def fn():
raise MyKeyError("This is a user defined key error")
- with self.assertRaisesRegex(torch.jit.Error, "jit.myexception.MyKeyError: This is a user defined key error"):
+
+ with self.assertRaisesRegex(
+ torch.jit.Error,
+ "jit.myexception.MyKeyError: This is a user defined key error",
+ ):
fn()
diff --git a/test/jit/test_freezing.py b/test/jit/test_freezing.py
index f13c2b113b..91d2bdb82c 100644
--- a/test/jit/test_freezing.py
+++ b/test/jit/test_freezing.py
@@ -10,29 +10,38 @@ import torch.nn as nn
import torch.nn.functional as F
from torch.jit._recursive import wrap_cpp_module
from torch.testing import FileCheck
+from torch.testing._internal.common_cuda import TEST_CUDA, TEST_CUDNN
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.testing._internal.common_quantized import override_quantized_engine
-from torch.testing._internal.common_utils import set_default_dtype, skipCUDAMemoryLeakCheckIf, TEST_WITH_ROCM, skipIfTorchDynamo
-from torch.testing._internal.common_cuda import TEST_CUDNN, TEST_CUDA
+from torch.testing._internal.common_utils import (
+ set_default_dtype,
+ skipCUDAMemoryLeakCheckIf,
+ skipIfTorchDynamo,
+ TEST_WITH_ROCM,
+)
from torch.testing._internal.jit_utils import JitTestCase
from torch.utils import mkldnn as mkldnn_utils
try:
import torchvision
+
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
TEST_ROCM = torch.cuda.is_available() and torch.version.hip is not None
+
def removeExceptions(graph):
- for n in graph.findAllNodes('prim::RaiseException'):
+ for n in graph.findAllNodes("prim::RaiseException"):
n.destroy()
@@ -42,26 +51,46 @@ class TestFreezing(JitTestCase):
class M(nn.Module):
def __init__(self):
super().__init__()
- self.a = 1 # folded
- self.b = 1.2 # folded
- self.c = "hello" # folded
- self.c2 = "hi\xA1" # not folded
- self.d = [1, 1] # folded
- self.e = [1.0, 1.1] # folded
- self.f = ["hello", "world"] # folded
+ self.a = 1 # folded
+ self.b = 1.2 # folded
+ self.c = "hello" # folded
+ self.c2 = "hi\xA1" # not folded
+ self.d = [1, 1] # folded
+ self.e = [1.0, 1.1] # folded
+ self.f = ["hello", "world"] # folded
self.f2 = [(1, "Over \u0e55\u0e57 57")]
- self.g = ([1, 2], 3.2, "4.4", torch.tensor([5.5], requires_grad=True)) # folded
- self.h = {"layer" : [torch.tensor([7.7], requires_grad=True)]}
- self.h2 = {"layer\xB1" : [torch.tensor([8.8], requires_grad=True)]}
+ self.g = (
+ [1, 2],
+ 3.2,
+ "4.4",
+ torch.tensor([5.5], requires_grad=True),
+ ) # folded
+ self.h = {"layer": [torch.tensor([7.7], requires_grad=True)]}
+ self.h2 = {"layer\xB1": [torch.tensor([8.8], requires_grad=True)]}
self.t = torch.tensor([1.2, 2.4], requires_grad=True) # folded
- self.ts = [torch.tensor([1.0, 2.0], requires_grad=True), torch.tensor([3.0, 4.0], requires_grad=True)] # folded
+ self.ts = [
+ torch.tensor([1.0, 2.0], requires_grad=True),
+ torch.tensor([3.0, 4.0], requires_grad=True),
+ ] # folded
self.tt = [[torch.tensor([3.3, 2.3], requires_grad=True), None]]
def forward(self, x):
- return str(self.a) + str(self.b) + self.c + self.c2 + str(self.d) + \
- str(self.e) + str(self.f) + str(self.f2) + str(self.g) + \
- str(self.h) + str(self.h2) + str(self.t) + str(self.ts) + str(self.tt)
-
+ return (
+ str(self.a)
+ + str(self.b)
+ + self.c
+ + self.c2
+ + str(self.d)
+ + str(self.e)
+ + str(self.f)
+ + str(self.f2)
+ + str(self.g)
+ + str(self.h)
+ + str(self.h2)
+ + str(self.t)
+ + str(self.ts)
+ + str(self.tt)
+ )
m = torch.jit.script(M())
m.eval()
@@ -79,20 +108,20 @@ class TestFreezing(JitTestCase):
# }
# ...
# }
- self.assertFalse(m2._c.hasattr('a'))
- self.assertFalse(m2._c.hasattr('b'))
- self.assertFalse(m2._c.hasattr('c'))
- self.assertFalse(m2._c.hasattr('c2'))
- self.assertFalse(m2._c.hasattr('d'))
- self.assertFalse(m2._c.hasattr('e'))
- self.assertFalse(m2._c.hasattr('f'))
- self.assertFalse(m2._c.hasattr('f2'))
- self.assertFalse(m2._c.hasattr('g'))
- self.assertFalse(m2._c.hasattr('h'))
- self.assertFalse(m2._c.hasattr('h2'))
- self.assertFalse(m2._c.hasattr('t'))
- self.assertFalse(m2._c.hasattr('ts'))
- self.assertFalse(m2._c.hasattr('tt'))
+ self.assertFalse(m2._c.hasattr("a"))
+ self.assertFalse(m2._c.hasattr("b"))
+ self.assertFalse(m2._c.hasattr("c"))
+ self.assertFalse(m2._c.hasattr("c2"))
+ self.assertFalse(m2._c.hasattr("d"))
+ self.assertFalse(m2._c.hasattr("e"))
+ self.assertFalse(m2._c.hasattr("f"))
+ self.assertFalse(m2._c.hasattr("f2"))
+ self.assertFalse(m2._c.hasattr("g"))
+ self.assertFalse(m2._c.hasattr("h"))
+ self.assertFalse(m2._c.hasattr("h2"))
+ self.assertFalse(m2._c.hasattr("t"))
+ self.assertFalse(m2._c.hasattr("ts"))
+ self.assertFalse(m2._c.hasattr("tt"))
output_f = m2.forward(input)
self.assertEqual(output_s, output_f)
@@ -152,12 +181,12 @@ class TestFreezing(JitTestCase):
# }
# }
mf = mf._c
- self.assertFalse(mf.hasattr('sub1'))
- self.assertFalse(mf.hasattr('a'))
- self.assertTrue(mf.hasattr('b'))
- self.assertTrue(mf.hasattr('sub2'))
- self.assertTrue(mf.sub2.hasattr('b')) # verify b is preserved in sub2
- self.assertFalse(mf.sub2.hasattr('a')) # verify a is removed in sub2
+ self.assertFalse(mf.hasattr("sub1"))
+ self.assertFalse(mf.hasattr("a"))
+ self.assertTrue(mf.hasattr("b"))
+ self.assertTrue(mf.hasattr("sub2"))
+ self.assertTrue(mf.sub2.hasattr("b")) # verify b is preserved in sub2
+ self.assertFalse(mf.sub2.hasattr("a")) # verify a is removed in sub2
output_f = mf.forward(input)
self.assertEqual(output_s, output_f)
@@ -196,8 +225,8 @@ class TestFreezing(JitTestCase):
# submodule {
# }
# }
- self.assertFalse(mf.hasattr('a'))
- self.assertFalse(mf.hasattr('b'))
+ self.assertFalse(mf.hasattr("a"))
+ self.assertFalse(mf.hasattr("b"))
output_f = mf.forward(input)
self.assertEqual(output_s, output_f)
@@ -249,14 +278,13 @@ class TestFreezing(JitTestCase):
# submodule {
# }
# }
- self.assertFalse(mf.hasattr('a'))
- self.assertFalse(mf.hasattr('b'))
- self.assertFalse(mf.hasattr('c'))
- self.assertTrue(mf.hasattr('d'))
+ self.assertFalse(mf.hasattr("a"))
+ self.assertFalse(mf.hasattr("b"))
+ self.assertFalse(mf.hasattr("c"))
+ self.assertTrue(mf.hasattr("d"))
output_f = mf.forward(input)
self.assertEqual(output_s, output_f)
-
def test_freeze_module_with_fork2(self):
@torch.jit.script
def foo(x):
@@ -293,8 +321,8 @@ class TestFreezing(JitTestCase):
# TODO: Although there are no mutation, the alias analysis
# conservatively assumes there is a mutation because attributes are
# passed to fork subgraph. both 'a' and 'b' are preserved.
- self.assertTrue(mf.hasattr('a'))
- self.assertFalse(mf.hasattr('b'))
+ self.assertTrue(mf.hasattr("a"))
+ self.assertFalse(mf.hasattr("b"))
output_f = mf.forward(input)
self.assertEqual(output_s, output_f)
@@ -337,8 +365,8 @@ class TestFreezing(JitTestCase):
# TODO: Although there are no mutation, the alias analysis
# conservatively assumes there is a mutation because attributes are
# passed to fork subgraph. 'b' is preserved.
- self.assertFalse(mf.hasattr('a'))
- self.assertTrue(mf.hasattr('b'))
+ self.assertFalse(mf.hasattr("a"))
+ self.assertTrue(mf.hasattr("b"))
output_f = mf.forward(input)
self.assertEqual(output_s, output_f)
@@ -355,7 +383,7 @@ class TestFreezing(JitTestCase):
@torch.jit.export
def modify_a(self, x):
self.a[0] += 10
- return self. b
+ return self.b
@torch.jit.export
def modify_b(self, x):
@@ -422,15 +450,15 @@ class TestFreezing(JitTestCase):
# }
# }
- self.assertTrue(mf.hasattr('sub1'))
- self.assertTrue(mf.sub1.hasattr('a'))
- self.assertTrue(mf.sub1.hasattr('b'))
- self.assertFalse(mf.hasattr('a'))
- self.assertTrue(mf.hasattr('sub2'))
- self.assertTrue(mf.sub2.hasattr('sub'))
- self.assertFalse(mf.sub2.hasattr('b'))
- self.assertTrue(mf.sub2.sub.hasattr('a'))
- self.assertTrue(mf.sub2.sub.hasattr('b'))
+ self.assertTrue(mf.hasattr("sub1"))
+ self.assertTrue(mf.sub1.hasattr("a"))
+ self.assertTrue(mf.sub1.hasattr("b"))
+ self.assertFalse(mf.hasattr("a"))
+ self.assertTrue(mf.hasattr("sub2"))
+ self.assertTrue(mf.sub2.hasattr("sub"))
+ self.assertFalse(mf.sub2.hasattr("b"))
+ self.assertTrue(mf.sub2.sub.hasattr("a"))
+ self.assertTrue(mf.sub2.sub.hasattr("b"))
output_f = mf.forward(input)
self.assertEqual(output_s, output_f)
@@ -447,12 +475,13 @@ class TestFreezing(JitTestCase):
@torch.jit.export
def modify_a(self, x):
self.a[0] = 10
- return self. b
+ return self.b
@torch.jit.export
def modify_b(self, x):
self.b[0] = 20
return self.a
+
Sub = SubModule()
class SubModule2(nn.Module):
@@ -476,13 +505,15 @@ class TestFreezing(JitTestCase):
m = torch.jit.script(TestModule())
m.eval()
mf = torch._C._freeze_module(m._c)
- self.assertTrue(mf.hasattr('sub1'))
- self.assertTrue(mf.sub1.hasattr('a'))
- self.assertFalse(mf.sub1.hasattr('b'))
- self.assertTrue(mf.hasattr('sub2'))
- self.assertTrue(mf.sub2.hasattr('sub'))
- self.assertTrue(mf.sub2.sub.hasattr('a')) # Freezing detects that self.sub2.sub.a and self.sub1.a are alias
- self.assertFalse(mf.sub2.sub.hasattr('b'))
+ self.assertTrue(mf.hasattr("sub1"))
+ self.assertTrue(mf.sub1.hasattr("a"))
+ self.assertFalse(mf.sub1.hasattr("b"))
+ self.assertTrue(mf.hasattr("sub2"))
+ self.assertTrue(mf.sub2.hasattr("sub"))
+ self.assertTrue(
+ mf.sub2.sub.hasattr("a")
+ ) # Freezing detects that self.sub2.sub.a and self.sub1.a are alias
+ self.assertFalse(mf.sub2.sub.hasattr("b"))
input = torch.randn(2, 2)
output_s = m.forward(input)
output_f = mf.forward(input)
@@ -503,12 +534,13 @@ class TestFreezing(JitTestCase):
@torch.jit.export
def modify_a(self, x):
self.a = 10.0
- return self. b
+ return self.b
@torch.jit.export
def modify_b(self, x):
self.b = 20.0
return self.a
+
Sub = SubModule()
class SubModule2(nn.Module):
@@ -528,15 +560,16 @@ class TestFreezing(JitTestCase):
def forward(self, x):
z = self.sub1.modify_a(x)
return self.sub2(x) + z
+
m = TestModule()
ms = torch.jit.script(m)
ms.eval()
mf = torch._C._freeze_module(ms._c)
- self.assertTrue(mf.hasattr('sub1'))
- self.assertTrue(mf.sub1.hasattr('a'))
- self.assertFalse(mf.sub1.hasattr('b'))
+ self.assertTrue(mf.hasattr("sub1"))
+ self.assertTrue(mf.sub1.hasattr("a"))
+ self.assertFalse(mf.sub1.hasattr("b"))
# sub2 is fully folded becasue self.sub1 and self.sub2.sub are not alias (Scripting bug)
- self.assertFalse(mf.hasattr('sub2'))
+ self.assertFalse(mf.hasattr("sub2"))
input = torch.randn(2, 2)
output = m.forward(input)
output_s = ms.forward(input)
@@ -545,7 +578,6 @@ class TestFreezing(JitTestCase):
self.assertNotEqual(output, output_s)
self.assertEqual(output_s, output_f)
-
def test_freeze_module_with_preserve_sub_module(self):
class SubModule(nn.Module):
def __init__(self):
@@ -564,16 +596,17 @@ class TestFreezing(JitTestCase):
def forward(self, x):
return self.sub2(x) + self.sub1(x)
+
m = TestModule()
ms = torch.jit.script(m)
ms.eval()
mf = torch._C._freeze_module(ms._c, ["sub1"])
# Test that 'sub1' is preserved entirely and 'sub2' is completely folded
- self.assertTrue(mf.hasattr('sub1'))
- self.assertTrue(mf.sub1.hasattr('a'))
- self.assertTrue(mf.sub1.hasattr('b'))
- self.assertFalse(mf.hasattr('sub2'))
+ self.assertTrue(mf.hasattr("sub1"))
+ self.assertTrue(mf.sub1.hasattr("a"))
+ self.assertTrue(mf.sub1.hasattr("b"))
+ self.assertFalse(mf.hasattr("sub2"))
input = torch.randn(2, 2)
output_s = ms.forward(input)
output_f = mf.forward(input)
@@ -598,6 +631,7 @@ class TestFreezing(JitTestCase):
def forward(self, x):
return self.sub2(x) + self.sub1(x)
+
m = TestModule()
ms = torch.jit.script(m)
ms.eval()
@@ -605,18 +639,17 @@ class TestFreezing(JitTestCase):
# Test that be both sub1 and sub1 are preserved and 'b' is preserved
# even if it is not used. To fulfill user request to preserve 'sub1'
- self.assertTrue(mf.hasattr('sub1'))
- self.assertTrue(mf.sub1.hasattr('a'))
- self.assertTrue(mf.sub1.hasattr('b'))
- self.assertTrue(mf.hasattr('sub2'))
- self.assertTrue(mf.sub2.hasattr('a'))
- self.assertTrue(mf.sub2.hasattr('b'))
+ self.assertTrue(mf.hasattr("sub1"))
+ self.assertTrue(mf.sub1.hasattr("a"))
+ self.assertTrue(mf.sub1.hasattr("b"))
+ self.assertTrue(mf.hasattr("sub2"))
+ self.assertTrue(mf.sub2.hasattr("a"))
+ self.assertTrue(mf.sub2.hasattr("b"))
input = torch.randn(2, 2)
output_s = ms.forward(input)
output_f = mf.forward(input)
self.assertEqual(output_s, output_f)
-
def test_freeze_module_with_helperfunction(self):
class SubModule(nn.Module):
def __init__(self):
@@ -640,14 +673,17 @@ class TestFreezing(JitTestCase):
def _forward(self, x):
return self.sub(x)
+
m = torch.jit.script(TestModule())
m.eval()
input = torch.randn(2, 2)
mf = torch._C._freeze_module(m._c)
- self.assertFalse(mf.hasattr('sub'))
- self.assertFalse(mf.hasattr('a'))
- self.assertTrue(mf.hasattr('b'))
- with self.assertRaisesRegex(AttributeError, "TestModule (.*) does not have a field with name '_forward'"):
+ self.assertFalse(mf.hasattr("sub"))
+ self.assertFalse(mf.hasattr("a"))
+ self.assertTrue(mf.hasattr("b"))
+ with self.assertRaisesRegex(
+ AttributeError, "TestModule (.*) does not have a field with name '_forward'"
+ ):
mf._forward(x) # noqa: F821
def test_freeze_module_with_inplace_mutable(self):
@@ -665,7 +701,7 @@ class TestFreezing(JitTestCase):
m = FreezeMe()
m.eval()
m_f = torch._C._freeze_module(m._c)
- self.assertTrue(m_f.hasattr('a'))
+ self.assertTrue(m_f.hasattr("a"))
m.forward(torch.tensor([3]))
out = m_f.forward(torch.tensor([5]))
expected = [11, 22, 0, 1, 2, 0, 1, 2]
@@ -694,7 +730,7 @@ class TestFreezing(JitTestCase):
v = m_s.a
v.append(5)
m_s.a = v
- self.assertFalse(m_f.hasattr('a'))
+ self.assertFalse(m_f.hasattr("a"))
out = m_f.forward(torch.tensor([5]))
expected = [1, 2, 3, 4]
self.assertEqual(out, expected)
@@ -703,7 +739,7 @@ class TestFreezing(JitTestCase):
class FreezeMe(nn.Module):
def __init__(self):
super().__init__()
- self.a = {"layer" : "4"}
+ self.a = {"layer": "4"}
def forward(self, x):
return self.a
@@ -723,16 +759,16 @@ class TestFreezing(JitTestCase):
m_f = torch._C._freeze_module(m_s._c)
m.a["layer2"] += "2"
m_s.modify_a(t)
- self.assertFalse(m_f.hasattr('a'))
+ self.assertFalse(m_f.hasattr("a"))
out = m_f.forward(t)
- expected = {"layer" : "411", "layer2" : "3"}
+ expected = {"layer": "411", "layer2": "3"}
self.assertEqual(out, expected)
def test_freeze_module_with_mutable_tensor(self):
class FreezeMe(nn.Module):
def __init__(self):
super().__init__()
- self.a = torch.tensor([1., 2., 3.])
+ self.a = torch.tensor([1.0, 2.0, 3.0])
def forward(self, x):
return self.a
@@ -745,9 +781,9 @@ class TestFreezing(JitTestCase):
# Post-freezing tensor attribute mutations affect m_f.
# FIXME: deep copy all folded attributes so that m_f has full ownership.
m_s.a[0] += 5.0
- self.assertFalse(m_f.hasattr('a'))
+ self.assertFalse(m_f.hasattr("a"))
out = m_f.forward(torch.tensor([5]))
- expected = [6., 5., 3.]
+ expected = [6.0, 5.0, 3.0]
self.assertEqual(out, expected)
def test_freeze_module_with_tuple(self):
@@ -757,7 +793,7 @@ class TestFreezing(JitTestCase):
self.a = (torch.tensor([1, 2, 3, 4, 5, 6]), "hi")
def forward(self, x):
- if (x[0] == 2.0):
+ if x[0] == 2.0:
self.a[0][0] = 10
return self.a[0].sum()
@@ -768,7 +804,7 @@ class TestFreezing(JitTestCase):
expected = m_s.forward(inp)
m_s.a[0][0] = 1
m_f = torch._C._freeze_module(m_s._c)
- self.assertFalse(m_f.hasattr('a'))
+ self.assertFalse(m_f.hasattr("a"))
out = m_f.forward(inp)
self.assertEqual(out, expected)
@@ -789,7 +825,7 @@ class TestFreezing(JitTestCase):
inp = torch.tensor([5])
expected = m_s.forward(inp)
m_f = torch._C._freeze_module(m_s._c)
- self.assertTrue(m_f.hasattr('a'))
+ self.assertTrue(m_f.hasattr("a"))
m_f.a[0] -= 10
out = m_f.forward(inp)
self.assertEqual(out, expected)
@@ -811,7 +847,7 @@ class TestFreezing(JitTestCase):
expected = m_s.forward(inp)
m_s.a[0][1] -= 10
m_f = torch._C._freeze_module(m_s._c)
- self.assertFalse(m_f.hasattr('a'))
+ self.assertFalse(m_f.hasattr("a"))
out = m_f.forward(inp)
self.assertEqual(out, expected)
@@ -830,7 +866,7 @@ class TestFreezing(JitTestCase):
m_s = torch.jit.script(m)
m_s.eval()
m_f = torch._C._freeze_module(m_s._c)
- self.assertTrue(m_f.hasattr('a'))
+ self.assertTrue(m_f.hasattr("a"))
inp = torch.tensor([5])
out = m_f.forward(inp)
expected = torch.tensor(51) # 1+2+3+14+15+16
@@ -841,7 +877,7 @@ class TestFreezing(JitTestCase):
def __init__(self):
super().__init__()
self.a = torch.tensor([1, 2, 3, 4, 5, 6])
- self.b = {"layer" : ([self.a.view(2, 3), torch.tensor([10])], 20)}
+ self.b = {"layer": ([self.a.view(2, 3), torch.tensor([10])], 20)}
self.c = ([self.a.view(2, 3), torch.tensor([10])], 20)
self.d = (self.a.view(2, 3), 20)
@@ -854,7 +890,9 @@ class TestFreezing(JitTestCase):
m_s.eval()
inp = torch.tensor([5])
expected = m_s.forward(inp)
- with self.assertRaisesRegex(RuntimeError, "module contains attributes values that overlaps"):
+ with self.assertRaisesRegex(
+ RuntimeError, "module contains attributes values that overlaps"
+ ):
m_f = torch._C._freeze_module(m_s._c)
def test_freeze_module_with_aliased_tensor_attr3(self):
@@ -874,8 +912,8 @@ class TestFreezing(JitTestCase):
inp = torch.tensor([5])
expected = m_s.forward(inp)
m_f = torch._C._freeze_module(m_s._c)
- self.assertTrue(m_f.hasattr('a'))
- self.assertTrue(m_f.hasattr('b'))
+ self.assertTrue(m_f.hasattr("a"))
+ self.assertTrue(m_f.hasattr("b"))
out = m_f.forward(inp)
expected += 10 # account for self.a += 10.
self.assertEqual(out, expected)
@@ -897,7 +935,9 @@ class TestFreezing(JitTestCase):
inp = torch.tensor([5])
expected = m_s.forward(inp)
m_s.a[0] -= 10
- with self.assertRaisesRegex(RuntimeError, "module contains attributes values that overlaps"):
+ with self.assertRaisesRegex(
+ RuntimeError, "module contains attributes values that overlaps"
+ ):
m_f = torch._C._freeze_module(m_s._c)
def test_freeze_module_with_overlapping_attrs(self):
@@ -919,7 +959,9 @@ class TestFreezing(JitTestCase):
inp = torch.tensor([5])
expected = m_s.forward(inp)
a[0] -= 10
- with self.assertRaisesRegex(RuntimeError, "module contains attributes values that overlaps"):
+ with self.assertRaisesRegex(
+ RuntimeError, "module contains attributes values that overlaps"
+ ):
m_f = torch._C._freeze_module(m_s._c)
def test_freeze_module_with_aliased_attr(self):
@@ -939,8 +981,8 @@ class TestFreezing(JitTestCase):
m_s.eval()
m_f = torch._C._freeze_module(m_s._c)
# FIXME: It should be assertTrue. Currently scripting is making a copy for setting self.b (see #33034)
- self.assertFalse(m_f.hasattr('a'))
- self.assertFalse(m_f.hasattr('c'))
+ self.assertFalse(m_f.hasattr("a"))
+ self.assertFalse(m_f.hasattr("c"))
inp = torch.tensor([5])
out = m_f.forward(inp)
expected = m_s.forward(inp)
@@ -967,7 +1009,7 @@ class TestFreezing(JitTestCase):
m_s = torch.jit.script(m)
m_s.eval()
m_f = torch._C._freeze_module(m_s._c)
- self.assertTrue(m_f.hasattr('a'))
+ self.assertTrue(m_f.hasattr("a"))
inp = torch.tensor([5])
out = m_f.forward(inp)
expected = m.forward(inp)
@@ -991,7 +1033,7 @@ class TestFreezing(JitTestCase):
m_s = torch.jit.script(m)
m_s.eval()
m_f = torch._C._freeze_module(m_s._c)
- self.assertTrue(m_f.hasattr('a'))
+ self.assertTrue(m_f.hasattr("a"))
inp = torch.tensor([5])
out = m_f.forward(inp)
expected = m.forward(inp)
@@ -1001,7 +1043,7 @@ class TestFreezing(JitTestCase):
class FreezeMe(nn.Module):
def __init__(self):
super().__init__()
- self.a = torch.tensor([1., 2., 3.])
+ self.a = torch.tensor([1.0, 2.0, 3.0])
def forward(self, x):
return self
@@ -1009,7 +1051,9 @@ class TestFreezing(JitTestCase):
m = FreezeMe()
m_s = torch.jit.script(m)
m_s.eval()
- with self.assertRaisesRegex(RuntimeError, "attempted to freeze a module that return itself"):
+ with self.assertRaisesRegex(
+ RuntimeError, "attempted to freeze a module that return itself"
+ ):
m_f = torch._C._freeze_module(m_s._c)
def test_freeze_module_inlining(self):
@@ -1041,7 +1085,6 @@ class TestFreezing(JitTestCase):
self.assertTrue(torch._C._jit_object_is_non_holding(obj))
def test_freeze_module_return_sub_module(self):
-
class FreezeMe(nn.Module):
def __init__(self):
super().__init__()
@@ -1054,10 +1097,9 @@ class TestFreezing(JitTestCase):
m_s = torch.jit.script(m)
m_s.eval()
m_f = torch._C._freeze_module(m_s._c)
- self.assertTrue(m_f.hasattr('conv1'))
+ self.assertTrue(m_f.hasattr("conv1"))
def test_freeze_module_no_forward(self):
-
class FreezeMe(nn.Module):
def __init__(self):
super().__init__()
@@ -1070,13 +1112,11 @@ class TestFreezing(JitTestCase):
m = FreezeMe()
m_s = torch.jit.script(m)
m_s.eval()
- m_f = torch._C._freeze_module(m_s._c, preservedAttrs=['foo'])
+ m_f = torch._C._freeze_module(m_s._c, preservedAttrs=["foo"])
input = torch.ones(10)
self.assertEqual(m_s.foo(input), m_f.foo(input))
-
def test_freeze_no_forward(self):
-
class FreezeMe(nn.Module):
def __init__(self):
super().__init__()
@@ -1089,11 +1129,10 @@ class TestFreezing(JitTestCase):
m = FreezeMe()
m_s = torch.jit.script(m)
m_s.eval()
- m_f = torch.jit.freeze(m_s, preserved_attrs=['foo'])
+ m_f = torch.jit.freeze(m_s, preserved_attrs=["foo"])
input = torch.ones(10)
self.assertEqual(m_s.foo(input), m_f.foo(input))
-
def test_freeze_module_in_training_mode(self):
class Net(nn.Module):
def __init__(self):
@@ -1174,50 +1213,51 @@ class TestFreezing(JitTestCase):
# }
# ...
# }
- self.assertFalse(mTrain_freezed.hasattr('training'))
- self.assertTrue(mTrain_freezed.hasattr('conv1'))
- self.assertFalse(mTrain_freezed.conv1.hasattr('training'))
- self.assertTrue(mTrain_freezed.conv1.hasattr('weight'))
- self.assertTrue(mTrain_freezed.conv1.hasattr('bias'))
- self.assertTrue(mTrain_freezed.hasattr('conv2'))
- self.assertFalse(mTrain_freezed.conv2.hasattr('training'))
- self.assertTrue(mTrain_freezed.conv2.hasattr('weight'))
- self.assertTrue(mTrain_freezed.conv2.hasattr('bias'))
- self.assertTrue(mTrain_freezed.hasattr('dropout1'))
- self.assertTrue(mTrain_freezed.dropout1.hasattr('training'))
- self.assertTrue(mTrain_freezed.hasattr('dropout2'))
- self.assertTrue(mTrain_freezed.dropout2.hasattr('training'))
- self.assertTrue(mTrain_freezed.hasattr('fc1'))
- self.assertTrue(mTrain_freezed.fc1.hasattr('weight'))
- self.assertTrue(mTrain_freezed.fc1.hasattr('bias'))
- self.assertTrue(mTrain_freezed.hasattr('fc2'))
- self.assertTrue(mTrain_freezed.fc2.hasattr('weight'))
- self.assertTrue(mTrain_freezed.fc2.hasattr('bias'))
+ self.assertFalse(mTrain_freezed.hasattr("training"))
+ self.assertTrue(mTrain_freezed.hasattr("conv1"))
+ self.assertFalse(mTrain_freezed.conv1.hasattr("training"))
+ self.assertTrue(mTrain_freezed.conv1.hasattr("weight"))
+ self.assertTrue(mTrain_freezed.conv1.hasattr("bias"))
+ self.assertTrue(mTrain_freezed.hasattr("conv2"))
+ self.assertFalse(mTrain_freezed.conv2.hasattr("training"))
+ self.assertTrue(mTrain_freezed.conv2.hasattr("weight"))
+ self.assertTrue(mTrain_freezed.conv2.hasattr("bias"))
+ self.assertTrue(mTrain_freezed.hasattr("dropout1"))
+ self.assertTrue(mTrain_freezed.dropout1.hasattr("training"))
+ self.assertTrue(mTrain_freezed.hasattr("dropout2"))
+ self.assertTrue(mTrain_freezed.dropout2.hasattr("training"))
+ self.assertTrue(mTrain_freezed.hasattr("fc1"))
+ self.assertTrue(mTrain_freezed.fc1.hasattr("weight"))
+ self.assertTrue(mTrain_freezed.fc1.hasattr("bias"))
+ self.assertTrue(mTrain_freezed.hasattr("fc2"))
+ self.assertTrue(mTrain_freezed.fc2.hasattr("weight"))
+ self.assertTrue(mTrain_freezed.fc2.hasattr("bias"))
model.eval()
mEval_freezed = torch._C._freeze_module(model._c)
- self.assertFalse(mEval_freezed.hasattr('conv1'))
- self.assertFalse(mEval_freezed.hasattr('conv2'))
- self.assertFalse(mEval_freezed.hasattr('dropout1'))
- self.assertFalse(mEval_freezed.hasattr('training'))
- self.assertFalse(mEval_freezed.hasattr('fc1'))
- self.assertFalse(mEval_freezed.hasattr('dropout2'))
- self.assertFalse(mEval_freezed.hasattr('fc2'))
- with self.assertRaisesRegex(AttributeError, "does not have a field with name 'state_dict'"):
+ self.assertFalse(mEval_freezed.hasattr("conv1"))
+ self.assertFalse(mEval_freezed.hasattr("conv2"))
+ self.assertFalse(mEval_freezed.hasattr("dropout1"))
+ self.assertFalse(mEval_freezed.hasattr("training"))
+ self.assertFalse(mEval_freezed.hasattr("fc1"))
+ self.assertFalse(mEval_freezed.hasattr("dropout2"))
+ self.assertFalse(mEval_freezed.hasattr("fc2"))
+ with self.assertRaisesRegex(
+ AttributeError, "does not have a field with name 'state_dict'"
+ ):
print(mEval_freezed.state_dict())
buffer = io.BytesIO()
torch.jit.save(mEval_freezed, buffer)
buffer.seek(0)
m = torch.jit.load(buffer)
- FileCheck().check_not('GetAttr[name=') \
- .run(m._c._get_method('forward').graph)
+ FileCheck().check_not("GetAttr[name=").run(m._c._get_method("forward").graph)
m2 = torch._C._freeze_module(model._c, preserveParameters=True)
- self.assertTrue(m2.hasattr('conv1'))
- self.assertTrue(m2.hasattr('conv2'))
- self.assertFalse(m2.hasattr('dropout1'))
- self.assertFalse(m2.hasattr('training'))
- self.assertTrue(m2.hasattr('fc1'))
- self.assertFalse(m2.hasattr('dropout2'))
- self.assertTrue(m2.hasattr('fc2'))
+ self.assertTrue(m2.hasattr("conv1"))
+ self.assertTrue(m2.hasattr("conv2"))
+ self.assertFalse(m2.hasattr("dropout1"))
+ self.assertFalse(m2.hasattr("training"))
+ self.assertTrue(m2.hasattr("fc1"))
+ self.assertFalse(m2.hasattr("dropout2"))
+ self.assertTrue(m2.hasattr("fc2"))
def test_freeze_module_detach_gradient(self):
mod = nn.Conv2d(8, 3, 4, 2, 1)
@@ -1227,7 +1267,7 @@ class TestFreezing(JitTestCase):
fmod = torch._C._freeze_module(smod._c)
self.assertTrue(mod.weight.requires_grad)
self.assertTrue(smod.weight.requires_grad)
- self.assertFalse(fmod.hasattr('weight'))
+ self.assertFalse(fmod.hasattr("weight"))
inp = torch.ones(1, 8, 32, 32)
out1 = fmod.forward(inp)
# FIXME: frozen module mutated from outside (original module).
@@ -1329,15 +1369,15 @@ class TestFreezing(JitTestCase):
m = torch.jit.script(Module())
m.eval()
- m = torch.jit.freeze(m, preserved_attrs=['sub1.a', 'sub2.a'])
+ m = torch.jit.freeze(m, preserved_attrs=["sub1.a", "sub2.a"])
fm = m._c
- self.assertTrue(fm.hasattr('sub1'))
- self.assertTrue(fm.sub1.hasattr('a'))
- self.assertFalse(fm.sub1.hasattr('b'))
- self.assertTrue(fm.hasattr('sub2'))
- self.assertTrue(fm.sub2.hasattr('a'))
- self.assertFalse(fm.sub2.hasattr('b'))
+ self.assertTrue(fm.hasattr("sub1"))
+ self.assertTrue(fm.sub1.hasattr("a"))
+ self.assertFalse(fm.sub1.hasattr("b"))
+ self.assertTrue(fm.hasattr("sub2"))
+ self.assertTrue(fm.sub2.hasattr("a"))
+ self.assertFalse(fm.sub2.hasattr("b"))
self.assertEqual(m(), 6)
m.sub1.a += 1
self.assertEqual(m(), 7)
@@ -1366,12 +1406,12 @@ class TestFreezing(JitTestCase):
m = torch.jit.script(Module())
m.eval()
- fm = torch.jit.freeze(m, preserved_attrs=['sub.a', 'sub.method_a'])._c
+ fm = torch.jit.freeze(m, preserved_attrs=["sub.a", "sub.method_a"])._c
- self.assertTrue(fm.hasattr('sub'))
- self.assertTrue(fm.sub.hasattr('a'))
- self.assertFalse(fm.sub.hasattr('b'))
- self.assertTrue(fm.sub._has_method('method_a'))
+ self.assertTrue(fm.hasattr("sub"))
+ self.assertTrue(fm.sub.hasattr("a"))
+ self.assertFalse(fm.sub.hasattr("b"))
+ self.assertTrue(fm.sub._has_method("method_a"))
def test_freeze_module_with_user_preserved_method_on_submodule(self):
class SubModule(nn.Module):
@@ -1394,11 +1434,11 @@ class TestFreezing(JitTestCase):
m = torch.jit.script(Module())
m.eval()
- fm = torch.jit.freeze(m, preserved_attrs=['sub.method_a'])._c
+ fm = torch.jit.freeze(m, preserved_attrs=["sub.method_a"])._c
- self.assertTrue(fm.hasattr('sub'))
- self.assertTrue(fm.sub._has_method('method_a'))
- self.assertFalse(fm.sub._has_method('method_b'))
+ self.assertTrue(fm.hasattr("sub"))
+ self.assertTrue(fm.sub._has_method("method_a"))
+ self.assertFalse(fm.sub._has_method("method_b"))
@skipIfNoFBGEMM
def test_module_with_shared_type_instances(self):
@@ -1436,7 +1476,7 @@ class TestFreezing(JitTestCase):
torch.ao.quantization.convert(qModel, inplace=True)
return model
- with override_quantized_engine('fbgemm'):
+ with override_quantized_engine("fbgemm"):
data = torch.randn(4, 1, 4, 4, dtype=torch.float32)
m = Parent().to(torch.float32)
m = _static_quant(m)
@@ -1445,7 +1485,9 @@ class TestFreezing(JitTestCase):
torch._C._jit_pass_inline(m.graph)
m_frozen = wrap_cpp_module(torch._C._freeze_module(m._c))
# Earlier bug resulted in _packed_params set to false.
- FileCheck().check_not('_packed_params = False').run(m_frozen._c.dump_to_str(True, True, False))
+ FileCheck().check_not("_packed_params = False").run(
+ m_frozen._c.dump_to_str(True, True, False)
+ )
m_res = m(data)
# It used to segfault while running frozen module.
@@ -1483,6 +1525,7 @@ class TestFreezing(JitTestCase):
Test that Modules containing non-static ModuleDict or ModuleList
indexing cannot be frozen.
"""
+
@torch.jit.interface
class ModuleInterface(torch.nn.Module):
def forward(self, inp: Any) -> Any:
@@ -1506,7 +1549,10 @@ class TestFreezing(JitTestCase):
m = torch.jit.script(ModWithDict())
m.eval()
- with self.assertRaisesRegex(RuntimeError, "Freezing modules containing prim::ModuleContainerIndex is not supported"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Freezing modules containing prim::ModuleContainerIndex is not supported",
+ ):
mf = torch._C._freeze_module(m._c)
class ModWithList(torch.nn.Module):
@@ -1520,7 +1566,10 @@ class TestFreezing(JitTestCase):
m = torch.jit.script(ModWithList())
m.eval()
- with self.assertRaisesRegex(RuntimeError, "Freezing modules containing prim::ModuleContainerIndex is not supported"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Freezing modules containing prim::ModuleContainerIndex is not supported",
+ ):
mf = torch._C._freeze_module(m._c)
def test_freeze_with_interface_mutable(self):
@@ -1591,7 +1640,9 @@ class TestFreezing(JitTestCase):
m = torch.jit.script(WrapperModule())
m.eval()
- with self.assertRaisesRegex(RuntimeError, "Freezing does not support SetAttr on an interface type"):
+ with self.assertRaisesRegex(
+ RuntimeError, "Freezing does not support SetAttr on an interface type"
+ ):
m_frozen = torch.jit.freeze(m)
def test_freeze_recursive_interfaces(self):
@@ -1663,7 +1714,6 @@ class TestFreezing(JitTestCase):
def forward(self, inp):
return inp.cos() * self.x
-
class InnerImpl2(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -1704,7 +1754,9 @@ class TestFreezing(JitTestCase):
m_s = torch.jit.script(m)
m_s.eval()
- with self.assertRaisesRegex(RuntimeError, "Freezing does not support SetAttr on an interface type"):
+ with self.assertRaisesRegex(
+ RuntimeError, "Freezing does not support SetAttr on an interface type"
+ ):
m_s = torch.jit.freeze(m_s)
def test_freeze_interface_swapping_two_methods(self):
@@ -1771,10 +1823,14 @@ class TestFreezing(JitTestCase):
m1.eval()
m2.eval()
- with self.assertRaisesRegex(RuntimeError, "Freezing does not support SetAttr on an interface type"):
+ with self.assertRaisesRegex(
+ RuntimeError, "Freezing does not support SetAttr on an interface type"
+ ):
torch.jit.freeze(m1, preserved_attrs=["other_method"])
- with self.assertRaisesRegex(RuntimeError, "Freezing does not support SetAttr on an interface type"):
+ with self.assertRaisesRegex(
+ RuntimeError, "Freezing does not support SetAttr on an interface type"
+ ):
torch.jit.freeze(m2, preserved_attrs=["other_method"])
def test_freeze_recursive_interfaces_same_name(self):
@@ -1919,12 +1975,12 @@ class TestFreezing(JitTestCase):
class MyModule(torch.nn.Module):
__annotations__ = {
- 'box_coder': BoxCoder,
+ "box_coder": BoxCoder,
}
def __init__(self):
super().__init__()
- self.box_coder = BoxCoder(50.)
+ self.box_coder = BoxCoder(50.0)
def forward(self, input):
return self.box_coder.decode(input)
@@ -1990,6 +2046,7 @@ class TestFreezing(JitTestCase):
mod.forward(x), unscripted_mod.forward(x), atol=1e-5, rtol=1e-5
)
+
@skipIfTorchDynamo("somehow causing hanging during python shutdown")
class TestFrozenOptimizations(JitTestCase):
def setUp(self):
@@ -2003,16 +2060,27 @@ class TestFrozenOptimizations(JitTestCase):
def test_conv_bn_folding(self):
conv_bias = [True, False]
- module_pairs = [(nn.Conv1d, nn.BatchNorm1d), (nn.Conv2d, nn.BatchNorm2d), (nn.Conv3d, nn.BatchNorm3d)]
+ module_pairs = [
+ (nn.Conv1d, nn.BatchNorm1d),
+ (nn.Conv2d, nn.BatchNorm2d),
+ (nn.Conv3d, nn.BatchNorm3d),
+ ]
use_tracing = [True, False]
bn_running_stats = [True, False]
- for use_bias, modules, tracing, track_stats in product(conv_bias, module_pairs, use_tracing, bn_running_stats):
+ for use_bias, modules, tracing, track_stats in product(
+ conv_bias, module_pairs, use_tracing, bn_running_stats
+ ):
+
class ConvBN(torch.nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super().__init__()
- self.conv = modules[0](in_channels, out_channels, bias=use_bias, **kwargs)
- self.bn = modules[1](out_channels, eps=0.001, track_running_stats=track_stats)
+ self.conv = modules[0](
+ in_channels, out_channels, bias=use_bias, **kwargs
+ )
+ self.bn = modules[1](
+ out_channels, eps=0.001, track_running_stats=track_stats
+ )
def forward(self, x):
x = self.conv(x)
@@ -2045,9 +2113,13 @@ class TestFrozenOptimizations(JitTestCase):
scripted_mod = torch.jit.freeze(scripted_mod)
self.run_pass("fold_frozen_conv_bn", scripted_mod.graph)
if track_stats:
- FileCheck().check("conv").check_not("aten::batch_norm").run(scripted_mod.graph)
+ FileCheck().check("conv").check_not("aten::batch_norm").run(
+ scripted_mod.graph
+ )
else:
- FileCheck().check("conv").check("aten::batch_norm").run(scripted_mod.graph)
+ FileCheck().check("conv").check("aten::batch_norm").run(
+ scripted_mod.graph
+ )
self.assertEqual(mod_eager(inp), scripted_mod(inp))
self.assertEqual(mod_eager(inp), scripted_mod(inp))
@@ -2056,7 +2128,9 @@ class TestFrozenOptimizations(JitTestCase):
class ConvBN(torch.nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super().__init__()
- self.conv = torch.nn.Conv2d(in_channels, out_channels, bias=True, **kwargs)
+ self.conv = torch.nn.Conv2d(
+ in_channels, out_channels, bias=True, **kwargs
+ )
self.bn = torch.nn.BatchNorm2d(out_channels, eps=0.001)
self.amt = 3.2
@@ -2071,11 +2145,17 @@ class TestFrozenOptimizations(JitTestCase):
mod_eager = ConvBN(3, 32, kernel_size=3, stride=2).eval()
scripted_mod = torch.jit.script(mod_eager)
torch._C._jit_pass_inline(scripted_mod.make_prediction.graph)
- FileCheck().check("conv").check("aten::batch_norm").run(scripted_mod.make_prediction.graph)
+ FileCheck().check("conv").check("aten::batch_norm").run(
+ scripted_mod.make_prediction.graph
+ )
# _jit_pass_optimize_frozen_graph should not be called on non-method attributes (e.g. "amt")
- scripted_mod = torch.jit.freeze(scripted_mod, preserved_attrs=["make_prediction", "amt"])
- FileCheck().check("conv").check_not("aten::batch_norm").run(scripted_mod.make_prediction.graph)
+ scripted_mod = torch.jit.freeze(
+ scripted_mod, preserved_attrs=["make_prediction", "amt"]
+ )
+ FileCheck().check("conv").check_not("aten::batch_norm").run(
+ scripted_mod.make_prediction.graph
+ )
# During freezing this creates tensors constants that are attached to the frozen graph,
# which is then kept alive by the compilation unit (which causes a leak)
@@ -2088,8 +2168,12 @@ class TestFrozenOptimizations(JitTestCase):
class ConvBN(torch.nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super().__init__()
- self.conv = torch.nn.Conv2d(in_channels, out_channels, bias=False, dtype=torch.half, **kwargs)
- self.bn = torch.nn.BatchNorm2d(out_channels, eps=0.001, dtype=torch.float)
+ self.conv = torch.nn.Conv2d(
+ in_channels, out_channels, bias=False, dtype=torch.half, **kwargs
+ )
+ self.bn = torch.nn.BatchNorm2d(
+ out_channels, eps=0.001, dtype=torch.float
+ )
def forward(self, x):
return self.bn(self.conv(x))
@@ -2110,27 +2194,35 @@ class TestFrozenOptimizations(JitTestCase):
self.assertEqual(mod_eager(x), scripted_mod(x), atol=1e-2, rtol=1e-2)
def test_conv_add_folding(self):
-
@torch.no_grad()
- def test_conv_fusion(use_bias, module, tracing, op, scalar, add_tensor, expect_success):
-
+ def test_conv_fusion(
+ use_bias, module, tracing, op, scalar, add_tensor, expect_success
+ ):
class ConvOp(torch.nn.Module):
- __constants__ = ['use_scalar']
+ __constants__ = ["use_scalar"]
def __init__(self, in_channels, out_channels, tensor=None, **kwargs):
super().__init__()
- self.conv = module(in_channels, out_channels, bias=use_bias, **kwargs)
- self.conv2 = module(in_channels, out_channels, bias=use_bias, **kwargs)
+ self.conv = module(
+ in_channels, out_channels, bias=use_bias, **kwargs
+ )
+ self.conv2 = module(
+ in_channels, out_channels, bias=use_bias, **kwargs
+ )
self.use_scalar = scalar
tensor_size = [1 for _ in range(self.conv.weight.ndim)]
tensor_size[1] = self.conv.weight.size(0)
- self.tensor = add_tensor if add_tensor is not None else torch.rand(tensor_size)
+ self.tensor = (
+ add_tensor
+ if add_tensor is not None
+ else torch.rand(tensor_size)
+ )
self.op = op
def forward(self, x):
x = self.conv(x)
if self.use_scalar:
- return self.op(x, 2.)
+ return self.op(x, 2.0)
else:
return self.op(x, self.tensor)
@@ -2143,7 +2235,6 @@ class TestFrozenOptimizations(JitTestCase):
inps.append(inps[-1])
inps.append(inps[-1])
-
inp = torch.rand(inps)
if tracing:
@@ -2177,25 +2268,55 @@ class TestFrozenOptimizations(JitTestCase):
use_scalar = [False, True]
ops = [torch.add, torch.sub, torch.mul, torch.div]
- for use_bias, module, tracing, pytorch_op, scalar in product(conv_bias, modules, use_tracing, ops, use_scalar):
- test_conv_fusion(use_bias, module, tracing, pytorch_op, scalar, add_tensor=None, expect_success=True)
-
+ for use_bias, module, tracing, pytorch_op, scalar in product(
+ conv_bias, modules, use_tracing, ops, use_scalar
+ ):
+ test_conv_fusion(
+ use_bias,
+ module,
+ tracing,
+ pytorch_op,
+ scalar,
+ add_tensor=None,
+ expect_success=True,
+ )
for use_bias, pytorch_op in product(conv_bias, ops):
# broadcasting add
- test_conv_fusion(use_bias, nn.Conv2d, False, pytorch_op, False,
- add_tensor=torch.rand(32, 1, 32), expect_success=False)
+ test_conv_fusion(
+ use_bias,
+ nn.Conv2d,
+ False,
+ pytorch_op,
+ False,
+ add_tensor=torch.rand(32, 1, 32),
+ expect_success=False,
+ )
# broadcasting add
- test_conv_fusion(use_bias, nn.Conv2d, False, pytorch_op, False, add_tensor=torch.rand(1, 1), expect_success=True)
+ test_conv_fusion(
+ use_bias,
+ nn.Conv2d,
+ False,
+ pytorch_op,
+ False,
+ add_tensor=torch.rand(1, 1),
+ expect_success=True,
+ )
# add with different dtype
- test_conv_fusion(use_bias, nn.Conv2d, False, pytorch_op, False,
- add_tensor=torch.tensor([2]).to(torch.int), expect_success=True)
+ test_conv_fusion(
+ use_bias,
+ nn.Conv2d,
+ False,
+ pytorch_op,
+ False,
+ add_tensor=torch.tensor([2]).to(torch.int),
+ expect_success=True,
+ )
def test_conv_mul_add_bn(self):
class Conv_Mul_Add_Bn(nn.Module):
-
def __init__(self, in_channels, out_channels, **kwargs):
super().__init__()
self.conv = nn.Conv2d(in_channels, out_channels, **kwargs)
@@ -2204,7 +2325,9 @@ class TestFrozenOptimizations(JitTestCase):
self.tensor2 = torch.tensor(2)
def forward(self, x):
- return self.bn(torch.add(torch.mul(self.conv(x), self.tensor1), self.tensor2))
+ return self.bn(
+ torch.add(torch.mul(self.conv(x), self.tensor1), self.tensor2)
+ )
input = torch.randn(8, 3, 64, 64)
model = Conv_Mul_Add_Bn(3, 32, kernel_size=3, stride=1).eval()
@@ -2215,20 +2338,31 @@ class TestFrozenOptimizations(JitTestCase):
traced_model = torch.jit.freeze(traced_model)
tresult = traced_model(input)
self.assertEqual(result, tresult)
- FileCheck().check("conv").check_not("aten::batch_norm").run(traced_model.graph)
+ FileCheck().check("conv").check_not("aten::batch_norm").run(
+ traced_model.graph
+ )
FileCheck().check("conv").check_not("aten::add").run(traced_model.graph)
def test_linear_bn_folding(self):
- module_pairs = [(nn.Linear, nn.BatchNorm1d), (nn.Linear, nn.BatchNorm2d), (nn.Linear, nn.BatchNorm3d)]
+ module_pairs = [
+ (nn.Linear, nn.BatchNorm1d),
+ (nn.Linear, nn.BatchNorm2d),
+ (nn.Linear, nn.BatchNorm3d),
+ ]
use_tracing = [True, False]
bn_running_stats = [True, False]
- for modules, tracing, track_stats in product(module_pairs, use_tracing, bn_running_stats):
+ for modules, tracing, track_stats in product(
+ module_pairs, use_tracing, bn_running_stats
+ ):
+
class LinearBN(torch.nn.Module):
def __init__(self, in_features, out_features):
super().__init__()
self.linear = modules[0](in_features, out_features)
- self.bn = modules[1](out_features, eps=0.001, track_running_stats=track_stats)
+ self.bn = modules[1](
+ out_features, eps=0.001, track_running_stats=track_stats
+ )
def forward(self, x):
x = self.linear(x)
@@ -2259,20 +2393,30 @@ class TestFrozenOptimizations(JitTestCase):
FileCheck().check("linear").check("batch").run(scripted_mod.graph)
# successfully no-ops with non-const inputs
self.run_pass("fold_frozen_linear_bn", scripted_mod.graph)
- FileCheck().check("linear").check("aten::batch_norm").run(scripted_mod.graph)
+ FileCheck().check("linear").check("aten::batch_norm").run(
+ scripted_mod.graph
+ )
scripted_mod = torch.jit.freeze(scripted_mod)
self.run_pass("fold_frozen_linear_bn", scripted_mod.graph)
if track_stats:
- FileCheck().check("linear").check_not("aten::batch_norm").run(scripted_mod.graph)
+ FileCheck().check("linear").check_not("aten::batch_norm").run(
+ scripted_mod.graph
+ )
else:
- FileCheck().check("linear").check("aten::batch_norm").run(scripted_mod.graph)
+ FileCheck().check("linear").check("aten::batch_norm").run(
+ scripted_mod.graph
+ )
self.assertEqual(mod_eager(inp), scripted_mod(inp))
self.assertEqual(mod_eager(inp), scripted_mod(inp))
def test_bn_not_broadcast_with_linear(self):
- module_pairs = [(nn.Linear, nn.BatchNorm1d), (nn.Linear, nn.BatchNorm2d), (nn.Linear, nn.BatchNorm3d)]
+ module_pairs = [
+ (nn.Linear, nn.BatchNorm1d),
+ (nn.Linear, nn.BatchNorm2d),
+ (nn.Linear, nn.BatchNorm3d),
+ ]
use_tracing = [True, False]
linear_in = 3
# (linear_out, bn_in)
@@ -2316,7 +2460,9 @@ class TestFrozenOptimizations(JitTestCase):
FileCheck().check("linear").check("batch").run(scripted_mod.graph)
self.run_pass("fold_frozen_linear_bn", scripted_mod.graph)
- FileCheck().check("linear").check("aten::batch_norm").run(scripted_mod.graph)
+ FileCheck().check("linear").check("aten::batch_norm").run(
+ scripted_mod.graph
+ )
frozen_mod = torch.jit.freeze(scripted_mod)
self.run_pass("fold_frozen_linear_bn", frozen_mod.graph)
@@ -2327,21 +2473,33 @@ class TestFrozenOptimizations(JitTestCase):
self.assertEqual(mod_eager(inp), frozen_mod(inp))
# successfully failed folding
- with self.assertRaisesRegex(AssertionError, "To fuse, linear.out_features == bn.num_features or bn.num_features == 1"):
+ with self.assertRaisesRegex(
+ AssertionError,
+ "To fuse, linear.out_features == bn.num_features or bn.num_features == 1",
+ ):
nn.utils.fusion.fuse_linear_bn_eval(linear, bn)
@skipCUDAMemoryLeakCheckIf(True)
@unittest.skipIf(not TEST_CUDA, "Optimization currently only run for GPU")
def test_linear_bn_folding_autocast_scenario_cuda(self):
- module_pairs = [(nn.Linear, nn.BatchNorm1d), (nn.Linear, nn.BatchNorm2d), (nn.Linear, nn.BatchNorm3d)]
+ module_pairs = [
+ (nn.Linear, nn.BatchNorm1d),
+ (nn.Linear, nn.BatchNorm2d),
+ (nn.Linear, nn.BatchNorm3d),
+ ]
use_tracing = [True, False]
bn_running_stats = [True, False]
- for modules, tracing, track_stats in product(module_pairs, use_tracing, bn_running_stats):
+ for modules, tracing, track_stats in product(
+ module_pairs, use_tracing, bn_running_stats
+ ):
+
class LinearBN(torch.nn.Module):
def __init__(self, in_features, out_features):
super().__init__()
- self.linear = modules[0](in_features, out_features, bias=False, dtype=torch.half)
+ self.linear = modules[0](
+ in_features, out_features, bias=False, dtype=torch.half
+ )
self.bn = modules[1](out_features, eps=0.001, dtype=torch.float)
def forward(self, x):
@@ -2366,7 +2524,9 @@ class TestFrozenOptimizations(JitTestCase):
else:
scripted_mod = torch.jit.script(mod_eager)
scripted_mod = torch.jit.freeze(scripted_mod)
- FileCheck().check("linear").check_not("aten::batch_norm").run(scripted_mod.graph)
+ FileCheck().check("linear").check_not("aten::batch_norm").run(
+ scripted_mod.graph
+ )
lin_node = scripted_mod.graph.findNode("aten::linear", True)
self.assertTrue(lin_node is not None)
weight_input = lin_node.namedInput("weight")
@@ -2383,6 +2543,7 @@ class TestFrozenOptimizations(JitTestCase):
out_dimms = [[5, 10], [1, 5]]
for w1_dim, w2_dim in out_dimms:
+
class ModMultLinear(nn.Module):
def __init__(self, w1_dim, w2_dim):
super().__init__()
@@ -2399,14 +2560,15 @@ class TestFrozenOptimizations(JitTestCase):
mod_eager = ModMultLinear(w1_dim, w2_dim).eval()
test_val1 = torch.rand([50, 5])
- self.check_linear_optimizations(mod_eager, 2, 1, (test_val1, ))
+ self.check_linear_optimizations(mod_eager, 2, 1, (test_val1,))
@unittest.skipIf(not TEST_CUDA, "Optimization currently only run for GPU")
def test_linear_concat_complex(self):
"""
- Testing that the interleaving of multiple optimizations does not
- cause errors, and gets optimized as expected
+ Testing that the interleaving of multiple optimizations does not
+ cause errors, and gets optimized as expected
"""
+
class ModMultLinear(nn.Module):
def __init__(self):
super().__init__()
@@ -2426,7 +2588,7 @@ class TestFrozenOptimizations(JitTestCase):
mod_eager = ModMultLinear().eval()
test_val1 = torch.rand([50, 5])
- self.check_linear_optimizations(mod_eager, 4, 2, (test_val1, ))
+ self.check_linear_optimizations(mod_eager, 4, 2, (test_val1,))
@unittest.skipIf(not TEST_CUDA, "Optimization currently only run for GPU")
def test_linear_concat_different_input(self):
@@ -2479,11 +2641,15 @@ class TestFrozenOptimizations(JitTestCase):
test_val2 = torch.rand([50, 5])
self.check_linear_optimizations(mod_eager, 4, 3, (test_val1, test_val2, True))
- def check_linear_optimizations(self, eager_mod, orig_linears, new_linears, test_vals):
+ def check_linear_optimizations(
+ self, eager_mod, orig_linears, new_linears, test_vals
+ ):
for is_cuda in [False, True]:
if is_cuda:
mod_to_device = eager_mod.cuda()
- test_vals_to_device = [t.cuda() if isinstance(t, torch.Tensor) else t for t in test_vals]
+ test_vals_to_device = [
+ t.cuda() if isinstance(t, torch.Tensor) else t for t in test_vals
+ ]
else:
mod_to_device = eager_mod
test_vals_to_device = test_vals
@@ -2491,29 +2657,42 @@ class TestFrozenOptimizations(JitTestCase):
script_mod = torch.jit.script(mod_to_device)
op_graph = script_mod.graph
- FileCheck().check_count("aten::linear", orig_linears, exactly=True).run(op_graph)
+ FileCheck().check_count("aten::linear", orig_linears, exactly=True).run(
+ op_graph
+ )
# successively no-ops with non-const inputs
self.run_pass("concat_frozen_linear", op_graph)
- FileCheck().check_count("aten::linear", orig_linears, exactly=True).run(op_graph)
+ FileCheck().check_count("aten::linear", orig_linears, exactly=True).run(
+ op_graph
+ )
script_mod = torch.jit.freeze(script_mod)
op_graph = script_mod.graph
self.run_pass("concat_frozen_linear", op_graph)
if is_cuda:
- FileCheck().check_count("aten::linear", new_linears, exactly=True).run(op_graph)
+ FileCheck().check_count("aten::linear", new_linears, exactly=True).run(
+ op_graph
+ )
else:
- FileCheck().check_count("aten::linear", orig_linears, exactly=True).run(op_graph)
-
- self.assertEqual(mod_to_device(*test_vals_to_device), script_mod(*test_vals_to_device))
+ FileCheck().check_count("aten::linear", orig_linears, exactly=True).run(
+ op_graph
+ )
+ self.assertEqual(
+ mod_to_device(*test_vals_to_device), script_mod(*test_vals_to_device)
+ )
def test_optimize_freeze_module(self):
in_channels, out_channels = 3, 32
- conv = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=True)
- bn = torch.nn.BatchNorm2d(out_channels, eps=.001)
+ conv = torch.nn.Conv2d(
+ in_channels, out_channels, kernel_size=3, stride=2, bias=True
+ )
+ bn = torch.nn.BatchNorm2d(out_channels, eps=0.001)
mod = torch.nn.Sequential(conv, bn)
# set optimize to False here, by default freezing runs run_frozen_optimizations
- frozen_mod = torch.jit.freeze(torch.jit.script(mod.eval()), optimize_numerics=False)
+ frozen_mod = torch.jit.freeze(
+ torch.jit.script(mod.eval()), optimize_numerics=False
+ )
# inspect frozen mod
FileCheck().check("batch_norm").run(frozen_mod.graph)
torch.jit.run_frozen_optimizations(frozen_mod)
@@ -2565,7 +2744,9 @@ class TestFrozenOptimizations(JitTestCase):
output_f = frozen_mod.forward(input)
self.assertEqual(output_s, output_f)
- @unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
+ @unittest.skipIf(
+ not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
+ )
def test_freeze_mkdlnn(self):
conv = torch.nn.Conv2d(3, 32, kernel_size=3, stride=2).eval().float()
convmkl = mkldnn_utils.to_mkldnn(conv)
@@ -2573,7 +2754,9 @@ class TestFrozenOptimizations(JitTestCase):
inp = torch.rand([4, 3, 4, 4]).float()
self.assertEqual(out(inp.to_mkldnn()).to_dense(), conv(inp))
- @unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
+ @unittest.skipIf(
+ not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
+ )
def test_conv_to_mkldnn(self):
with set_default_dtype(torch.float):
for module, trace in product([nn.Conv2d, nn.Conv3d], [False, True]):
@@ -2600,7 +2783,9 @@ class TestFrozenOptimizations(JitTestCase):
scripted_mod = torch.jit.freeze(scripted_mod)
self.run_pass("convert_frozen_ops_to_mkldnn", scripted_mod.graph)
- FileCheck().check("to_mkldnn").check("prim::mkldnn_convolution").check("to_dense").run(scripted_mod.graph)
+ FileCheck().check("to_mkldnn").check("prim::mkldnn_convolution").check(
+ "to_dense"
+ ).run(scripted_mod.graph)
self.assertEqual(mod(inp), scripted_mod(inp))
self.assertEqual(mod(inp), scripted_mod(inp))
@@ -2617,7 +2802,9 @@ class TestFrozenOptimizations(JitTestCase):
mod_eager = ModLinear().eval()
test_val = torch.rand([50, 20])
- self.check_linear_optimizations_2(mod_eager, 1, 0, "transpose_frozen_linear", (test_val,))
+ self.check_linear_optimizations_2(
+ mod_eager, 1, 0, "transpose_frozen_linear", (test_val,)
+ )
def test_linear_non_constant_weight(self):
class ModLinear(torch.nn.Module):
@@ -2631,9 +2818,13 @@ class TestFrozenOptimizations(JitTestCase):
mod_eager = ModLinear().eval()
test_val = torch.rand([50, 20])
test_weight = torch.rand([30, 20])
- self.check_linear_optimizations_2(mod_eager, 1, 1, "transpose_frozen_linear", (test_val, test_weight))
+ self.check_linear_optimizations_2(
+ mod_eager, 1, 1, "transpose_frozen_linear", (test_val, test_weight)
+ )
- def check_linear_optimizations_2(self, eager_mod, orig_linears, new_linears, opt_pass, test_vals):
+ def check_linear_optimizations_2(
+ self, eager_mod, orig_linears, new_linears, opt_pass, test_vals
+ ):
# TODO: merge with check_linear_optimizations once both diffs land
mod_to_device = eager_mod
test_vals_to_device = test_vals
@@ -2641,43 +2832,52 @@ class TestFrozenOptimizations(JitTestCase):
script_mod = torch.jit.script(mod_to_device)
op_graph = script_mod.graph
- FileCheck().check_count("aten::linear", orig_linears, exactly=True).run(op_graph)
+ FileCheck().check_count("aten::linear", orig_linears, exactly=True).run(
+ op_graph
+ )
# successively no-ops with non-const inputs
self.run_pass(opt_pass, op_graph)
- FileCheck().check_count("aten::linear", orig_linears, exactly=True).run(op_graph)
+ FileCheck().check_count("aten::linear", orig_linears, exactly=True).run(
+ op_graph
+ )
script_mod = torch.jit.freeze(script_mod)
op_graph = script_mod.graph
self.run_pass(opt_pass, op_graph)
FileCheck().check_count("aten::linear", new_linears, exactly=True).run(op_graph)
- self.assertEqual(mod_to_device(*test_vals_to_device), script_mod(*test_vals_to_device))
+ self.assertEqual(
+ mod_to_device(*test_vals_to_device), script_mod(*test_vals_to_device)
+ )
@staticmethod
def conv():
# Generic composable conv for testing purposes
return nn.Conv2d(8, 8, 1)
- @unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
+ @unittest.skipIf(
+ not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
+ )
def test_collapse_adjacent_conversions(self):
-
with set_default_dtype(torch.float):
mod = nn.Sequential(self.conv(), self.conv()).eval()
scripted_mod = torch.jit.script(mod)
scripted_mod = torch.jit.freeze(scripted_mod)
self.run_pass("convert_frozen_ops_to_mkldnn", scripted_mod.graph)
- FileCheck().check("to_mkldnn") \
- .check("prim::mkldnn_convolution") \
- .check("prim::mkldnn_convolution") \
- .check("to_dense") \
- .run(scripted_mod.graph)
- FileCheck().check_count("to_mkldnn", 1, exactly=True).run(scripted_mod.graph)
+ FileCheck().check("to_mkldnn").check("prim::mkldnn_convolution").check(
+ "prim::mkldnn_convolution"
+ ).check("to_dense").run(scripted_mod.graph)
+ FileCheck().check_count("to_mkldnn", 1, exactly=True).run(
+ scripted_mod.graph
+ )
inp = torch.rand([1, 8, 8, 8])
self.assertEqual(scripted_mod(inp), mod(inp))
self.assertEqual(scripted_mod(inp), mod(inp))
- @unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
+ @unittest.skipIf(
+ not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
+ )
def test_mkldnn_fuser_broadcasting(self):
class Add(nn.Module):
def __init__(self, tensor):
@@ -2693,7 +2893,9 @@ class TestFrozenOptimizations(JitTestCase):
scripted_mod = torch.jit.script(mod)
scripted_mod = torch.jit.freeze(scripted_mod)
self.run_pass("convert_frozen_ops_to_mkldnn", scripted_mod.graph)
- FileCheck().check("prim::BroadcastMKLDNNTensors").run(scripted_mod.graph)
+ FileCheck().check("prim::BroadcastMKLDNNTensors").run(
+ scripted_mod.graph
+ )
inp = torch.rand([1, 8, 8, 8])
self.assertEqual(scripted_mod(inp), mod(inp))
self.assertEqual(scripted_mod(inp), mod(inp))
@@ -2701,9 +2903,14 @@ class TestFrozenOptimizations(JitTestCase):
# for good measure, check that broadcasting does not work without this op
# so we can remove the op if it ever gets supported
with self.assertRaisesRegex(RuntimeError, ""):
- torch.rand([1, 8, 8, 8]).to_mkldnn() + torch.rand(add_inp).to_mkldnn()
+ (
+ torch.rand([1, 8, 8, 8]).to_mkldnn()
+ + torch.rand(add_inp).to_mkldnn()
+ )
- @unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
+ @unittest.skipIf(
+ not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
+ )
def test_mkldnn_inplace_removal(self):
class AddMul(nn.Module):
def __init__(self, tensor):
@@ -2719,19 +2926,35 @@ class TestFrozenOptimizations(JitTestCase):
scripted_mod = torch.jit.freeze(scripted_mod)
self.run_pass("convert_frozen_ops_to_mkldnn", scripted_mod.graph)
# add gets uninplaced and reinplaced
- FileCheck().check("aten::to_mkldnn").check("aten::add_").check("aten::div_").run(scripted_mod.graph)
+ FileCheck().check("aten::to_mkldnn").check("aten::add_").check(
+ "aten::div_"
+ ).run(scripted_mod.graph)
inp = torch.rand([1, 8, 8, 8])
self.assertEqual(scripted_mod(inp), mod(inp))
self.assertEqual(scripted_mod(inp), mod(inp))
- @unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
+ @unittest.skipIf(
+ not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
+ )
@skipIfNoTorchVision
def test_maxpool_mkldnn(self):
with set_default_dtype(torch.float):
model = torchvision.models.resnet18()
- sub_model = torch.nn.Sequential(model.conv1, model.bn1, model.relu, model.maxpool)
+ sub_model = torch.nn.Sequential(
+ model.conv1, model.bn1, model.relu, model.maxpool
+ )
mod = torch.jit.freeze(torch.jit.script(sub_model.eval()))
- N, C, H, W, = 10, 3, 224, 224
+ (
+ N,
+ C,
+ H,
+ W,
+ ) = (
+ 10,
+ 3,
+ 224,
+ 224,
+ )
inp = torch.randn(N, C, H, W)
self.run_pass("convert_frozen_ops_to_mkldnn", mod.graph)
FileCheck().check("max_pool").check("to_dense").run(mod.graph)
@@ -2755,11 +2978,16 @@ class TestFrozenOptimizations(JitTestCase):
conv_ops = [nn.Conv2d, nn.Conv3d]
use_add_z = [True, False]
use_tracing = [True, False]
- for use_bias, conv, add_z, tracing in product(conv_bias, conv_ops, use_add_z, use_tracing):
+ for use_bias, conv, add_z, tracing in product(
+ conv_bias, conv_ops, use_add_z, use_tracing
+ ):
+
class Net(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super().__init__()
- self.conv = conv(in_channels, out_channels, bias=use_bias, **kwargs)
+ self.conv = conv(
+ in_channels, out_channels, bias=use_bias, **kwargs
+ )
self.relu = nn.ReLU(inplace=True)
self.add_z = add_z
@@ -2786,24 +3014,35 @@ class TestFrozenOptimizations(JitTestCase):
frozen_mod = torch.jit.optimize_for_inference(scripted_mod)
if TEST_WITH_ROCM:
if add_z:
- FileCheck().check("aten::miopen_convolution_add_relu").run(frozen_mod.graph)
+ FileCheck().check("aten::miopen_convolution_add_relu").run(
+ frozen_mod.graph
+ )
else:
- FileCheck().check("aten::miopen_convolution_relu").run(frozen_mod.graph)
+ FileCheck().check("aten::miopen_convolution_relu").run(
+ frozen_mod.graph
+ )
else:
if add_z:
- FileCheck().check("aten::cudnn_convolution_add_relu").run(frozen_mod.graph)
+ FileCheck().check("aten::cudnn_convolution_add_relu").run(
+ frozen_mod.graph
+ )
else:
- FileCheck().check("aten::cudnn_convolution_relu").run(frozen_mod.graph)
+ FileCheck().check("aten::cudnn_convolution_relu").run(
+ frozen_mod.graph
+ )
self.assertEqual(mod_eager(inp), frozen_mod(inp))
@unittest.skipIf(not (TEST_CUDNN or TEST_WITH_ROCM), "requires CUDNN")
def test_freeze_conv_relu_fusion_not_forward(self):
with set_default_dtype(torch.float):
+
class Net(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super().__init__()
- self.conv = nn.Conv2d(in_channels, out_channels, bias=None, **kwargs)
+ self.conv = nn.Conv2d(
+ in_channels, out_channels, bias=None, **kwargs
+ )
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
@@ -2823,24 +3062,41 @@ class TestFrozenOptimizations(JitTestCase):
scripted_mod = torch.jit.script(mod_eager)
- frozen_mod = torch.jit.freeze(scripted_mod, preserved_attrs=['make_prediction'])
- optimized_mod = torch.jit.optimize_for_inference(frozen_mod, other_methods=['make_prediction'])
+ frozen_mod = torch.jit.freeze(
+ scripted_mod, preserved_attrs=["make_prediction"]
+ )
+ optimized_mod = torch.jit.optimize_for_inference(
+ frozen_mod, other_methods=["make_prediction"]
+ )
if TEST_WITH_ROCM:
- FileCheck().check("aten::miopen_convolution_relu").run(optimized_mod.make_prediction.graph)
+ FileCheck().check("aten::miopen_convolution_relu").run(
+ optimized_mod.make_prediction.graph
+ )
else:
- FileCheck().check("aten::cudnn_convolution_relu").run(optimized_mod.make_prediction.graph)
+ FileCheck().check("aten::cudnn_convolution_relu").run(
+ optimized_mod.make_prediction.graph
+ )
- self.assertEqual(mod_eager.make_prediction(inp), optimized_mod.make_prediction(inp))
+ self.assertEqual(
+ mod_eager.make_prediction(inp), optimized_mod.make_prediction(inp)
+ )
- @unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
+ @unittest.skipIf(
+ not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
+ )
def test_numel_less_than_size_with_padding(self):
-
with set_default_dtype(torch.float):
+
class MyModule(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(
- 1, 2, kernel_size=(2, 4), stride=2, padding=2, dilation=(2, 1),
+ 1,
+ 2,
+ kernel_size=(2, 4),
+ stride=2,
+ padding=2,
+ dilation=(2, 1),
)
def forward(self, i0):
@@ -2849,7 +3105,6 @@ class TestFrozenOptimizations(JitTestCase):
o1 = torch.clip(x, -1.5, 1.5)
return o0, o1
-
i0 = torch.zeros((1, 1, 1, 2), dtype=torch.float32)
mod = MyModule()
out = mod(i0)
@@ -2860,9 +3115,12 @@ class TestFrozenOptimizations(JitTestCase):
eout = exported(i0)
self.assertTrue(all(torch.allclose(x, y) for x, y in zip(out, eout)))
- @unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
+ @unittest.skipIf(
+ not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
+ )
def test_incompatible_perf_formats(self):
with set_default_dtype(torch.float):
+
class Mod(nn.Module):
def __init__(self):
super().__init__()
@@ -2877,47 +3135,83 @@ class TestFrozenOptimizations(JitTestCase):
model = Mod()
model.eval()
mod = torch.jit.freeze(torch.jit.script(model))
- N, C, H, W, = 10, 3, 224, 224
+ (
+ N,
+ C,
+ H,
+ W,
+ ) = (
+ 10,
+ 3,
+ 224,
+ 224,
+ )
inp = torch.randn(N, C, H, W)
self.run_pass("convert_frozen_ops_to_mkldnn", mod.graph)
self.assertEqual(model(inp), mod(inp))
- @unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
+ @unittest.skipIf(
+ not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
+ )
def test_pool2d_batchnorm(self):
with set_default_dtype(torch.float):
-
- pooling_layers = [torch.nn.AdaptiveAvgPool2d(4),
- # torch.nn.AdaptiveMaxPool2d(4), # return tuples
- torch.nn.MaxPool2d(4),
- torch.nn.AvgPool2d(4),
- torch.nn.BatchNorm2d(64).eval()]
+ pooling_layers = [
+ torch.nn.AdaptiveAvgPool2d(4),
+ # torch.nn.AdaptiveMaxPool2d(4), # return tuples
+ torch.nn.MaxPool2d(4),
+ torch.nn.AvgPool2d(4),
+ torch.nn.BatchNorm2d(64).eval(),
+ ]
for pl in pooling_layers:
- sub_model = torch.nn.Sequential(torch.nn.Conv2d(3, 64, 2, 2), torch.nn.ReLU(), pl, torch.nn.Hardswish())
+ sub_model = torch.nn.Sequential(
+ torch.nn.Conv2d(3, 64, 2, 2),
+ torch.nn.ReLU(),
+ pl,
+ torch.nn.Hardswish(),
+ )
sub_model.eval()
mod = torch.jit.freeze(torch.jit.script(sub_model))
- N, C, H, W, = 10, 3, 224, 224
+ (
+ N,
+ C,
+ H,
+ W,
+ ) = (
+ 10,
+ 3,
+ 224,
+ 224,
+ )
inp = torch.randn(N, C, H, W)
# these two passes needed to remove
# a size check in BatchNorm2d
removeExceptions(mod.graph)
- self.run_pass('dce', mod.graph)
+ self.run_pass("dce", mod.graph)
self.run_pass("convert_frozen_ops_to_mkldnn", mod.graph)
FileCheck().check("aten::to_dense").check_next("return").run(mod.graph)
self.assertEqual(sub_model(inp), mod(inp))
- @unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
+ @unittest.skipIf(
+ not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
+ )
def test_pool3d_batchnorm(self):
with set_default_dtype(torch.float):
-
- pooling_layers = [torch.nn.MaxPool3d(4),
- # torch.nn.AdaptiveAvgPool3d(4), # no ideep bindings
- # torch.nn.AdaptiveMaxPool3d(4), # return tuples
- torch.nn.AvgPool3d(4),
- torch.nn.BatchNorm3d(64).eval()]
+ pooling_layers = [
+ torch.nn.MaxPool3d(4),
+ # torch.nn.AdaptiveAvgPool3d(4), # no ideep bindings
+ # torch.nn.AdaptiveMaxPool3d(4), # return tuples
+ torch.nn.AvgPool3d(4),
+ torch.nn.BatchNorm3d(64).eval(),
+ ]
for pl in pooling_layers:
- sub_model = torch.nn.Sequential(torch.nn.Conv3d(3, 64, 2, 2), torch.nn.ReLU(), pl, torch.nn.Hardswish())
+ sub_model = torch.nn.Sequential(
+ torch.nn.Conv3d(3, 64, 2, 2),
+ torch.nn.ReLU(),
+ pl,
+ torch.nn.Hardswish(),
+ )
sub_model.eval()
mod = torch.jit.freeze(torch.jit.script(sub_model))
N, C, H, W, D = 10, 3, 64, 64, 64
@@ -2925,16 +3219,18 @@ class TestFrozenOptimizations(JitTestCase):
# these two passes needed to remove
# a size check in BatchNorm2d
removeExceptions(mod.graph)
- self.run_pass('dce', mod.graph)
+ self.run_pass("dce", mod.graph)
self.run_pass("convert_frozen_ops_to_mkldnn", mod.graph)
FileCheck().check("aten::to_dense").check_next("return").run(mod.graph)
self.assertEqual(sub_model(inp), mod(inp))
-
- @unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
+ @unittest.skipIf(
+ not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
+ )
@skipIfNoTorchVision
def test_conv_hardswish(self):
with set_default_dtype(torch.float):
+
class Clamp(torch.nn.Module):
def __init__(self, min_val, max_val, **kwargs):
super().__init__()
@@ -2944,20 +3240,30 @@ class TestFrozenOptimizations(JitTestCase):
def forward(self, x):
return torch.clamp(x, self.min_val, self.max_val)
- N, C, H, W, = 10, 3, 224, 224
+ (
+ N,
+ C,
+ H,
+ W,
+ ) = (
+ 10,
+ 3,
+ 224,
+ 224,
+ )
activations = [
torch.nn.Hardswish(),
torch.nn.Hardsigmoid(),
torch.nn.ReLU6(),
torch.nn.Tanh(),
- torch.nn.Hardtanh(0., 6.),
- torch.nn.Hardtanh(1., 100.),
- torch.nn.Hardtanh(-100., -1.),
+ torch.nn.Hardtanh(0.0, 6.0),
+ torch.nn.Hardtanh(1.0, 100.0),
+ torch.nn.Hardtanh(-100.0, -1.0),
torch.nn.GELU(),
- Clamp(-100., -1.),
- Clamp(1., 100.),
- Clamp(0., 6.),
- Clamp(-1., 0.),
+ Clamp(-100.0, -1.0),
+ Clamp(1.0, 100.0),
+ Clamp(0.0, 6.0),
+ Clamp(-1.0, 0.0),
]
model = torchvision.models.resnet18()
@@ -2967,19 +3273,23 @@ class TestFrozenOptimizations(JitTestCase):
mod = torch.jit.freeze(torch.jit.script(sub_model))
inp = torch.randn(N, C, H, W)
self.run_pass("convert_frozen_ops_to_mkldnn", mod.graph)
- FileCheck().check_count("aten::to_dense", 1, exactly=True).run(mod.graph)
+ FileCheck().check_count("aten::to_dense", 1, exactly=True).run(
+ mod.graph
+ )
self.assertEqual(sub_model(inp), mod(inp))
- @unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
+ @unittest.skipIf(
+ not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
+ )
def test_hardswish_hardsigmoid(self):
with set_default_dtype(torch.float):
op_map = {
- 'prim::MKLDNNHardSwish' : F.hardswish,
- 'prim::MKLDNNHardSigmoid' : F.hardsigmoid,
+ "prim::MKLDNNHardSwish": F.hardswish,
+ "prim::MKLDNNHardSigmoid": F.hardsigmoid,
}
input_sizes = ([0], [1], [3], [1, 3, 8, 8])
- for (mkldnn_opname, aten_op) in op_map.items():
+ for mkldnn_opname, aten_op in op_map.items():
for size in input_sizes:
for inplace in (True, False):
inplace_str = "_" if inplace else ""
@@ -2997,9 +3307,12 @@ class TestFrozenOptimizations(JitTestCase):
# and we aren't testing aten impls anyways
self.assertEqual(aten_op(x, inplace=False), m(x).to_dense())
- @unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
+ @unittest.skipIf(
+ not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
+ )
def test_scalar_mul(self):
with set_default_dtype(torch.float):
+
class Mod(nn.Module):
def __init__(self):
super().__init__()
@@ -3007,7 +3320,7 @@ class TestFrozenOptimizations(JitTestCase):
def forward(self, x):
a1 = self.mod(x) * 4
- return a1 * 4 + a1 * 5.
+ return a1 * 4 + a1 * 5.0
mod = Mod().eval()
scripted = torch.jit.freeze(torch.jit.script(mod))
@@ -3041,6 +3354,7 @@ class TestFrozenOptimizations(JitTestCase):
FileCheck().check("aten::detach").run(frozen_mod.graph)
self.assertEqual(frozen_mod(inp), mod(inp))
+
@skipIfTorchDynamo("somehow causing hanging during python shutdown")
@unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled")
class TestMKLDNNReinplacing(JitTestCase):
@@ -3073,7 +3387,9 @@ class TestMKLDNNReinplacing(JitTestCase):
mod_eager = nn.Sequential(self.getConv(), nn.Hardswish(), nn.ReLU())
mod = self.freezeAndConvert(mod_eager)
- FileCheck().check("mkldnn_convolution").check_next("prim::MKLDNNHardSwish_").check_next("aten::relu_").run(mod.graph)
+ FileCheck().check("mkldnn_convolution").check_next(
+ "prim::MKLDNNHardSwish_"
+ ).check_next("aten::relu_").run(mod.graph)
self.checkResults(mod_eager, mod)
def test_merge_liveness(self):
diff --git a/test/jit/test_functional_blocks.py b/test/jit/test_functional_blocks.py
index 31a78e2db8..d8c29ede47 100644
--- a/test/jit/test_functional_blocks.py
+++ b/test/jit/test_functional_blocks.py
@@ -11,10 +11,13 @@ pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
+
class TestFunctionalBlocks(JitTestCase):
def test_subgraph_creation(self):
@@ -30,14 +33,22 @@ class TestFunctionalBlocks(JitTestCase):
return x + y + z
graph = torch.jit.script(fn).graph
- self.run_pass('create_functional_graphs', graph)
+ self.run_pass("create_functional_graphs", graph)
# all uses of x and y should be sunk
- FileCheck().check(r"%x").check_not(r"%x").check("FunctionalGraph").check(r"%x").run(graph)
- FileCheck().check(r"%y").check_not(r"%y").check("FunctionalGraph").check(r"%y").run(graph)
+ FileCheck().check(r"%x").check_not(r"%x").check("FunctionalGraph").check(
+ r"%x"
+ ).run(graph)
+ FileCheck().check(r"%y").check_not(r"%y").check("FunctionalGraph").check(
+ r"%y"
+ ).run(graph)
# Don't allow any outputs which escape scope, so there is one final addition in the graph
- FileCheck().check("Tensor = prim::Functional").check_next("aten::add").run(graph)
+ FileCheck().check("Tensor = prim::Functional").check_next("aten::add").run(
+ graph
+ )
# z + 1, z.add_(2) considered non functional, z = z * z should be considered functional
- FileCheck().check("add").check("add_").check_not("mul").check("FunctionalGraph").run(graph)
+ FileCheck().check("add").check("add_").check_not("mul").check(
+ "FunctionalGraph"
+ ).run(graph)
diff --git a/test/jit/test_fuser_common.py b/test/jit/test_fuser_common.py
index 524690c95e..6a982051b1 100644
--- a/test/jit/test_fuser_common.py
+++ b/test/jit/test_fuser_common.py
@@ -3,9 +3,11 @@
import torch
from torch.testing._internal.jit_utils import JitTestCase
+
class TestFuserCommon(JitTestCase):
def test_autodiff_fallback(self):
for rq in [True, False]:
+
@torch.jit.script
def fn(x):
return torch.max(x**2.0, x**3.0)
diff --git a/test/jit/test_graph_rewrite_passes.py b/test/jit/test_graph_rewrite_passes.py
index 3ecdba6bb4..061ef66aa1 100644
--- a/test/jit/test_graph_rewrite_passes.py
+++ b/test/jit/test_graph_rewrite_passes.py
@@ -1,9 +1,9 @@
# Owner(s): ["oncall: jit"]
-from torch.testing._internal.jit_utils import JitTestCase
import torch
import torch._C
from torch.testing import FileCheck
+from torch.testing._internal.jit_utils import JitTestCase
class TestGraphRewritePasses(JitTestCase):
diff --git a/test/jit/test_hash.py b/test/jit/test_hash.py
index 2ca1e9cda0..bc0bcda07a 100644
--- a/test/jit/test_hash.py
+++ b/test/jit/test_hash.py
@@ -3,9 +3,9 @@
import os
import sys
-import torch
+from typing import List, Tuple
-from typing import Tuple, List
+import torch
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
@@ -13,9 +13,12 @@ sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == "__main__":
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
+
class TestHash(JitTestCase):
def test_hash_tuple(self):
@@ -38,6 +41,7 @@ class TestHash(JitTestCase):
def test_hash_tensor(self):
"""Tensors should hash by identity"""
+
def fn(t1, t2):
return hash(t1) == hash(t2)
@@ -74,7 +78,7 @@ class TestHash(JitTestCase):
self.checkScript(fn, (1.2345, 6.789))
self.checkScript(fn, (1.2345, float("inf")))
self.checkScript(fn, (float("inf"), float("inf")))
- self.checkScript(fn, (1.2345, float('nan')))
+ self.checkScript(fn, (1.2345, float("nan")))
if sys.version_info < (3, 10):
# Hash of two nans are not guaranteed to be equal. From https://docs.python.org/3/whatsnew/3.10.html :
# Hashes of NaN values of both float type and decimal.Decimal type now depend on object identity.
@@ -103,9 +107,9 @@ class TestHash(JitTestCase):
def fn(d1: torch.device, d2: torch.device):
return hash(d1) == hash(d2)
- gpu0 = torch.device('cuda:0')
- gpu1 = torch.device('cuda:1')
- cpu = torch.device('cpu')
+ gpu0 = torch.device("cuda:0")
+ gpu1 = torch.device("cuda:1")
+ cpu = torch.device("cpu")
self.checkScript(fn, (gpu0, gpu0))
self.checkScript(fn, (gpu0, gpu1))
self.checkScript(fn, (gpu0, cpu))
diff --git a/test/jit/test_hooks.py b/test/jit/test_hooks.py
index 2963837a63..acced59318 100644
--- a/test/jit/test_hooks.py
+++ b/test/jit/test_hooks.py
@@ -6,21 +6,29 @@ import unittest
from typing import Tuple
import torch
+
from jit.test_hooks_modules import (
- ModuleDirectforwardSubmodCall, ModuleForwardSingleInput,
- ModuleForwardTupleInput, create_forward_tuple_input,
- create_module_forward_multiple_inputs, create_module_forward_single_input,
+ create_forward_tuple_input,
+ create_module_forward_multiple_inputs,
+ create_module_forward_single_input,
create_module_hook_return_nothing,
create_module_multiple_hooks_multiple_inputs,
- create_module_multiple_hooks_single_input, create_module_no_forward_input,
- create_module_same_hook_repeated, create_submodule_forward_multiple_inputs,
+ create_module_multiple_hooks_single_input,
+ create_module_no_forward_input,
+ create_module_same_hook_repeated,
+ create_submodule_forward_multiple_inputs,
create_submodule_forward_single_input,
create_submodule_forward_single_input_return_not_tupled,
create_submodule_hook_return_nothing,
create_submodule_multiple_hooks_multiple_inputs,
create_submodule_multiple_hooks_single_input,
- create_submodule_no_forward_input, create_submodule_same_hook_repeated,
- create_submodule_to_call_directly_with_hooks)
+ create_submodule_no_forward_input,
+ create_submodule_same_hook_repeated,
+ create_submodule_to_call_directly_with_hooks,
+ ModuleDirectforwardSubmodCall,
+ ModuleForwardSingleInput,
+ ModuleForwardTupleInput,
+)
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
@@ -37,7 +45,6 @@ if __name__ == "__main__":
# Tests for JIT forward hooks and pre-hooks
class TestHooks(JitTestCase):
-
def test_module_no_forward_input(self):
self.checkModule(create_module_no_forward_input(), ())
@@ -73,7 +80,8 @@ class TestHooks(JitTestCase):
def test_submodule_multiple_hooks_multiple_inputs(self):
self.checkModule(
- create_submodule_multiple_hooks_multiple_inputs(), (["a"], "no_pre_hook"),
+ create_submodule_multiple_hooks_multiple_inputs(),
+ (["a"], "no_pre_hook"),
)
def test_submodule_forward_single_input(self):
@@ -242,7 +250,8 @@ class TestHooks(JitTestCase):
m.register_forward_pre_hook(pre_hook_wrong_input1)
with self.assertRaisesRegex(
- RuntimeError, "has the wrong inner types for the input tuple argument",
+ RuntimeError,
+ "has the wrong inner types for the input tuple argument",
):
torch.jit.script(m)
@@ -278,7 +287,8 @@ class TestHooks(JitTestCase):
m.register_forward_pre_hook(pre_hook_wrong_output)
with self.assertRaisesRegex(
- RuntimeError, "returned the wrong type of: 'int'",
+ RuntimeError,
+ "returned the wrong type of: 'int'",
):
torch.jit.script(m)
diff --git a/test/jit/test_hooks_modules.py b/test/jit/test_hooks_modules.py
index 1ae7a315ec..2a5e68ab1c 100644
--- a/test/jit/test_hooks_modules.py
+++ b/test/jit/test_hooks_modules.py
@@ -1,8 +1,9 @@
# Owner(s): ["oncall: jit"]
-import torch
from typing import List, Tuple
+import torch
+
class SubmoduleNoForwardInputs(torch.nn.Module):
def __init__(self, name):
diff --git a/test/jit/test_ignorable_args.py b/test/jit/test_ignorable_args.py
index c9fcce329b..dc9ab1907c 100644
--- a/test/jit/test_ignorable_args.py
+++ b/test/jit/test_ignorable_args.py
@@ -2,6 +2,7 @@
import os
import sys
+
import torch
from torch._C import parse_ir
from torch.testing import FileCheck
@@ -11,10 +12,13 @@ pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
+
# Tests that Python slice class is supported in TorchScript
class TestIgnorableArgs(JitTestCase):
@@ -44,11 +48,14 @@ class TestIgnorableArgs(JitTestCase):
# We ignore trailing arguments after start=2 for dim 0
# and after end=1 for dim 1
# because in %16, %15 and %0 are default values for the schema.
- FileCheck().check("torch.slice(torch.slice(torch.tensor(_0), 0, 2), 1, None, 1)").run(src)
+ FileCheck().check(
+ "torch.slice(torch.slice(torch.tensor(_0), 0, 2), 1, None, 1)"
+ ).run(src)
self.assertEqual(function(), function_copy())
def test_add_out_ignorable_args(self):
@torch.jit.script
def fn(x: torch.Tensor, y: torch.Tensor):
torch.add(x, y, out=y)
+
FileCheck().check("torch.add(x, y, out=y)").run(fn.code)
diff --git a/test/jit/test_ignore_context_manager.py b/test/jit/test_ignore_context_manager.py
index 4d0660e9eb..76df7cc4e2 100644
--- a/test/jit/test_ignore_context_manager.py
+++ b/test/jit/test_ignore_context_manager.py
@@ -9,13 +9,16 @@ import torch
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
-from torch.testing._internal.jit_utils import JitTestCase
from torch.jit.frontend import _IS_ASTUNPARSE_INSTALLED
+from torch.testing._internal.jit_utils import JitTestCase
if __name__ == "__main__":
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
+
class TestIgnoreContextManager(JitTestCase):
@unittest.skipUnless(_IS_ASTUNPARSE_INSTALLED, "astunparse package is required")
@@ -26,11 +29,14 @@ class TestIgnoreContextManager(JitTestCase):
b: int = 5
c: int = 0
d: int = 6
- with torch.jit._IgnoreContextManager(a="inp:int", b="inp:int", c="out:int", d="out:int"):
+ with torch.jit._IgnoreContextManager(
+ a="inp:int", b="inp:int", c="out:int", d="out:int"
+ ):
l = [2 for i in range(a) if i > 2]
c = l[0] + a + b
d = 9
return c + d
+
model = A()
s = torch.jit.script(model)
self.assertEqual(s(), model())
@@ -41,10 +47,13 @@ class TestIgnoreContextManager(JitTestCase):
a: int = 4
b: int = 5
c: int = 0
- with torch.jit._IgnoreContextManager(a="inp:int", b="inp:int", c="out:int"):
+ with torch.jit._IgnoreContextManager(
+ a="inp:int", b="inp:int", c="out:int"
+ ):
l = [2 for i in range(a) if i > 2]
c = l[0] + a + b
return c
+
model = B()
s = torch.jit.script(model)
self.assertEqual(s(), 11)
@@ -58,6 +67,7 @@ class TestIgnoreContextManager(JitTestCase):
l = [2 for i in range(a) if i > 2]
b = l[0] + a
return b
+
model = C()
s = torch.jit.script(model)
self.assertEqual(s(), 6)
@@ -72,6 +82,7 @@ class TestIgnoreContextManager(JitTestCase):
with torch.jit._IgnoreContextManager(a="inp:int", b="inp:int"):
l = [2 + b for i in range(a) if i > 2]
return a
+
model = A()
s = torch.jit.script(model)
self.assertEqual(s(), 4)
@@ -85,6 +96,7 @@ class TestIgnoreContextManager(JitTestCase):
c = [2 for i in range(7) if i > 2]
c[0] = 3
return c[0] + c[1]
+
model = A()
s = torch.jit.script(model)
self.assertEqual(s(), 5)
diff --git a/test/jit/test_isinstance.py b/test/jit/test_isinstance.py
index 7f42b709e7..44568fd292 100644
--- a/test/jit/test_isinstance.py
+++ b/test/jit/test_isinstance.py
@@ -2,10 +2,10 @@
import os
import sys
+import warnings
+from typing import Any, Dict, List, Optional, Tuple
import torch
-import warnings
-from typing import List, Any, Dict, Tuple, Optional
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
@@ -19,6 +19,7 @@ if __name__ == "__main__":
"instead."
)
+
# Tests for torch.jit.isinstance
class TestIsinstance(JitTestCase):
def test_int(self):
@@ -223,28 +224,42 @@ class TestIsinstance(JitTestCase):
x = ["1", "2", "3"]
- err_msg = "Attempted to use List without a contained type. " \
+ err_msg = (
+ "Attempted to use List without a contained type. "
r"Please add a contained type, e.g. List\[int\]"
+ )
- with self.assertRaisesRegex(RuntimeError, err_msg,):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ err_msg,
+ ):
torch.jit.script(list_no_contained_type)
- with self.assertRaisesRegex(RuntimeError, err_msg,):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ err_msg,
+ ):
list_no_contained_type(x)
-
-
def test_tuple_no_contained_type(self):
def tuple_no_contained_type(x: Any):
assert torch.jit.isinstance(x, Tuple)
x = ("1", "2", "3")
- err_msg = "Attempted to use Tuple without a contained type. " \
+ err_msg = (
+ "Attempted to use Tuple without a contained type. "
r"Please add a contained type, e.g. Tuple\[int\]"
+ )
- with self.assertRaisesRegex(RuntimeError, err_msg,):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ err_msg,
+ ):
torch.jit.script(tuple_no_contained_type)
- with self.assertRaisesRegex(RuntimeError, err_msg,):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ err_msg,
+ ):
tuple_no_contained_type(x)
def test_optional_no_contained_type(self):
@@ -253,12 +268,20 @@ class TestIsinstance(JitTestCase):
x = ("1", "2", "3")
- err_msg = "Attempted to use Optional without a contained type. " \
+ err_msg = (
+ "Attempted to use Optional without a contained type. "
r"Please add a contained type, e.g. Optional\[int\]"
+ )
- with self.assertRaisesRegex(RuntimeError, err_msg,):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ err_msg,
+ ):
torch.jit.script(optional_no_contained_type)
- with self.assertRaisesRegex(RuntimeError, err_msg,):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ err_msg,
+ ):
optional_no_contained_type(x)
def test_dict_no_contained_type(self):
@@ -267,12 +290,20 @@ class TestIsinstance(JitTestCase):
x = {"a": "aa"}
- err_msg = "Attempted to use Dict without contained types. " \
+ err_msg = (
+ "Attempted to use Dict without contained types. "
r"Please add contained type, e.g. Dict\[int, int\]"
+ )
- with self.assertRaisesRegex(RuntimeError, err_msg,):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ err_msg,
+ ):
torch.jit.script(dict_no_contained_type)
- with self.assertRaisesRegex(RuntimeError, err_msg,):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ err_msg,
+ ):
dict_no_contained_type(x)
def test_tuple_rhs(self):
diff --git a/test/jit/test_jit_utils.py b/test/jit/test_jit_utils.py
index c72aad3623..5ef5f4e899 100644
--- a/test/jit/test_jit_utils.py
+++ b/test/jit/test_jit_utils.py
@@ -13,10 +13,13 @@ pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
+
# Tests various JIT-related utility functions.
class TestJitUtils(JitTestCase):
@@ -24,58 +27,71 @@ class TestJitUtils(JitTestCase):
def test_get_callable_argument_names_positional_or_keyword(self):
def fn_positional_or_keyword_args_only(x, y):
return x + y
+
self.assertEqual(
["x", "y"],
- torch._jit_internal.get_callable_argument_names(fn_positional_or_keyword_args_only))
+ torch._jit_internal.get_callable_argument_names(
+ fn_positional_or_keyword_args_only
+ ),
+ )
# Tests that POSITIONAL_ONLY arguments are ignored.
def test_get_callable_argument_names_positional_only(self):
- code = dedent('''
+ code = dedent(
+ """
def fn_positional_only_arg(x, /, y):
return x + y
- ''')
+ """
+ )
- fn_positional_only_arg = jit_utils._get_py3_code(code, 'fn_positional_only_arg')
+ fn_positional_only_arg = jit_utils._get_py3_code(code, "fn_positional_only_arg")
self.assertEqual(
["y"],
- torch._jit_internal.get_callable_argument_names(fn_positional_only_arg))
+ torch._jit_internal.get_callable_argument_names(fn_positional_only_arg),
+ )
# Tests that VAR_POSITIONAL arguments are ignored.
def test_get_callable_argument_names_var_positional(self):
# Tests that VAR_POSITIONAL arguments are ignored.
def fn_var_positional_arg(x, *arg):
return x + arg[0]
+
self.assertEqual(
["x"],
- torch._jit_internal.get_callable_argument_names(fn_var_positional_arg))
+ torch._jit_internal.get_callable_argument_names(fn_var_positional_arg),
+ )
# Tests that KEYWORD_ONLY arguments are ignored.
def test_get_callable_argument_names_keyword_only(self):
def fn_keyword_only_arg(x, *, y):
return x + y
+
self.assertEqual(
- ["x"],
- torch._jit_internal.get_callable_argument_names(fn_keyword_only_arg))
+ ["x"], torch._jit_internal.get_callable_argument_names(fn_keyword_only_arg)
+ )
# Tests that VAR_KEYWORD arguments are ignored.
def test_get_callable_argument_names_var_keyword(self):
def fn_var_keyword_arg(**args):
- return args['x'] + args['y']
+ return args["x"] + args["y"]
+
self.assertEqual(
- [],
- torch._jit_internal.get_callable_argument_names(fn_var_keyword_arg))
+ [], torch._jit_internal.get_callable_argument_names(fn_var_keyword_arg)
+ )
    # Tests that, in a function signature containing various different types
    # of arguments, the ignorable ones are ignored.
def test_get_callable_argument_names_hybrid(self):
- code = dedent('''
+ code = dedent(
+ """
def fn_hybrid_args(x, /, y, *args, **kwargs):
return x + y + args[0] + kwargs['z']
- ''')
- fn_hybrid_args = jit_utils._get_py3_code(code, 'fn_hybrid_args')
+ """
+ )
+ fn_hybrid_args = jit_utils._get_py3_code(code, "fn_hybrid_args")
self.assertEqual(
- ["y"],
- torch._jit_internal.get_callable_argument_names(fn_hybrid_args))
+ ["y"], torch._jit_internal.get_callable_argument_names(fn_hybrid_args)
+ )
def test_checkscriptassertraisesregex(self):
def fn():
@@ -84,22 +100,18 @@ class TestJitUtils(JitTestCase):
self.checkScriptRaisesRegex(fn, (), Exception, "range", name="fn")
- s = dedent("""
+ s = dedent(
+ """
def fn():
tup = (1, 2)
return tup[2]
- """)
+ """
+ )
self.checkScriptRaisesRegex(s, (), Exception, "range", name="fn")
def test_no_tracer_warn_context_manager(self):
torch._C._jit_set_tracer_state_warn(True)
with jit_utils.NoTracerWarnContextManager() as no_warn:
- self.assertEqual(
- False,
- torch._C._jit_get_tracer_state_warn()
- )
- self.assertEqual(
- True,
- torch._C._jit_get_tracer_state_warn()
- )
+ self.assertEqual(False, torch._C._jit_get_tracer_state_warn())
+ self.assertEqual(True, torch._C._jit_get_tracer_state_warn())
diff --git a/test/jit/test_list_dict.py b/test/jit/test_list_dict.py
index ddaa0ec0cd..f780a9836b 100644
--- a/test/jit/test_list_dict.py
+++ b/test/jit/test_list_dict.py
@@ -1,29 +1,33 @@
# Owner(s): ["oncall: jit"]
+import inspect
import os
import sys
-import inspect
+import types
import unittest
-from typing import Any, Dict, List, NamedTuple, Optional, Tuple
-from textwrap import dedent
from collections import OrderedDict
+from textwrap import dedent
+from typing import Any, Dict, List, NamedTuple, Optional, Tuple
-from torch import Tensor
import torch
import torch.nn as nn
-import types
+
+from torch import Tensor
from torch.testing import FileCheck
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
-from torch.testing._internal.jit_utils import JitTestCase, make_global
from torch.testing._internal.common_utils import skipIfTorchDynamo, TEST_CUDA
+from torch.testing._internal.jit_utils import JitTestCase, make_global
+
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
class TestList(JitTestCase):
def test_list_bool_conversion(self):
@@ -63,16 +67,16 @@ class TestList(JitTestCase):
self.checkScript(int_in, ([1, 3, 3],))
def float_in(x: List[float]) -> bool:
- return 2. in x
+ return 2.0 in x
- self.checkScript(float_in, ([1., 2., 3.],))
- self.checkScript(float_in, ([1., 3., 3.],))
+ self.checkScript(float_in, ([1.0, 2.0, 3.0],))
+ self.checkScript(float_in, ([1.0, 3.0, 3.0],))
def str_in(x: List[str]) -> bool:
- return 'hi' in x
+ return "hi" in x
- self.checkScript(str_in, (['not', 'here'],))
- self.checkScript(str_in, (['hi', 'bye'],))
+ self.checkScript(str_in, (["not", "here"],))
+ self.checkScript(str_in, (["hi", "bye"],))
self.checkScript(str_in, ([],))
def test_list_literal(self):
@@ -81,6 +85,7 @@ class TestList(JitTestCase):
if 1 == 1:
x = [2, 3]
return
+
self.checkScript(reassign, (), optimize=False)
def reassign_arity_change():
@@ -88,6 +93,7 @@ class TestList(JitTestCase):
if 1 == 1:
x = [1, 2, 3]
return
+
self.checkScript(reassign_arity_change, (), optimize=False)
def reassign_from_empty_literal():
@@ -95,7 +101,10 @@ class TestList(JitTestCase):
if 1 == 1:
x = [1, 2, 3]
return
- with self.assertRaisesRegexWithHighlight(RuntimeError, r"previously had type List\[Tensor\]", "x"):
+
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, r"previously had type List\[Tensor\]", "x"
+ ):
self.checkScript(reassign_from_empty_literal, (), optimize=False)
def reassign_from_empty_builtin():
@@ -109,6 +118,7 @@ class TestList(JitTestCase):
if 1 == 1:
z = [torch.randn([1])]
return
+
self.checkScript(reassign_from_empty_builtin, (), optimize=False)
def reassign_bad_type():
@@ -116,7 +126,10 @@ class TestList(JitTestCase):
if 1 == 1:
x = [1.0]
return
- with self.assertRaisesRegexWithHighlight(RuntimeError, "previously had type", "x"):
+
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "previously had type", "x"
+ ):
self.checkScript(reassign_bad_type, (), optimize=False)
def reassign_nested():
@@ -126,7 +139,10 @@ class TestList(JitTestCase):
if 1 == 1:
x = [1.0]
return
- with self.assertRaisesRegexWithHighlight(RuntimeError, "previously had type", "x"):
+
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "previously had type", "x"
+ ):
self.checkScript(reassign_nested, (), optimize=False)
def test_list_variance(self):
@@ -146,6 +162,7 @@ class TestList(JitTestCase):
y: List[None] = [None, None, None]
x: List[Optional[int]] = y
"""
+
def test_listliteral_is_typed_from_annotation():
x: List[Optional[int]] = [None, None, None]
return x
@@ -163,11 +180,14 @@ class TestList(JitTestCase):
y: List[Optional[int]] = x
return x
- with self.assertRaisesRegex(RuntimeError, "Variable 'y' is "
- "annotated with type "
- r"List\[Optional\[int\]\] but is "
- "being assigned to a value of type "
- r"List\[int\]"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Variable 'y' is "
+ "annotated with type "
+ r"List\[Optional\[int\]\] but is "
+ "being assigned to a value of type "
+ r"List\[int\]",
+ ):
torch.jit.script(test_lists_with_different_internal_types_are_invariant)
def test_lists_with_different_internal_types_are_invariant_recursive(self):
@@ -175,12 +195,17 @@ class TestList(JitTestCase):
y: List[List[Optional[int]]] = x
return x
- with self.assertRaisesRegex(RuntimeError, "Variable 'y' is "
- "annotated with type "
- r"List\[List\[Optional\[int\]\]\] "
- "but is being assigned to a value "
- r"of type List\[List\[int\]\]"):
- torch.jit.script(test_lists_with_different_internal_types_are_invariant_recursive)
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Variable 'y' is "
+ "annotated with type "
+ r"List\[List\[Optional\[int\]\]\] "
+ "but is being assigned to a value "
+ r"of type List\[List\[int\]\]",
+ ):
+ torch.jit.script(
+ test_lists_with_different_internal_types_are_invariant_recursive
+ )
def test_del(self):
def inputs():
@@ -203,10 +228,15 @@ class TestList(JitTestCase):
del x[100]
return x
- with self.assertRaisesRegexWithHighlight(RuntimeError, "out of range", "x[100]"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "out of range", "x[100]"
+ ):
fn2([])
- with self.assertRaisesRegexWithHighlight(RuntimeError, "deletion at a single index", "x[1:3]"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "deletion at a single index", "x[1:3]"
+ ):
+
@torch.jit.script
def fn(x: List[int]) -> List[int]:
del x[1:3]
@@ -214,14 +244,19 @@ class TestList(JitTestCase):
def test_list_keyword(self):
def foo():
- return list([1, 2, 3]), list(("a", "b")), list(range(5)), list("abcdefg") # noqa: C410
+ return (
+ list([1, 2, 3]), # noqa: C410
+ list(("a", "b")), # noqa: C410
+ list(range(5)),
+ list("abcdefg"),
+ )
self.checkScript(foo, ())
def foo2():
x: List[int] = list()
x.append(1)
- return x,
+ return (x,)
self.checkScript(foo2, ())
@@ -229,7 +264,9 @@ class TestList(JitTestCase):
return list(list("abc")) # noqa: C414
self.checkScript(foo3, ())
- FileCheck().check_count("aten::list", 2, exactly=True).run(torch.jit.script(foo3).graph)
+ FileCheck().check_count("aten::list", 2, exactly=True).run(
+ torch.jit.script(foo3).graph
+ )
def test_dict_keyword_with_kwargs(self):
def fn():
@@ -245,13 +282,13 @@ class TestList(JitTestCase):
def test_dict_keyword_with_iterable(self):
def fn():
- return dict([("foo", 1), ("bar", 2), ("baz", 3)]) # noqa: C406
+ return dict([("foo", 1), ("bar", 2), ("baz", 3)]) # noqa: C406
self.checkScript(fn, ())
def test_dict_keyword_with_empty_iterable(self):
def fn():
- return dict([]) # noqa: C406
+ return dict([]) # noqa: C406
self.checkScript(fn, ())
@@ -263,13 +300,13 @@ class TestList(JitTestCase):
def test_dict_keyword_with_mapping(self):
def fn():
- return {"foo" : 1, "bar" : 2, "baz" : 3}
+ return {"foo": 1, "bar": 2, "baz": 3}
self.checkScript(fn, ())
def test_dict_keyword_with_mapping_and_kwargs(self):
def fn():
- return dict({"foo" : 1, "bar" : 2}, baz=3)
+ return dict({"foo": 1, "bar": 2}, baz=3)
self.checkScript(fn, ())
@@ -281,7 +318,7 @@ class TestList(JitTestCase):
def test_dict_keyword_with_dict_comprehension_and_kwargs(self):
def fn():
- return dict({chr(65 + i) : i for i in range(4)}, foo=2)
+ return dict({chr(65 + i): i for i in range(4)}, foo=2)
self.checkScript(fn, ())
@@ -300,12 +337,17 @@ class TestList(JitTestCase):
self.checkScript(fn, ())
def test_dict_keyword_with_mismatched_annotations(self):
- err_msg = r"Dict type annotation `Dict\[int, str\]` did not " \
- "match the type of an actual key type `str`"
+ err_msg = (
+ r"Dict type annotation `Dict\[int, str\]` did not "
+ "match the type of an actual key type `str`"
+ )
with self.assertRaisesRegex(RuntimeError, err_msg):
+
@torch.jit.script
def fn():
- x: Dict[int, str] = dict([("foo", 1), ("bar", 2), ("baz", 3)]) # noqa: C406
+ x: Dict[int, str] = dict( # noqa: C406
+ [("foo", 1), ("bar", 2), ("baz", 3)]
+ )
return x
def test_dict_keyword_with_nested_call(self):
@@ -316,14 +358,14 @@ class TestList(JitTestCase):
def test_dict_keyword_with_previously_declared_variable(self):
def fn():
- d = {"foo" : 1, "bar" : 2}
+ d = {"foo": 1, "bar": 2}
return dict(d)
self.checkScript(fn, ())
def test_dict_keyword_with_previously_declared_variable_and_kwargs(self):
def fn():
- d = {"foo" : 1, "bar" : 2}
+ d = {"foo": 1, "bar": 2}
return dict(d, baz=3)
self.checkScript(fn, ())
@@ -352,14 +394,39 @@ class TestList(JitTestCase):
args_right_int = [[2, 1, 1], [1, 8, 8], [], [1], [], [1, 2]]
run_tests(jit_min_list, args_left_int, args_right_int)
- args_left_float = [[1., 8., 8.], [2., 1., 1.], [], [2.], [1.], [1., 2., 3.]]
- args_right_float = [[2., 1., 1.], [1., 8., 8.], [], [1.], [], [1., 2.]]
+ args_left_float = [
+ [1.0, 8.0, 8.0],
+ [2.0, 1.0, 1.0],
+ [],
+ [2.0],
+ [1.0],
+ [1.0, 2.0, 3.0],
+ ]
+ args_right_float = [[2.0, 1.0, 1.0], [1.0, 8.0, 8.0], [], [1.0], [], [1.0, 2.0]]
run_tests(jit_min_list_float, args_left_float, args_right_float)
- args_left_bool = [[], [], [], [False], [True], [False, True], [True, True],
- [False, False, False], [False, False, True]]
- args_right_bool = [[], [False], [True], [True], [False], [True, True],
- [False, True], [False, False, True], [False, False, False]]
+ args_left_bool = [
+ [],
+ [],
+ [],
+ [False],
+ [True],
+ [False, True],
+ [True, True],
+ [False, False, False],
+ [False, False, True],
+ ]
+ args_right_bool = [
+ [],
+ [False],
+ [True],
+ [True],
+ [False],
+ [True, True],
+ [False, True],
+ [False, False, True],
+ [False, False, False],
+ ]
run_tests(jit_min_list_bool, args_left_bool, args_right_bool)
def jit_max_list(a: List[int], b: List[int]) -> List[int]:
@@ -375,8 +442,15 @@ class TestList(JitTestCase):
args_right_int = [[8, 1, 1], [1, 8, 8], [], [2], [1], [1, 2, 3]]
run_tests(jit_max_list, args_left_int, args_right_int)
- args_left_float = [[1., 8., 8.], [8., 1., 1.], [], [1.], [], [1., 2.]]
- args_right_float = [[8., 1., 1.], [1., 8., 8.], [], [2.], [1.], [1., 2., 3.]]
+ args_left_float = [[1.0, 8.0, 8.0], [8.0, 1.0, 1.0], [], [1.0], [], [1.0, 2.0]]
+ args_right_float = [
+ [8.0, 1.0, 1.0],
+ [1.0, 8.0, 8.0],
+ [],
+ [2.0],
+ [1.0],
+ [1.0, 2.0, 3.0],
+ ]
run_tests(jit_max_list_float, args_left_float, args_right_float)
run_tests(jit_max_list_bool, args_left_bool, args_right_bool)
@@ -398,15 +472,15 @@ class TestList(JitTestCase):
a = [1, 2, 3]
return a[4]
- self.checkScriptRaisesRegex(bad_index, (), Exception,
- "list index out of range")
+ self.checkScriptRaisesRegex(bad_index, (), Exception, "list index out of range")
def bad_negative_index():
a = [1, 2, 3]
return a[-5]
- self.checkScriptRaisesRegex(bad_negative_index, (), Exception,
- "list index out of range")
+ self.checkScriptRaisesRegex(
+ bad_negative_index, (), Exception, "list index out of range"
+ )
def test_list_len(self):
def func():
@@ -421,7 +495,9 @@ class TestList(JitTestCase):
self.checkScript(func2, ())
- @skipIfTorchDynamo("TorchDynamo fails to raise on this checkScriptRaisesRegex, because we trace it properly now")
+ @skipIfTorchDynamo(
+ "TorchDynamo fails to raise on this checkScriptRaisesRegex, because we trace it properly now"
+ )
def test_list_ops(self):
def test_equality():
a = [1, 2, 3]
@@ -510,13 +586,12 @@ class TestList(JitTestCase):
return x == y
self.checkScriptRaisesRegex(
- test_invalid_list_equality,
- (),
- RuntimeError,
- "Boolean value of Tensor")
+ test_invalid_list_equality, (), RuntimeError, "Boolean value of Tensor"
+ )
def test_list_sort(self):
- template = dedent('''
+ template = dedent(
+ """
def func():
li_1 = {list_create}
li_2 = {list_create}
@@ -525,26 +600,36 @@ class TestList(JitTestCase):
li_2.sort(reverse=True)
li_4 = sorted(li_3)
return li_1, li_2, li_3, li_4
- ''')
+ """
+ )
- lists = ["[]", "[1, 3, 2]", "[True, False, True]", "[1.2, .2, 3.2]",
- "[torch.tensor(1.0), torch.tensor(0.2), torch.tensor(0.5)]",
- "[torch.tensor(5), torch.tensor(-2), torch.tensor(4)]"]
+ lists = [
+ "[]",
+ "[1, 3, 2]",
+ "[True, False, True]",
+ "[1.2, .2, 3.2]",
+ "[torch.tensor(1.0), torch.tensor(0.2), torch.tensor(0.5)]",
+ "[torch.tensor(5), torch.tensor(-2), torch.tensor(4)]",
+ ]
for li in lists:
code = template.format(list_create=li)
scope = {}
exec(code, globals(), scope)
cu = torch.jit.CompilationUnit(code)
t1 = cu.func()
- t2 = scope['func']()
+ t2 = scope["func"]()
self.assertEqual(t1, t2)
def test_fail(x: List[Tensor]) -> List[Tensor]:
x.sort()
return x
- self.checkScriptRaisesRegex(test_fail, (([torch.zeros([2]), torch.zeros([2])],)), Exception,
- "Boolean value of Tensor with more than one value")
+ self.checkScriptRaisesRegex(
+ test_fail,
+ (([torch.zeros([2]), torch.zeros([2])],)),
+ Exception,
+ "Boolean value of Tensor with more than one value",
+ )
@torch.jit.script
def test_mutation():
@@ -567,36 +652,43 @@ class TestList(JitTestCase):
def test_regular_slice():
a = [0, 1, 2, 3, 4]
return a[2:3] == [2]
+
self.checkScript(test_regular_slice, ())
def test_open_ended_slice():
a = [0, 1, 2, 3, 4]
return a[2:] == [2, 3, 4]
+
self.checkScript(test_open_ended_slice, ())
def test_open_ended_slice2():
a = [0, 1, 2, 3, 4]
return a[:2] == [0, 1]
+
self.checkScript(test_open_ended_slice2, ())
def test_negative_slice():
a = [0, 1, 2, 3, 4]
return a[:-1] == [0, 1, 2, 3]
+
self.checkScript(test_negative_slice, ())
def test_negative_slice2():
a = [0, 1, 2, 3, 4]
return a[-3:-1] == [2, 3]
+
self.checkScript(test_negative_slice2, ())
def test_backward_slice():
a = [0, 1, 2, 3, 4]
return a[3:2] == torch.jit.annotate(List[int], [])
+
self.checkScript(test_backward_slice, ())
def test_over_slice():
a = [0, 1, 2, 3, 4]
return a[3:10] == [3, 4]
+
self.checkScript(test_backward_slice, ())
def test_slice_index(self):
@@ -611,29 +703,37 @@ class TestList(JitTestCase):
def test_index_slice1(x):
x = x[:, :, [0, 1]]
return x
+
self.checkScript(test_index_slice1, (a,))
def test_index_slice2(x):
x = x[[2, 1, 0], :, :]
return x
+
self.checkScript(test_index_slice2, (a,))
def test_index_slice3(x):
x = x[[0, 1], :, [1]]
return x
+
self.checkScript(test_index_slice3, (a,))
def test_index_slice_empty_list(x):
empty_list: List[int] = []
x = x[empty_list, :, :]
return x
+
self.checkScript(test_index_slice_empty_list, (a,))
def test_index_slice_out_of_bounds_index(x):
x = x[[4], :, :]
return x
- with self.assertRaisesRegexWithHighlight(RuntimeError, "index 4 is out of bounds for dimension 0 with size 3",
- "x[[4], :, :]"):
+
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError,
+ "index 4 is out of bounds for dimension 0 with size 3",
+ "x[[4], :, :]",
+ ):
self.checkScript(test_index_slice_out_of_bounds_index, (a,))
def test_mutable_list_append(self):
@@ -642,11 +742,11 @@ class TestList(JitTestCase):
a.append(2)
a.append(3)
return a == [0, 1, 2, 3]
+
self.checkScript(test_append, ())
def test_comprehensions_basic(self):
def comp(l: List[int]) -> List[int]:
-
n = [x * 3 for x in l]
return n
@@ -655,7 +755,6 @@ class TestList(JitTestCase):
def test_comprehensions_basic_float(self):
def comp(l: List[float]) -> List[float]:
-
n = [x * 3 for x in l]
return n
@@ -664,7 +763,6 @@ class TestList(JitTestCase):
def test_comprehensions_two_comps(self):
@torch.jit.script
def comp(l1: List[int], l2: List[int]) -> List[int]:
-
n = [x * 3 for x in l1]
n2 = [x + 2 for x in l2]
return n + n2
@@ -715,6 +813,7 @@ class TestList(JitTestCase):
a = [1]
a.append(4)
return a == [1, 4]
+
self.checkScript(test_append_2, ())
def test_mutable_list_append_if(self):
@@ -723,6 +822,7 @@ class TestList(JitTestCase):
if 1 == 1:
a.append(4)
return a == [1, 4]
+
self.checkScript(test_append_if, ())
def test_mutable_list_append_if_else(self):
@@ -733,6 +833,7 @@ class TestList(JitTestCase):
else:
a.append(10)
return a == [1, 10]
+
self.checkScript(test_append_if_else, ())
def test_mutable_list_append_loop(self):
@@ -742,6 +843,7 @@ class TestList(JitTestCase):
a.append(i)
return a == [0, 1, 2, 3, 4]
+
self.checkScript(test_append_loop, ())
def test_mutable_list_append_loop_if(self):
@@ -754,6 +856,7 @@ class TestList(JitTestCase):
a.append(0)
return a == [0, 0, 0, 0, 4]
+
self.checkScript(test_append_loop_if, ())
def test_mutable_list_nested_loop(self):
@@ -764,6 +867,7 @@ class TestList(JitTestCase):
a.append(i + j)
return a == [0, 1, 1, 2]
+
self.checkScript(test_nested_loop, ())
def test_mutable_list_function_inline(self):
@@ -785,6 +889,7 @@ class TestList(JitTestCase):
a.reverse()
return a == []
+
self.checkScript(test_reverse_empty, ())
def test_mutable_list_reverse(self):
@@ -793,6 +898,7 @@ class TestList(JitTestCase):
a.reverse()
return a == [4, 3, 2, 1]
+
self.checkScript(test_reverse, ())
def test_mutable_tensor_list_reverse(self):
@@ -801,6 +907,7 @@ class TestList(JitTestCase):
a.reverse()
return a == [torch.tensor(2), torch.tensor(1)]
+
self.checkScript(test_tensor_reverse, ())
def test_mutable_list_pop_empty(self):
@@ -809,7 +916,9 @@ class TestList(JitTestCase):
a = torch.jit.annotate(List[int], [])
return a.pop()
- with self.assertRaisesRegexWithHighlight(RuntimeError, "pop from empty list", "a.pop"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "pop from empty list", "a.pop"
+ ):
test_pop_empty()
def test_mutable_list_pop(self):
@@ -884,6 +993,7 @@ class TestList(JitTestCase):
a.clear()
return len(a) == 0
+
self.checkScript(test_clear_empty, ())
def test_mutable_list_clear(self):
@@ -892,6 +1002,7 @@ class TestList(JitTestCase):
a.clear()
return len(a) == 0
+
self.checkScript(test_clear, ())
def test_mutable_list_insert(self):
@@ -900,6 +1011,7 @@ class TestList(JitTestCase):
a.insert(2, 5)
return a == [1, 2, 5, 3, 4]
+
self.checkScript(test_list_insert, ())
def test_mutable_list_insert_negative(self):
@@ -908,6 +1020,7 @@ class TestList(JitTestCase):
a.insert(-1, 5)
return a == [1, 2, 3, 5, 4]
+
self.checkScript(test_list_insert_negative, ())
def test_mutable_list_insert_neg_out_of_bounds(self):
@@ -916,6 +1029,7 @@ class TestList(JitTestCase):
a.insert(-10, 5)
return a == [5, 1, 2, 3, 4]
+
self.checkScript(test_list_insert_neg_out_of_bounds, ())
def test_mutable_list_insert_out_of_bounds(self):
@@ -924,6 +1038,7 @@ class TestList(JitTestCase):
a.insert(10, 5)
return a == [1, 2, 3, 4, 5]
+
self.checkScript(test_list_insert_out_of_bounds, ())
def test_mutable_list_remove_not_existing(self):
@@ -934,7 +1049,9 @@ class TestList(JitTestCase):
return a
- with self.assertRaisesRegexWithHighlight(RuntimeError, "x not in list", "a.remove"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "x not in list", "a.remove"
+ ):
test_list_remove_not_existing()
def test_mutable_list_remove(self):
@@ -943,6 +1060,7 @@ class TestList(JitTestCase):
a.remove(3)
return a == [1, 2, 4]
+
self.checkScript(test_list_remove, ())
def test_str_list_remove():
@@ -950,6 +1068,7 @@ class TestList(JitTestCase):
a.remove("foo")
return a == ["bar"]
+
self.checkScript(test_str_list_remove, ())
def test_list_index_not_existing(self):
@@ -960,7 +1079,9 @@ class TestList(JitTestCase):
return i
- with self.assertRaisesRegexWithHighlight(RuntimeError, "'5' is not in list", "a.index"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "'5' is not in list", "a.index"
+ ):
list_index_not_existing()
def test_list_index(self):
@@ -969,6 +1090,7 @@ class TestList(JitTestCase):
i = a.index(3)
return i == 2
+
self.checkScript(list_index, ())
def list_str_index():
@@ -976,6 +1098,7 @@ class TestList(JitTestCase):
i = a.index("bar")
return i == 1
+
self.checkScript(list_str_index, ())
def test_tensor_list_index(self):
@@ -984,6 +1107,7 @@ class TestList(JitTestCase):
i = a.index(torch.tensor(3))
return i == 2
+
self.checkScript(tensor_list_index, ())
def test_tensor_list_index_not_existing(self):
@@ -994,7 +1118,9 @@ class TestList(JitTestCase):
return i
- with self.assertRaisesRegexWithHighlight(RuntimeError, "is not in list", "a.index"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "is not in list", "a.index"
+ ):
tensor_list_index_not_existing()
def test_list_count(self):
@@ -1003,6 +1129,7 @@ class TestList(JitTestCase):
i = a.count(4)
return i == 3
+
self.checkScript(list_count, ())
def list_str_count():
@@ -1010,6 +1137,7 @@ class TestList(JitTestCase):
i = a.count("foo")
return i == 2
+
self.checkScript(list_str_count, ())
def test_list_count_not_existing(self):
@@ -1018,6 +1146,7 @@ class TestList(JitTestCase):
i = a.count(5)
return i == 0
+
self.checkScript(list_count_not_existing, ())
def test_tensor_list_count(self):
@@ -1026,6 +1155,7 @@ class TestList(JitTestCase):
i = a.count(torch.tensor(4))
return i == 3
+
self.checkScript(tensor_list_count, ())
def test_tensor_list_count_not_existing(self):
@@ -1034,6 +1164,7 @@ class TestList(JitTestCase):
i = a.count(torch.tensor(5))
return i == 0
+
self.checkScript(tensor_list_count_not_existing, ())
def test_mutable_list_remove_tensor(self):
@@ -1042,6 +1173,7 @@ class TestList(JitTestCase):
a.remove(torch.zeros(1))
return len(a) == 2
+
self.checkScript(test_list_remove_tensor, ())
def test_mutable_list_remove2(self):
@@ -1050,23 +1182,26 @@ class TestList(JitTestCase):
a.remove(1)
return len(a) == 0
+
self.checkScript(test_list_remove2, ())
def test_extend_list_mutable(self):
@torch.jit.script
def extend_list(a: List[Tensor], b: List[Tensor]) -> List[Tensor]:
-
a.extend(b)
return a
for l in [[], [torch.rand(2)], [torch.rand(2), torch.rand(2), torch.rand(2)]]:
- for r in [[], [torch.rand(2)], [torch.rand(2), torch.rand(2), torch.rand(2)]]:
+ for r in [
+ [],
+ [torch.rand(2)],
+ [torch.rand(2), torch.rand(2), torch.rand(2)],
+ ]:
self.assertEqual(extend_list(l, r), l + r)
def test_extend_list_immutable(self):
@torch.jit.script
def extend_list(a: List[int], b: List[int]) -> List[int]:
-
a.extend(b)
return a
@@ -1109,7 +1244,6 @@ class TestList(JitTestCase):
def max_floatlist(li: List[float]) -> float:
return max(li)
-
int_lists = [1], [2, 1, 2], [-3, 4, 2], [-2, -7, 1, 4], [2, 1, 0, 4], []
def check_list(fn, li):
@@ -1136,6 +1270,7 @@ class TestList(JitTestCase):
"""
Boolean dtype unit tests.
"""
+
def to_list_bool_0D(x: torch.Tensor) -> bool:
li = torch.jit.annotate(bool, x.tolist())
return li
@@ -1173,6 +1308,7 @@ class TestList(JitTestCase):
"""
Int dtype unit tests.
"""
+
def to_list_int_0D(x: torch.Tensor) -> int:
li = torch.jit.annotate(int, x.tolist())
return li
@@ -1206,6 +1342,7 @@ class TestList(JitTestCase):
"""
Float dtype unit tests.
"""
+
def to_list_float_0D(x: torch.Tensor) -> float:
li = torch.jit.annotate(float, x.tolist())
return li
@@ -1227,17 +1364,23 @@ class TestList(JitTestCase):
self.checkScript(to_list_float_1D, (torch.randn(5, dtype=torch.float),))
self.checkScript(to_list_float_2D, (torch.randn(5, 6, dtype=torch.float),))
self.checkScript(to_list_float_3D, (torch.randn(5, 6, 7, dtype=torch.float),))
- self.checkScript(to_list_float_3D, (torch.randn(5, 6, 7, dtype=torch.float).transpose(0, 1),))
+ self.checkScript(
+ to_list_float_3D, (torch.randn(5, 6, 7, dtype=torch.float).transpose(0, 1),)
+ )
self.checkScript(to_list_float_0D, (torch.randn(5, dtype=torch.double)[0],))
self.checkScript(to_list_float_1D, (torch.randn(5, dtype=torch.double),))
self.checkScript(to_list_float_2D, (torch.randn(5, 6, dtype=torch.double),))
self.checkScript(to_list_float_3D, (torch.randn(5, 6, 7, dtype=torch.double),))
- self.checkScript(to_list_float_3D, (torch.randn(5, 6, 7, dtype=torch.double).transpose(0, 1),))
+ self.checkScript(
+ to_list_float_3D,
+ (torch.randn(5, 6, 7, dtype=torch.double).transpose(0, 1),),
+ )
"""
Complex dtype unit tests.
"""
+
def to_list_complex_0D(x: torch.Tensor) -> complex:
li = torch.jit.annotate(complex, x.tolist())
return li
@@ -1258,14 +1401,24 @@ class TestList(JitTestCase):
self.checkScript(to_list_complex_0D, (torch.randn(5, dtype=torch.cfloat)[0],))
self.checkScript(to_list_complex_1D, (torch.randn(5, dtype=torch.cfloat),))
self.checkScript(to_list_complex_2D, (torch.randn(5, 6, dtype=torch.cfloat),))
- self.checkScript(to_list_complex_3D, (torch.randn(5, 6, 7, dtype=torch.cfloat),))
- self.checkScript(to_list_complex_3D, (torch.randn(5, 6, 7, dtype=torch.cfloat).transpose(0, 1),))
+ self.checkScript(
+ to_list_complex_3D, (torch.randn(5, 6, 7, dtype=torch.cfloat),)
+ )
+ self.checkScript(
+ to_list_complex_3D,
+ (torch.randn(5, 6, 7, dtype=torch.cfloat).transpose(0, 1),),
+ )
self.checkScript(to_list_complex_0D, (torch.randn(5, dtype=torch.cdouble)[0],))
self.checkScript(to_list_complex_1D, (torch.randn(5, dtype=torch.cdouble),))
self.checkScript(to_list_complex_2D, (torch.randn(5, 6, dtype=torch.cdouble),))
- self.checkScript(to_list_complex_3D, (torch.randn(5, 6, 7, dtype=torch.cdouble),))
- self.checkScript(to_list_complex_3D, (torch.randn(5, 6, 7, dtype=torch.cdouble).transpose(0, 1),))
+ self.checkScript(
+ to_list_complex_3D, (torch.randn(5, 6, 7, dtype=torch.cdouble),)
+ )
+ self.checkScript(
+ to_list_complex_3D,
+ (torch.randn(5, 6, 7, dtype=torch.cdouble).transpose(0, 1),),
+ )
"""
Non-happy path tests:
@@ -1275,6 +1428,7 @@ class TestList(JitTestCase):
- type annotation with the wrong dimension
- type annotation with scalar type that doesn't match the input scalar type
"""
+
def to_list_missing_type_annotation(x: torch.Tensor) -> List[float]:
li = x.tolist()
return li
@@ -1291,21 +1445,21 @@ class TestList(JitTestCase):
li = torch.jit.annotate(List[List[float]], x.tolist())
return li
- def to_list_type_annotation_incorrect_scalar_type(x: torch.Tensor) -> List[float]:
+ def to_list_type_annotation_incorrect_scalar_type(
+ x: torch.Tensor,
+ ) -> List[float]:
li = torch.jit.annotate(List[float], x.tolist())
return li
with self.assertRaisesRegexWithHighlight(
- RuntimeError,
- r"Expected type hint for result of tolist()",
- "x.tolist("
+ RuntimeError, r"Expected type hint for result of tolist()", "x.tolist("
):
self.checkScript(to_list_missing_type_annotation, (torch.randn(5),))
with self.assertRaisesRegexWithHighlight(
RuntimeError,
r"Return value was annotated as having type List\[float\] but is actually of type float",
- "return li"
+ "return li",
):
self.checkScript(to_list_incorrect_type_annotation, (torch.randn(5),))
@@ -1318,7 +1472,9 @@ class TestList(JitTestCase):
RuntimeError,
r"Output annotation list dimension and runtime tensor dimension must match",
):
- self.checkScript(to_list_type_annotation_wrong_dim, (torch.randn(5, dtype=torch.double),))
+ self.checkScript(
+ to_list_type_annotation_wrong_dim, (torch.randn(5, dtype=torch.double),)
+ )
with self.assertRaisesRegex(
RuntimeError,
@@ -1329,9 +1485,10 @@ class TestList(JitTestCase):
(torch.ones(5, dtype=torch.long),),
)
- @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
+ @unittest.skipIf(not TEST_CUDA, "CUDA not available")
def test_to_list_gpu(self):
"""GPU tests for Tensor.tolist() function."""
+
def to_list_bool_1D(x: torch.Tensor) -> List[bool]:
li = torch.jit.annotate(List[bool], x.tolist())
return li
@@ -1344,12 +1501,14 @@ class TestList(JitTestCase):
li = torch.jit.annotate(List[float], x.tolist())
return li
- self.checkScript(to_list_bool_1D, (torch.tensor(
- [True, False, True, False], dtype=torch.bool).cuda(),))
- self.checkScript(to_list_int_1D, (torch.tensor(
- [1, 2, 3, 4], dtype=torch.long).cuda(),))
- self.checkScript(to_list_float_1D, (torch.randn(
- 5, dtype=torch.double).cuda(),))
+ self.checkScript(
+ to_list_bool_1D,
+ (torch.tensor([True, False, True, False], dtype=torch.bool).cuda(),),
+ )
+ self.checkScript(
+ to_list_int_1D, (torch.tensor([1, 2, 3, 4], dtype=torch.long).cuda(),)
+ )
+ self.checkScript(to_list_float_1D, (torch.randn(5, dtype=torch.double).cuda(),))
def test_no_element_type_annotation(self):
def fn_with_comment(x: torch.Tensor) -> List:
@@ -1360,38 +1519,55 @@ class TestList(JitTestCase):
a: List = x.tolist()
return a
- with self.assertRaisesRegex(RuntimeError, r"Attempted to use List without a contained type"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Attempted to use List without a contained type"
+ ):
cu = torch.jit.CompilationUnit()
cu.define(dedent(inspect.getsource(fn_with_comment)))
- with self.assertRaisesRegex(RuntimeError, r"Attempted to use List without a contained type"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Attempted to use List without a contained type"
+ ):
cu = torch.jit.CompilationUnit()
cu.define(dedent(inspect.getsource(annotated_fn)))
- with self.assertRaisesRegex(RuntimeError, r"Attempted to use List without a contained type"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Attempted to use List without a contained type"
+ ):
torch.jit.script(fn_with_comment)
- with self.assertRaisesRegex(RuntimeError, r"Attempted to use List without a contained type"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Attempted to use List without a contained type"
+ ):
torch.jit.script(annotated_fn)
def test_list_none(self):
- with self.assertRaisesRegex(RuntimeError, "Can not create ListType with None type"):
+ with self.assertRaisesRegex(
+ RuntimeError, "Can not create ListType with None type"
+ ):
x = torch._C.ListType(None)
def test_list_unification_hint(self):
- with self.assertRaisesRegex(RuntimeError, "Expected an annotation of type List"):
+ with self.assertRaisesRegex(
+ RuntimeError, "Expected an annotation of type List"
+ ):
+
@torch.jit.script
def x():
- b : int = [2, 3]
+ b: int = [2, 3]
return b
class TestDict(JitTestCase):
def dict(self):
- return {u'a': torch.ones(1), u'b': torch.ones(1) + 1, u'c': torch.ones(1) + 2}
+ return {"a": torch.ones(1), "b": torch.ones(1) + 1, "c": torch.ones(1) + 2}
def dict2(self):
- return {'x': torch.ones(1) + 100, 'y': torch.ones(1) + 101, 'z': torch.ones(1) + 102}
+ return {
+ "x": torch.ones(1) + 100,
+ "y": torch.ones(1) + 101,
+ "z": torch.ones(1) + 102,
+ }
def dict_bool(self):
return {True: 1}
@@ -1428,10 +1604,10 @@ class TestDict(JitTestCase):
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_del(self):
def inputs():
- return {'hi': 2, 'bye': 3}
+ return {"hi": 2, "bye": 3}
def fn(x: Dict[str, int]) -> Dict[str, int]:
- del x['hi']
+ del x["hi"]
return x
python_out = fn(inputs())
@@ -1441,7 +1617,7 @@ class TestDict(JitTestCase):
cu.define(dedent(inspect.getsource(fn)))
self.assertEqual(cu.fn(inputs()), python_out)
self.assertEqual(torch.jit.script(fn)(inputs()), python_out)
- with self.assertRaisesRegexWithHighlight(RuntimeError, "KeyError", "x['hi']"):
+ with self.assertRaisesRegexWithHighlight(RuntimeError, "KeyError", 'x["hi"]'):
self.checkScript(fn, [{}])
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
@@ -1460,6 +1636,7 @@ class TestDict(JitTestCase):
here), and 2) the value type of the dict is a subtype of the
value type of the rhs dict.
"""
+
def test_dictliteral_is_typed_from_annotation():
x: Dict[str, Optional[int]] = {"foo": None, "bar": None, "baz": None}
return x
@@ -1468,7 +1645,9 @@ class TestDict(JitTestCase):
def test_dictcomprehension_is_typed_from_annotation():
metasyntactics = ["foo", "bar", "baz"]
- x: Dict[str, Optional[int]] = {word: None for word in metasyntactics} # noqa: RUF025
+ x: Dict[str, Optional[int]] = {
+ word: None for word in metasyntactics
+ } # noqa: RUF025
return x
self.checkScript(test_dictcomprehension_is_typed_from_annotation, ())
@@ -1478,11 +1657,14 @@ class TestDict(JitTestCase):
y: Dict[str, Optional[int]] = x
return x
- with self.assertRaisesRegex(RuntimeError, "Variable 'y' is "
- "annotated with type "
- r"Dict\[str, Optional\[int\]\] but "
- "is being assigned to a value of "
- r"type Dict\[str, int\]"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Variable 'y' is "
+ "annotated with type "
+ r"Dict\[str, Optional\[int\]\] but "
+ "is being assigned to a value of "
+ r"type Dict\[str, int\]",
+ ):
torch.jit.script(test_dicts_with_different_value_types_are_invariant)
def test_dicts_with_different_value_types_are_invariant_recursive(self):
@@ -1491,13 +1673,18 @@ class TestDict(JitTestCase):
z: Dict[str, Dict[str, Optional[int]]] = y
return x
- with self.assertRaisesRegex(RuntimeError, "Variable 'z' is "
- "annotated with type "
- r"Dict\[str, Dict\[str, Optional"
- r"\[int\]\]\] but is being assigned"
- r" to a value of type Dict\[str, "
- r"Dict\[str, int\]\]"):
- torch.jit.script(test_dicts_with_different_value_types_are_invariant_recursive)
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Variable 'z' is "
+ "annotated with type "
+ r"Dict\[str, Dict\[str, Optional"
+ r"\[int\]\]\] but is being assigned"
+ r" to a value of type Dict\[str, "
+ r"Dict\[str, int\]\]",
+ ):
+ torch.jit.script(
+ test_dicts_with_different_value_types_are_invariant_recursive
+ )
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_keys(self):
@@ -1547,8 +1734,8 @@ class TestDict(JitTestCase):
# checkScript
scripted_func = torch.jit.script(func)
- eager_out = (func(self.dict()))
- script_out = (scripted_func(self.dict()))
+ eager_out = func(self.dict())
+ script_out = scripted_func(self.dict())
self.assertEqual(len(eager_out), len(script_out))
for item in eager_out:
@@ -1566,30 +1753,35 @@ class TestDict(JitTestCase):
script_out = torch.jit.script(fn)(self.dict(), *args)
self.assertEqual(eager_out, script_out)
- tester(pop, 'a')
+ tester(pop, "a")
with self.assertRaisesRegexWithHighlight(RuntimeError, "KeyError", "x.pop"):
- torch.jit.script(pop)(self.dict(), 'x')
-
+ torch.jit.script(pop)(self.dict(), "x")
- def default_pop(x: Dict[str, Tensor], key: str, default: Tensor) -> Tuple[Tensor, Dict[str, Tensor]]:
+ def default_pop(
+ x: Dict[str, Tensor], key: str, default: Tensor
+ ) -> Tuple[Tensor, Dict[str, Tensor]]:
return x.pop(key, default), x
- tester(default_pop, 'a', torch.randn(2, 2))
- tester(default_pop, 'x', torch.randn(2, 2))
+ tester(default_pop, "a", torch.randn(2, 2))
+ tester(default_pop, "x", torch.randn(2, 2))
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_setdefault(self):
- def setdefault(x: Dict[str, Tensor], key: str, default: Tensor) -> Dict[str, Tensor]:
+ def setdefault(
+ x: Dict[str, Tensor], key: str, default: Tensor
+ ) -> Dict[str, Tensor]:
x.setdefault(key, default)
return x
- self.checkScript(setdefault, (self.dict(), 'a', torch.randn(2, 2)))
- self.checkScript(setdefault, (self.dict(), 'nonexistant', torch.randn(2, 2)))
+ self.checkScript(setdefault, (self.dict(), "a", torch.randn(2, 2)))
+ self.checkScript(setdefault, (self.dict(), "nonexistant", torch.randn(2, 2)))
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_update(self):
- def update(a: Dict[str, Tensor], b: Dict[str, Tensor]) -> Tuple[Dict[str, Tensor], Dict[str, Tensor]]:
+ def update(
+ a: Dict[str, Tensor], b: Dict[str, Tensor]
+ ) -> Tuple[Dict[str, Tensor], Dict[str, Tensor]]:
a.update(b)
return a, b
@@ -1601,7 +1793,7 @@ class TestDict(JitTestCase):
def foo() -> Dict[str, int]:
a: Dict[str, int] = {}
for i in range(3):
- a.update({'a': i})
+ a.update({"a": i})
return a
self.checkScript(foo, ())
@@ -1609,28 +1801,30 @@ class TestDict(JitTestCase):
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_aug_assign(self):
def aug_assign_dict_tensor(a: Dict[str, Tensor]) -> Dict[str, Tensor]:
- a['a'] += 1
- a['b'] -= 12
- a['c'] *= 122
- a['c'] /= 2
- a['c'] %= 2
+ a["a"] += 1
+ a["b"] -= 12
+ a["c"] *= 122
+ a["c"] /= 2
+ a["c"] %= 2
return a
def aug_assign_dict_prim(a: Dict[str, float]) -> Dict[str, float]:
- a['a'] += 3.4
- a['b'] -= 2.4
- a['c'] *= 3.0
- a['c'] /= 2.0
- a['c'] %= 2.0
+ a["a"] += 3.4
+ a["b"] -= 2.4
+ a["c"] *= 3.0
+ a["c"] /= 2.0
+ a["c"] %= 2.0
return a
self.checkScript(aug_assign_dict_tensor, (self.dict(),))
- self.checkScript(aug_assign_dict_prim, ({'a': 3.0, 'b': 2.0, 'c': 4.0},))
+ self.checkScript(aug_assign_dict_prim, ({"a": 3.0, "b": 2.0, "c": 4.0},))
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_popitem(self):
@torch.jit.script
- def popitem(x: Dict[str, Tensor]) -> Tuple[Tuple[str, Tensor], Dict[str, Tensor]]:
+ def popitem(
+ x: Dict[str, Tensor]
+ ) -> Tuple[Tuple[str, Tensor], Dict[str, Tensor]]:
item = x.popitem()
return item, x
@@ -1660,13 +1854,13 @@ class TestDict(JitTestCase):
def get(x: Dict[str, Tensor], key: str) -> Optional[Tensor]:
return x.get(key)
- self.checkScript(get, (self.dict(), 'a'))
+ self.checkScript(get, (self.dict(), "a"))
self.checkScript(get, (self.dict(), "doesn't exist"))
def get_default(x: Dict[str, Tensor], key: str) -> Optional[Tensor]:
return x.get(key, torch.randn(2, 2))
- self.checkScript(get, (self.dict(), 'a'))
+ self.checkScript(get, (self.dict(), "a"))
self.checkScript(get, (self.dict(), "doesn't exist"))
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
@@ -1688,12 +1882,12 @@ class TestDict(JitTestCase):
def simple(x: Dict[str, int]) -> Dict[str, int]:
return x
- self.checkScript(simple, ({'item': 20, 'other_item': 120},))
+ self.checkScript(simple, ({"item": 20, "other_item": 120},))
def index(x: Dict[str, int]) -> int:
- return x['item']
+ return x["item"]
- self.checkScript(index, ({'item': 20, 'other_item': 120},))
+ self.checkScript(index, ({"item": 20, "other_item": 120},))
def type_default() -> Dict[str, Tensor]:
return {}
@@ -1702,29 +1896,35 @@ class TestDict(JitTestCase):
@torch.jit.script
def missing_index(x: Dict[str, int]) -> int:
- return x['dne']
+ return x["dne"]
- with self.assertRaisesRegexWithHighlight(RuntimeError, "KeyError", "x['dne'"):
- missing_index({'item': 20, 'other_item': 120})
+ with self.assertRaisesRegexWithHighlight(RuntimeError, "KeyError", 'x["dne"'):
+ missing_index({"item": 20, "other_item": 120})
- code = dedent('''
+ code = dedent(
+ """
def literal1():
return torch.jit.annotate(Dict[int, float], {})
def literal2():
return torch.jit.annotate(Dict[int, float], {10: 1.2})
- ''')
+ """
+ )
cu = torch.jit.CompilationUnit(code)
self.assertEqual({}, cu.literal1())
self.assertEqual({10: 1.2}, cu.literal2())
- cu = torch.jit.CompilationUnit(dedent('''
+ cu = torch.jit.CompilationUnit(
+ dedent(
+ """
def literal3():
return torch.jit.annotate(Dict[int, float], {10: 1.2, 11: 1.3})
- '''))
+ """
+ )
+ )
self.assertEqual({10: 1.2, 11: 1.3}, cu.literal3())
def list_of_dicts() -> List[Dict[str, Tensor]]:
- return [{'word': torch.ones(2) + 3}, {'other word': torch.ones(1) + 2}]
+ return [{"word": torch.ones(2) + 3}, {"other word": torch.ones(1) + 2}]
self.checkScript(list_of_dicts, ())
@@ -1733,14 +1933,17 @@ class TestDict(JitTestCase):
@torch.jit.script
def fn() -> Dict[str, int]:
a = torch.jit.annotate(Dict[str, int], {})
- a['ok'] = 10
+ a["ok"] = 10
return a
- self.assertEqual(fn(), {'ok': 10})
+ self.assertEqual(fn(), {"ok": 10})
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_key_type(self):
- with self.assertRaisesRegexWithHighlight(RuntimeError, "but instead found type", "a[None]"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "but instead found type", "a[None]"
+ ):
+
@torch.jit.script
def fn(a: Dict[str, int]) -> int:
return a[None]
@@ -1751,10 +1954,10 @@ class TestDict(JitTestCase):
def fn(x: int) -> Dict[str, int]:
a = torch.jit.annotate(Dict[str, int], {})
for i in range(x):
- a['ok'] = i
+ a["ok"] = i
return a
- self.assertEqual(fn(10), {'ok': 9})
+ self.assertEqual(fn(10), {"ok": 9})
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_view(self):
@@ -1765,6 +1968,7 @@ class TestDict(JitTestCase):
x_view.add_(y)
b = x + x
return a == b
+
self.checkScript(fn, (torch.rand(2, 3), torch.rand(2, 3)))
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
@@ -1783,7 +1987,10 @@ class TestDict(JitTestCase):
self.checkScript(fn, (d, 3))
self.checkScript(fn, (d, 2))
- with self.assertRaisesRegexWithHighlight(RuntimeError, "is actually of type Optional", "return x.get(y"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "is actually of type Optional", "return x.get(y"
+ ):
+
@torch.jit.script
def bad_types(x: Dict[int, int], y: int) -> int:
return x.get(y) # noqa: T484
@@ -1797,8 +2004,8 @@ class TestDict(JitTestCase):
def fn(my_dict: Dict[str, int], keys: List[str]) -> List[int]:
return python_lookup(my_dict, keys)
- a_dict = {'a': torch.ones(1), 'b': torch.ones(1) + 1, 'c': torch.ones(1) + 2}
- self.checkScript(fn, (a_dict, ('a', 'c')))
+ a_dict = {"a": torch.ones(1), "b": torch.ones(1) + 1, "c": torch.ones(1) + 2}
+ self.checkScript(fn, (a_dict, ("a", "c")))
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_ordered_dict(self):
@@ -1838,7 +2045,9 @@ class TestDict(JitTestCase):
a[1] = 2
return a
- with self.assertRaisesRegexWithHighlight(Exception, "Arguments for call are not", "a[1] = 2"):
+ with self.assertRaisesRegexWithHighlight(
+ Exception, "Arguments for call are not", "a[1] = 2"
+ ):
torch.jit.script(test_dict_error)
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
@@ -1847,6 +2056,7 @@ class TestDict(JitTestCase):
Test that the use of a Dict type annotation without contained
key and value types produces an error.
"""
+
# This function uses a type comment.
def fn_with_comment(input: Dict) -> Any:
return input
@@ -1855,24 +2065,32 @@ class TestDict(JitTestCase):
def annotated_fn(input: Dict) -> Any:
return input
- with self.assertRaisesRegex(RuntimeError, r"Attempted to use Dict without contained types"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Attempted to use Dict without contained types"
+ ):
cu = torch.jit.CompilationUnit()
cu.define(dedent(inspect.getsource(fn_with_comment)))
- with self.assertRaisesRegex(RuntimeError, r"Attempted to use Dict without contained types"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Attempted to use Dict without contained types"
+ ):
cu = torch.jit.CompilationUnit()
cu.define(dedent(inspect.getsource(annotated_fn)))
- with self.assertRaisesRegex(RuntimeError, r"Attempted to use Dict without contained types"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Attempted to use Dict without contained types"
+ ):
m = torch.jit.script(fn_with_comment)
- with self.assertRaisesRegex(RuntimeError, r"Attempted to use Dict without contained types"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Attempted to use Dict without contained types"
+ ):
m = torch.jit.script(annotated_fn)
@skipIfTorchDynamo("TorchDynamo fails for this test for unknown reason")
def test_dict_preserves_order(self):
def dict_ordering():
- a : Dict[int, int] = {}
+ a: Dict[int, int] = {}
for i in range(1000):
a[i] = i + 1
return a
@@ -1973,7 +2191,7 @@ class TestNamedTuple(JitTestCase):
class MyModule(types.ModuleType):
def __init__(self):
- super().__init__('MyModule')
+ super().__init__("MyModule")
def __getattr__(self, attr):
return TheType
@@ -1987,12 +2205,12 @@ class TestNamedTuple(JitTestCase):
def test_namedtuple_slice_unpack(self):
class MyCoolNamedTuple(NamedTuple):
- a : int
- b : float
- c : List[int]
+ a: int
+ b: float
+ c: List[int]
@torch.jit.script
- def foo(a : int, b : float, c : List[int]):
+ def foo(a: int, b: float, c: List[int]):
tup = MyCoolNamedTuple(a, b, c)
my_a, my_b, my_c = tup
return tup[:1], my_a, my_c
@@ -2001,29 +2219,29 @@ class TestNamedTuple(JitTestCase):
def test_namedtuple_lower(self):
class MyCoolNamedTuple(NamedTuple):
- a : int
- b : float
- c : List[int]
+ a: int
+ b: float
+ c: List[int]
@torch.jit.script
- def foo(a : int):
+ def foo(a: int):
tup = MyCoolNamedTuple(a, 3.14, [9])
return tup
- FileCheck().check('TupleConstruct').run(foo.graph)
+ FileCheck().check("TupleConstruct").run(foo.graph)
torch._C._jit_pass_lower_all_tuples(foo.graph)
- FileCheck().check_not('TupleConstruct').run(foo.graph)
+ FileCheck().check_not("TupleConstruct").run(foo.graph)
def test_namedtuple_type_annotation(self):
global MyCoolNamedTuple # see [local resolution in python]
class MyCoolNamedTuple(NamedTuple):
- a : int
- b : float
- c : List[int]
+ a: int
+ b: float
+ c: List[int]
@torch.jit.script
- def foo(x : MyCoolNamedTuple) -> MyCoolNamedTuple:
+ def foo(x: MyCoolNamedTuple) -> MyCoolNamedTuple:
return x
mnt = MyCoolNamedTuple(42, 420.0, [666])
@@ -2031,22 +2249,26 @@ class TestNamedTuple(JitTestCase):
def test_namedtuple_wrong_types(self):
class MyCoolNamedTuple(NamedTuple):
- a : int
- b : float
- c : List[int]
+ a: int
+ b: float
+ c: List[int]
+
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Expected a value of type 'int' for argument 'a'"
+ " but instead found type 'str'",
+ ):
- with self.assertRaisesRegex(RuntimeError, "Expected a value of type 'int' for argument 'a'"
- " but instead found type 'str'"):
@torch.jit.script
def foo():
- tup = MyCoolNamedTuple('foo', 'bar', 'baz')
+ tup = MyCoolNamedTuple("foo", "bar", "baz")
return tup
def test_namedtuple_kwarg_construct(self):
class MyCoolNamedTuple(NamedTuple):
- a : int
- b : float
- c : List[int]
+ a: int
+ b: float
+ c: List[int]
@torch.jit.script
def foo():
@@ -2061,9 +2283,9 @@ class TestNamedTuple(JitTestCase):
@unittest.skipIf(True, "broken while these tests were not in CI")
def test_namedtuple_serialization(self):
class MyCoolNamedTuple(NamedTuple):
- a : int
- b : float
- c : List[int]
+ a: int
+ b: float
+ c: List[int]
class MyMod(torch.jit.ScriptModule):
@torch.jit.script_method
@@ -2071,21 +2293,21 @@ class TestNamedTuple(JitTestCase):
return MyCoolNamedTuple(3, 3.5, [3, 4, 5])
mm = MyMod()
- mm.save('foo.zip')
+ mm.save("foo.zip")
torch.testing._internal.jit_utils.clear_class_registry()
- loaded = torch.jit.load('foo.zip')
+ loaded = torch.jit.load("foo.zip")
out = mm()
out_loaded = loaded()
- for name in ['a', 'b', 'c']:
+ for name in ["a", "b", "c"]:
self.assertEqual(getattr(out_loaded, name), getattr(out, name))
def test_namedtuple_inside_forwardref(self):
class FeatureVector(NamedTuple):
- float_features: 'float'
- sequence_features: 'List[float]'
- time_since_first: 'float'
+ float_features: "float"
+ sequence_features: "List[float]"
+ time_since_first: "float"
@torch.jit.script
def foo(x) -> float:
@@ -2100,9 +2322,9 @@ class TestNamedTuple(JitTestCase):
def test_namedtuple_input_forwardref(self):
class MyNamedTuple(NamedTuple):
- a : 'int'
- b : 'float'
- c : 'torch.Tensor'
+ a: "int"
+ b: "float"
+ c: "torch.Tensor"
make_global(MyNamedTuple)
@@ -2120,11 +2342,11 @@ class TestNamedTuple(JitTestCase):
@unittest.expectedFailure
def test_namedtuple_resolution_forwardref(self):
class TheType(NamedTuple):
- t: 'int'
+ t: "int"
class MyModule(types.ModuleType):
def __init__(self):
- super().__init__('MyModule')
+ super().__init__("MyModule")
def __getattr__(self, attr):
return TheType
@@ -2150,12 +2372,14 @@ class TestScriptDict(JitTestCase):
by torch.jit.script behave like dictionaries do so that they are fungible
    in almost all circumstances with regular dictionaries.
"""
+
def _script_dict_add(self, d: torch._C.ScriptDict, k: int, v: int):
"""
This is a helper function that inserts the pair (k, v) into the
dictionary d in TorchScript. It is used for testing reference
semantics.
"""
+
@torch.jit.script
def dict_add(d: Dict[int, int], k: int, v: int):
d[k] = v
@@ -2223,6 +2447,7 @@ class TestScriptDict(JitTestCase):
"""
Test iteration over a dictionary's keys.
"""
+
def sum_keys(input_dict):
s = 0
for k in input_dict:
@@ -2236,6 +2461,7 @@ class TestScriptDict(JitTestCase):
"""
Test .items().
"""
+
def sum_pair_product(input_dict):
s = 0
for k, v in input_dict.items():
@@ -2287,7 +2513,12 @@ class TestScriptDict(JitTestCase):
data = {1: 2, 3: 4}
def fn(input_dict):
- return 1 in input_dict, 2 not in input_dict, 3 in input_dict, 4 not in input_dict
+ return (
+ 1 in input_dict,
+ 2 not in input_dict,
+ 3 in input_dict,
+ 4 not in input_dict,
+ )
self._compare_eager_and_script(fn, data)
@@ -2327,13 +2558,17 @@ class TestScriptDict(JitTestCase):
self._compare_eager_and_script(lambda d: len(d), {1: 2})
self._compare_eager_and_script(lambda d: len(d), {})
- @unittest.skip("Cannot pass until all dicts returned from TorchScript are ScriptDicts")
+ @unittest.skip(
+ "Cannot pass until all dicts returned from TorchScript are ScriptDicts"
+ )
def test_nested(self):
"""
Test that reference semantics are honoured when the ScriptDict that is
mutated using TorchScript is inside another.
"""
- nested = torch.jit.script({1: {1: 2}, 2: {3: 4}}, type_hint=Dict[int, Dict[int, int]])
+ nested = torch.jit.script(
+ {1: {1: 2}, 2: {3: 4}}, type_hint=Dict[int, Dict[int, int]]
+ )
one = nested[1]
two = nested[2]
@@ -2374,12 +2609,14 @@ class TestScriptList(JitTestCase):
torch._C.ScriptList behave like lists do so that they are fungible
in almost all circumstances with regular lists.
"""
+
def _script_list_add(self, l: torch._C.ScriptList, e: int):
"""
This is a helper function that inserts the element e into the
list l in TorchScript. It is used for testing reference
semantics.
"""
+
@torch.jit.script
def list_add(l: List[int], e: int):
l.append(e)
@@ -2446,6 +2683,7 @@ class TestScriptList(JitTestCase):
"""
Test iteration over a list's elements.
"""
+
def sum_elements(input_list):
s = 0
for k in input_list:
@@ -2533,7 +2771,12 @@ class TestScriptList(JitTestCase):
data = [1, 2, 3, 4]
def fn(input_list):
- return 1 in input_list, 2 not in input_list, 3 in input_list, 4 not in input_list
+ return (
+ 1 in input_list,
+ 2 not in input_list,
+ 3 in input_list,
+ 4 not in input_list,
+ )
self._compare_eager_and_script(fn, data)
@@ -2617,6 +2860,7 @@ class TestScriptList(JitTestCase):
"""
Test extend.
"""
+
class Iterable:
def __init__(self, limit: int):
self.limit = limit
@@ -2687,7 +2931,9 @@ class TestScriptList(JitTestCase):
# Test error cases.
self._compare_eager_and_script(lambda l: l.pop(10), data)
- @unittest.skip("Cannot pass until all list returned from TorchScript are ScriptLists")
+ @unittest.skip(
+ "Cannot pass until all list returned from TorchScript are ScriptLists"
+ )
def test_nested(self):
"""
Test that reference semantics are honoured when the ScriptList that is
diff --git a/test/jit/test_logging.py b/test/jit/test_logging.py
index bbefffadea..596b1439b3 100644
--- a/test/jit/test_logging.py
+++ b/test/jit/test_logging.py
@@ -10,10 +10,13 @@ pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
+
class TestLogging(JitTestCase):
def test_bump_numeric_counter(self):
@@ -22,30 +25,29 @@ class TestLogging(JitTestCase):
def forward(self, x):
for i in range(x.size(0)):
x += 1.0
- torch.jit._logging.add_stat_value('foo', 1)
+ torch.jit._logging.add_stat_value("foo", 1)
if bool(x.sum() > 0.0):
- torch.jit._logging.add_stat_value('positive', 1)
+ torch.jit._logging.add_stat_value("positive", 1)
else:
- torch.jit._logging.add_stat_value('negative', 1)
+ torch.jit._logging.add_stat_value("negative", 1)
return x
logger = torch.jit._logging.LockingLogger()
old_logger = torch.jit._logging.set_logger(logger)
try:
-
mtl = ModuleThatLogs()
for i in range(5):
mtl(torch.rand(3, 4, 5))
- self.assertEqual(logger.get_counter_val('foo'), 15)
- self.assertEqual(logger.get_counter_val('positive'), 5)
+ self.assertEqual(logger.get_counter_val("foo"), 15)
+ self.assertEqual(logger.get_counter_val("positive"), 5)
finally:
torch.jit._logging.set_logger(old_logger)
def test_trace_numeric_counter(self):
def foo(x):
- torch.jit._logging.add_stat_value('foo', 1)
+ torch.jit._logging.add_stat_value("foo", 1)
return x + 1.0
traced = torch.jit.trace(foo, torch.rand(3, 4))
@@ -54,7 +56,7 @@ class TestLogging(JitTestCase):
try:
traced(torch.rand(3, 4))
- self.assertEqual(logger.get_counter_val('foo'), 1)
+ self.assertEqual(logger.get_counter_val("foo"), 1)
finally:
torch.jit._logging.set_logger(old_logger)
@@ -65,7 +67,7 @@ class TestLogging(JitTestCase):
for i in range(30):
x += 1.0
tp_end = torch.jit._logging.time_point()
- torch.jit._logging.add_stat_value('mytimer', tp_end - tp_start)
+ torch.jit._logging.add_stat_value("mytimer", tp_end - tp_start)
return x
mtm = ModuleThatTimes()
@@ -73,7 +75,7 @@ class TestLogging(JitTestCase):
old_logger = torch.jit._logging.set_logger(logger)
try:
mtm(torch.rand(3, 4))
- self.assertGreater(logger.get_counter_val('mytimer'), 0)
+ self.assertGreater(logger.get_counter_val("mytimer"), 0)
finally:
torch.jit._logging.set_logger(old_logger)
@@ -85,7 +87,7 @@ class TestLogging(JitTestCase):
for i in range(30):
x += 1.0
tp_end = torch.jit._logging.time_point()
- torch.jit._logging.add_stat_value('mytimer', tp_end - tp_start)
+ torch.jit._logging.add_stat_value("mytimer", tp_end - tp_start)
return x
mtm = ModuleThatTimes()
@@ -93,27 +95,27 @@ class TestLogging(JitTestCase):
old_logger = torch.jit._logging.set_logger(logger)
try:
mtm(torch.rand(3, 4))
- self.assertGreater(logger.get_counter_val('mytimer'), 0)
+ self.assertGreater(logger.get_counter_val("mytimer"), 0)
finally:
torch.jit._logging.set_logger(old_logger)
def test_counter_aggregation(self):
def foo(x):
for i in range(3):
- torch.jit._logging.add_stat_value('foo', 1)
+ torch.jit._logging.add_stat_value("foo", 1)
return x + 1.0
traced = torch.jit.trace(foo, torch.rand(3, 4))
logger = torch.jit._logging.LockingLogger()
- logger.set_aggregation_type('foo', torch.jit._logging.AggregationType.AVG)
+ logger.set_aggregation_type("foo", torch.jit._logging.AggregationType.AVG)
old_logger = torch.jit._logging.set_logger(logger)
try:
traced(torch.rand(3, 4))
- self.assertEqual(logger.get_counter_val('foo'), 1)
+ self.assertEqual(logger.get_counter_val("foo"), 1)
finally:
torch.jit._logging.set_logger(old_logger)
def test_logging_levels_set(self):
- torch._C._jit_set_logging_option('foo')
- self.assertEqual('foo', torch._C._jit_get_logging_option())
+ torch._C._jit_set_logging_option("foo")
+ self.assertEqual("foo", torch._C._jit_get_logging_option())
diff --git a/test/jit/test_misc.py b/test/jit/test_misc.py
index be5efa8d99..8d58ab860d 100644
--- a/test/jit/test_misc.py
+++ b/test/jit/test_misc.py
@@ -1,28 +1,32 @@
# Owner(s): ["oncall: jit"]
-from typing import Any, Dict, List, Optional, Tuple
-
-from torch.testing._internal.jit_utils import JitTestCase, make_global
-from torch.testing import FileCheck
-from torch import jit
-from jit.test_module_interface import TestModuleInterface # noqa: F401
import os
import sys
+import unittest
+from typing import Any, Dict, List, Optional, Tuple
+
import torch
-import torch.testing._internal.jit_utils
import torch.nn as nn
-import unittest
+import torch.testing._internal.jit_utils
+from torch import jit
+from torch.testing import FileCheck
from torch.testing._internal.common_utils import freeze_rng_state
-from torch.testing._internal.jit_utils import RUN_CUDA_HALF
+
+from torch.testing._internal.jit_utils import JitTestCase, make_global, RUN_CUDA_HALF
+
+from jit.test_module_interface import TestModuleInterface # noqa: F401
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
+
class TestMisc(JitTestCase):
def test_joined_str(self):
@@ -30,12 +34,12 @@ class TestMisc(JitTestCase):
hello, test = "Hello", "test"
print(f"{hello + ' ' + test}, I'm a {test}")
print("format blank")
- hi = 'hi'
+ hi = "hi"
print(f"stuff before {hi}")
print(f"{hi} stuff after")
return x + 1
- x = torch.arange(4., requires_grad=True)
+ x = torch.arange(4.0, requires_grad=True)
# TODO: Add support for f-strings in string parser frontend
# self.checkScript(func, [x], optimize=True, capture_output=True)
@@ -50,10 +54,14 @@ class TestMisc(JitTestCase):
self.assertEqual(captured, captured_script)
def test_kwarg_support(self):
- with self.assertRaisesRegex(torch.jit.frontend.NotSupportedError, "variable number of arguments"):
+ with self.assertRaisesRegex(
+ torch.jit.frontend.NotSupportedError, "variable number of arguments"
+ ):
+
class M(torch.nn.Module):
def forward(self, *, n_tokens: int, device_name: str = 2):
pass
+
torch.jit.script(M())
class M(torch.nn.Module):
@@ -62,32 +70,35 @@ class TestMisc(JitTestCase):
sm = torch.jit.script(M())
- with self.assertRaisesRegex(RuntimeError, "missing value for argument 'n_tokens'"):
+ with self.assertRaisesRegex(
+ RuntimeError, "missing value for argument 'n_tokens'"
+ ):
sm()
with self.assertRaisesRegex(RuntimeError, "positional arg"):
- sm(3, 'hello')
+ sm(3, "hello")
- self.assertEqual(sm(n_tokens=3, device_name='hello'), (3, 'hello'))
+ self.assertEqual(sm(n_tokens=3, device_name="hello"), (3, "hello"))
def test_tuple_subscripted_assign(self):
with self.assertRaisesRegex(RuntimeError, "subscripted assignment"):
+
@torch.jit.script
def foo(a: Tuple[int, int]) -> None:
a[0] = a[1]
with self.assertRaisesRegex(RuntimeError, "augmented assignment"):
+
@torch.jit.script
def bar(a: Tuple[int, int]) -> None:
a[0] += a[1]
def test_subexpression_List_Future(self):
-
@torch.jit.script
def fn(x: List[torch.jit.Future[int]]) -> torch.jit.Future[int]:
return x[0]
- FileCheck().check('Future[int]').check('Future[int]').run(fn.graph)
+ FileCheck().check("Future[int]").check("Future[int]").run(fn.graph)
def test_subexpression_Future_annotate(self):
@torch.jit.script
@@ -110,36 +121,40 @@ class TestMisc(JitTestCase):
if isinstance(x, str):
return x
return "foo"
+
forward = torch.jit.script(forward)
self.assertEqual(forward(1), "foo")
self.assertEqual(forward("bar"), "bar")
def test_subexpression_Tuple_int_int_Future(self):
-
@torch.jit.script
- def fn(x: Tuple[int, int, torch.jit.Future[int]]) -> Tuple[int, torch.jit.Future[int]]:
+ def fn(
+ x: Tuple[int, int, torch.jit.Future[int]]
+ ) -> Tuple[int, torch.jit.Future[int]]:
return x[0], x[2]
- FileCheck().check('(int, int, Future[int])').check('(int, Future[int])').run(fn.graph)
+ FileCheck().check("(int, int, Future[int])").check("(int, Future[int])").run(
+ fn.graph
+ )
def test_subexpression_Dict_int_Future(self):
-
@torch.jit.script
def fn(x: Dict[int, torch.jit.Future[int]], y: int) -> torch.jit.Future[int]:
return x[y]
- FileCheck().check('Dict(int, Future(int))').check('Future[int]').run(fn.graph)
+ FileCheck().check("Dict(int, Future(int))").check("Future[int]").run(fn.graph)
def test_subexpression_Optional(self):
-
@torch.jit.script
- def fn(x: Optional[Dict[int, torch.jit.Future[int]]]) -> Optional[torch.jit.Future[int]]:
+ def fn(
+ x: Optional[Dict[int, torch.jit.Future[int]]]
+ ) -> Optional[torch.jit.Future[int]]:
if x is not None:
return x[0]
else:
return None
- FileCheck().check('Dict(int, Future(int))?').run(fn.graph)
+ FileCheck().check("Dict(int, Future(int))?").run(fn.graph)
def test_if_returning_any(self):
"""
@@ -147,6 +162,7 @@ class TestMisc(JitTestCase):
types early from each branch when the return
type of the function is Any.
"""
+
def if_function(inp: torch.Tensor) -> Any:
if inp.shape[0] == 1:
return inp * inp
@@ -156,14 +172,23 @@ class TestMisc(JitTestCase):
self.checkScript(if_function, (torch.randn(5),))
def test_hacked_twin(self):
-
def gen_data():
with freeze_rng_state():
return torch.randn(10), torch.randint(10, (20,)), torch.randn(20)
- input, index, value, = gen_data()
- input1, index1, value1, = gen_data()
- out1 = torch.ops.aten.index_put.hacked_twin(input, [index], value, accumulate=False)
+ (
+ input,
+ index,
+ value,
+ ) = gen_data()
+ (
+ input1,
+ index1,
+ value1,
+ ) = gen_data()
+ out1 = torch.ops.aten.index_put.hacked_twin(
+ input, [index], value, accumulate=False
+ )
out2 = torch.index_put(input1, [index1], value1, accumulate=False)
self.assertEqual(out1, out2)
@@ -172,14 +197,23 @@ class TestMisc(JitTestCase):
self.assertEqual(input, input1)
def test_unsafe_hacked_twin(self):
-
def gen_data():
with freeze_rng_state():
return torch.randn(10), torch.randint(10, (20,)), torch.randn(20)
- input, index, value, = gen_data()
- input1, index1, value1, = gen_data()
- out1 = torch.ops.aten._unsafe_index_put.hacked_twin(input, [index], value, accumulate=False)
+ (
+ input,
+ index,
+ value,
+ ) = gen_data()
+ (
+ input1,
+ index1,
+ value1,
+ ) = gen_data()
+ out1 = torch.ops.aten._unsafe_index_put.hacked_twin(
+ input, [index], value, accumulate=False
+ )
out2 = torch.index_put(input1, [index1], value1, accumulate=False)
self.assertEqual(out1, out2)
@@ -188,7 +222,9 @@ class TestMisc(JitTestCase):
self.assertEqual(input, input1)
def index_put_fn(input, index, value):
- return torch.ops.aten._unsafe_index_put(input, [index], value, accumulate=False)
+ return torch.ops.aten._unsafe_index_put(
+ input, [index], value, accumulate=False
+ )
input2, index2, value2 = gen_data()
script_index_put_fn = torch.jit.script(index_put_fn)
@@ -197,7 +233,9 @@ class TestMisc(JitTestCase):
self.assertEqual(expect, actual)
def index_fn(input, index, value):
- return torch.ops.aten._unsafe_index_put(input, [index], value, accumulate=False)
+ return torch.ops.aten._unsafe_index_put(
+ input, [index], value, accumulate=False
+ )
script_index_fn = torch.jit.script(index_fn)
expect = index_fn(input2.clone(), index2, value2)
@@ -205,7 +243,6 @@ class TestMisc(JitTestCase):
self.assertEqual(expect, actual)
def test_export_opnames_interface(self):
-
@torch.jit.interface
class OneTwoModule(nn.Module):
def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
@@ -240,7 +277,7 @@ class TestMisc(JitTestCase):
make_global(OneTwoModule)
class M(nn.Module):
- sub : OneTwoModule
+ sub: OneTwoModule
def __init__(self):
super().__init__()
@@ -254,12 +291,18 @@ class TestMisc(JitTestCase):
torch._C._enable_mobile_interface_call_export()
scripted_M_mod = torch.jit.script(M())
- self.assertTrue({'aten::mul.Scalar', 'aten::mul.Tensor', 'aten::reciprocal'}.issubset(
- set(torch.jit.export_opnames(scripted_M_mod))))
+ self.assertTrue(
+ {"aten::mul.Scalar", "aten::mul.Tensor", "aten::reciprocal"}.issubset(
+ set(torch.jit.export_opnames(scripted_M_mod))
+ )
+ )
scripted_M_mod.sub = torch.jit.script(FooMod())
- self.assertTrue({'aten::add.Tensor', 'aten::mul.Scalar'}.issubset(
- set(torch.jit.export_opnames(scripted_M_mod))))
+ self.assertTrue(
+ {"aten::add.Tensor", "aten::mul.Scalar"}.issubset(
+ set(torch.jit.export_opnames(scripted_M_mod))
+ )
+ )
def test_math_inf(self):
from math import inf
@@ -292,7 +335,6 @@ class TestMisc(JitTestCase):
with self.assertRaises(RuntimeError):
torch.jit.script(non_temporary_fail)
-
@torch.jit.script
def test_return():
return []
@@ -335,7 +377,9 @@ class TestMisc(JitTestCase):
def multiple_args():
return torch.LongTensor(1, [2])
- with self.assertRaisesRegex(RuntimeError, "multiple positional arguments that were not all integers"):
+ with self.assertRaisesRegex(
+ RuntimeError, "multiple positional arguments that were not all integers"
+ ):
torch.jit.script(multiple_args)
# kwarg bad schema
@@ -345,7 +389,6 @@ class TestMisc(JitTestCase):
with self.assertRaisesRegex(RuntimeError, "hello"):
torch.jit.script(bad_kwarg)
-
def test_broadcasting_list(self):
"""
Test BroadcastingList and torch.nn._size_N_t alias
@@ -360,7 +403,7 @@ class TestMisc(JitTestCase):
return x[0] + x[1]
self.assertTrue(torch.jit.script(sum_i)(4) == 8)
- self.assertTrue(torch.jit.script(sum_f)(4.5) == 9.)
+ self.assertTrue(torch.jit.script(sum_f)(4.5) == 9.0)
def test_parse_ir_annotate(self):
ir = """
@@ -397,7 +440,6 @@ class TestMisc(JitTestCase):
self.assertTrue(ret.numel() == 1)
self.assertTrue(len(ret.size()) == 1)
-
def test_script_many_decorators(self):
def no_op_decorator(f):
return f
@@ -410,7 +452,9 @@ class TestMisc(JitTestCase):
def foo(x, dim: int):
return x.unsqueeze(dim)
- x = torch.randn(1,)
+ x = torch.randn(
+ 1,
+ )
expected = foo(x, 0)
scripted = torch.jit.script(foo)
actual = scripted(x, 0)
@@ -421,10 +465,10 @@ class TestMisc(JitTestCase):
# https://github.com/pytorch/pytorch/issues/75476
def fn(p: torch.Tensor, gamma: float = 2.0) -> torch.Tensor:
p = torch.sigmoid(p)
- result = p ** gamma
+ result = p**gamma
return result
- x = torch.rand((2, 2), dtype=torch.half, device='cuda')
+ x = torch.rand((2, 2), dtype=torch.half, device="cuda")
ref = fn(x)
@@ -450,8 +494,12 @@ class TestMisc(JitTestCase):
# We want "Scalar" to come before "complex".
op, override_names = torch._C._jit_get_operation("aten::add")
print(override_names)
- complex_indices = [i for i, name in enumerate(override_names) if name == "complex"]
- Scalar_indices = [i for i, name in enumerate(override_names) if name == "Scalar"]
+ complex_indices = [
+ i for i, name in enumerate(override_names) if name == "complex"
+ ]
+ Scalar_indices = [
+ i for i, name in enumerate(override_names) if name == "Scalar"
+ ]
self.assertTrue(len(complex_indices) > 0)
self.assertTrue(len(Scalar_indices) > 0)
diff --git a/test/jit/test_models.py b/test/jit/test_models.py
index e3db89e5a3..acaf1a0bf8 100644
--- a/test/jit/test_models.py
+++ b/test/jit/test_models.py
@@ -3,27 +3,33 @@
import os
import sys
import unittest
-from torch.testing._internal.common_utils import (
- enable_profiling_mode_for_profiling_tests, GRAPH_EXECUTOR, ProfilingMode,
- set_default_dtype,
-)
+
import torch
import torch.nn as nn
import torch.nn.functional as F
+from torch.testing._internal.common_utils import (
+ enable_profiling_mode_for_profiling_tests,
+ GRAPH_EXECUTOR,
+ ProfilingMode,
+ set_default_dtype,
+)
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
-from torch.testing._internal.jit_utils import JitTestCase, RUN_CUDA
from torch.testing._internal.common_utils import slowTest, suppress_warnings
+from torch.testing._internal.jit_utils import JitTestCase, RUN_CUDA
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
try:
import torchvision
+
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
@@ -31,6 +37,7 @@ except RuntimeError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
+
class MnistNet(nn.Module):
def __init__(self):
super().__init__()
@@ -49,6 +56,7 @@ class MnistNet(nn.Module):
x = self.fc2(x)
return F.log_softmax(x, dim=1)
+
class TestModels(JitTestCase):
@staticmethod
def _test_dcgan_models(self, device, check_export_import=True):
@@ -102,31 +110,38 @@ class TestModels(JitTestCase):
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*8) x 4 x 4
nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
- nn.Sigmoid()
+ nn.Sigmoid(),
)
def forward(self, input):
return self.main(input).view(-1, 1).squeeze(1)
bs, nz, ngf, nc, ndf = 5, 6, 9, 3, 10
- self.checkTrace(DCGANGenerator(nz, ngf, nc).to(device),
- (torch.rand(bs, nz, 1, 1, device=device),),
- export_import=check_export_import)
- example_input = DCGANGenerator(nz, ngf, nc).to(device)(torch.rand(bs, nz, 1, 1, device=device))
- self.checkTrace(DCGANDiscriminator(nc, ndf).to(device), (example_input,),
- export_import=check_export_import)
+ self.checkTrace(
+ DCGANGenerator(nz, ngf, nc).to(device),
+ (torch.rand(bs, nz, 1, 1, device=device),),
+ export_import=check_export_import,
+ )
+ example_input = DCGANGenerator(nz, ngf, nc).to(device)(
+ torch.rand(bs, nz, 1, 1, device=device)
+ )
+ self.checkTrace(
+ DCGANDiscriminator(nc, ndf).to(device),
+ (example_input,),
+ export_import=check_export_import,
+ )
def test_dcgan_models(self):
# Note: Can sometimes fail with low precision if run with float dtype
with set_default_dtype(torch.double):
- self._test_dcgan_models(self, device='cpu')
+ self._test_dcgan_models(self, device="cpu")
@unittest.skipIf(not RUN_CUDA, "no CUDA")
def test_dcgan_models_cuda(self):
# Note: Can sometimes fail with low precision if run with float dtype
with set_default_dtype(torch.double):
# XXX: export_import on CUDA modules doesn't work (#11480)
- self._test_dcgan_models(self, device='cuda', check_export_import=False)
+ self._test_dcgan_models(self, device="cuda", check_export_import=False)
@staticmethod
def _test_neural_style(self, device, check_export_import=True):
@@ -147,9 +162,13 @@ class TestModels(JitTestCase):
self.res4 = ResidualBlock(128)
self.res5 = ResidualBlock(128)
# Upsampling Layers
- self.deconv1 = UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2)
+ self.deconv1 = UpsampleConvLayer(
+ 128, 64, kernel_size=3, stride=1, upsample=2
+ )
self.in4 = torch.nn.InstanceNorm2d(64, affine=True)
- self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2)
+ self.deconv2 = UpsampleConvLayer(
+ 64, 32, kernel_size=3, stride=1, upsample=2
+ )
self.in5 = torch.nn.InstanceNorm2d(32, affine=True)
self.deconv3 = ConvLayer(32, 3, kernel_size=9, stride=1)
# Non-linearities
@@ -174,7 +193,9 @@ class TestModels(JitTestCase):
super().__init__()
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
- self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride)
+ self.conv2d = torch.nn.Conv2d(
+ in_channels, out_channels, kernel_size, stride
+ )
def forward(self, x):
out = self.reflection_pad(x)
@@ -209,14 +230,20 @@ class TestModels(JitTestCase):
ref: http://distill.pub/2016/deconv-checkerboard/
"""
- def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None):
+ def __init__(
+ self, in_channels, out_channels, kernel_size, stride, upsample=None
+ ):
super().__init__()
self.upsample = upsample
if upsample:
- self.upsample_layer = torch.nn.Upsample(mode='nearest', scale_factor=upsample)
+ self.upsample_layer = torch.nn.Upsample(
+ mode="nearest", scale_factor=upsample
+ )
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
- self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride)
+ self.conv2d = torch.nn.Conv2d(
+ in_channels, out_channels, kernel_size, stride
+ )
def forward(self, x):
x_in = x
@@ -226,44 +253,54 @@ class TestModels(JitTestCase):
out = self.conv2d(out)
return out
- self.checkTrace(TransformerNet(), (torch.rand(5, 3, 16, 16),), export_import=check_export_import)
+ self.checkTrace(
+ TransformerNet(),
+ (torch.rand(5, 3, 16, 16),),
+ export_import=check_export_import,
+ )
@slowTest
def test_neural_style(self):
- self._test_neural_style(self, device='cpu')
+ self._test_neural_style(self, device="cpu")
@unittest.skipIf(not RUN_CUDA, "no CUDA")
def test_neural_style_cuda(self):
# XXX: export_import on CUDA modules doesn't work (#11480)
- self._test_neural_style(self, device='cuda', check_export_import=False)
+ self._test_neural_style(self, device="cuda", check_export_import=False)
- @unittest.skipIf(GRAPH_EXECUTOR == ProfilingMode.LEGACY, "Bug found in deprecated executor")
+ @unittest.skipIf(
+ GRAPH_EXECUTOR == ProfilingMode.LEGACY, "Bug found in deprecated executor"
+ )
@staticmethod
def _test_mnist(self, device, check_export_import=True):
# eval() is present because dropout makes this nondeterministic
with enable_profiling_mode_for_profiling_tests():
- self.checkTrace(MnistNet().to(device).eval(), (torch.rand(5, 1, 28, 28, device=device),),
- export_import=check_export_import)
+ self.checkTrace(
+ MnistNet().to(device).eval(),
+ (torch.rand(5, 1, 28, 28, device=device),),
+ export_import=check_export_import,
+ )
def test_mnist(self):
- self._test_mnist(self, device='cpu')
+ self._test_mnist(self, device="cpu")
@unittest.skipIf(not RUN_CUDA, "no CUDA")
def test_mnist_cuda(self):
# XXX: export_import on CUDA modules doesn't work (#11480)
- self._test_mnist(self, device='cuda', check_export_import=False)
+ self._test_mnist(self, device="cuda", check_export_import=False)
@unittest.skipIf(not RUN_CUDA, "no CUDA")
def test_mnist_training_leaks_no_memory_cuda(self):
net = MnistNet().cuda()
# MnistNet uses dropout, don't check its trace
- traced_net = torch.jit.trace(net, [torch.randn(5, 1, 28, 28, device='cuda')],
- check_trace=False)
+ traced_net = torch.jit.trace(
+ net, [torch.randn(5, 1, 28, 28, device="cuda")], check_trace=False
+ )
def train(iters):
for _ in range(iters):
# Get some fake data
- inp = torch.randn(5, 1, 28, 28, device='cuda')
+ inp = torch.randn(5, 1, 28, 28, device="cuda")
out = traced_net(inp)
# Here's some fake loss
@@ -292,21 +329,23 @@ class TestModels(JitTestCase):
return F.softmax(action_scores, dim=1)
with enable_profiling_mode_for_profiling_tests():
- self.checkTrace(Policy().to(device), (torch.rand(1, 4, device=device),),
- export_import=test_export_import)
+ self.checkTrace(
+ Policy().to(device),
+ (torch.rand(1, 4, device=device),),
+ export_import=test_export_import,
+ )
def test_reinforcement_learning(self):
- self._test_reinforcement_learning(self, device='cpu')
+ self._test_reinforcement_learning(self, device="cpu")
@unittest.skipIf(not RUN_CUDA, "no CUDA")
def test_reinforcement_learning_cuda(self):
# XXX: export_import on CUDA modules doesn't work (#11480)
- self._test_reinforcement_learning(self, device='cuda', test_export_import=False)
+ self._test_reinforcement_learning(self, device="cuda", test_export_import=False)
@staticmethod
def _test_snli(self, device, check_export_import=True):
class Bottle(nn.Module):
-
def forward(self, input):
if len(input.size()) <= 2:
return super().forward(input)
@@ -318,25 +357,31 @@ class TestModels(JitTestCase):
pass
class Encoder(nn.Module):
-
def __init__(self, config):
super().__init__()
self.config = config
input_size = config.d_proj if config.projection else config.d_embed
dropout = 0 if config.n_layers == 1 else config.dp_ratio
- self.rnn = nn.LSTM(input_size=input_size, hidden_size=config.d_hidden,
- num_layers=config.n_layers, dropout=dropout,
- bidirectional=config.birnn)
+ self.rnn = nn.LSTM(
+ input_size=input_size,
+ hidden_size=config.d_hidden,
+ num_layers=config.n_layers,
+ dropout=dropout,
+ bidirectional=config.birnn,
+ )
def forward(self, inputs):
batch_size = inputs.size()[1]
state_shape = self.config.n_cells, batch_size, self.config.d_hidden
h0 = c0 = inputs.new_zeros(state_shape)
outputs, (ht, ct) = self.rnn(inputs, (h0, c0))
- return ht[-1] if not self.config.birnn else ht[-2:].transpose(0, 1).contiguous().view(batch_size, -1)
+ return (
+ ht[-1]
+ if not self.config.birnn
+ else ht[-2:].transpose(0, 1).contiguous().view(batch_size, -1)
+ )
class SNLIClassifier(nn.Module):
-
def __init__(self, config):
super().__init__()
self.config = config
@@ -359,7 +404,8 @@ class TestModels(JitTestCase):
Linear(*lin_config),
self.relu,
self.dropout,
- Linear(seq_in_size, config.d_out))
+ Linear(seq_in_size, config.d_out),
+ )
def forward(self, premise, hypothesis):
prem_embed = self.embed(premise)
@@ -391,22 +437,25 @@ class TestModels(JitTestCase):
premise = torch.LongTensor(48, 64).random_(0, 100).to(device)
hypothesis = torch.LongTensor(24, 64).random_(0, 100).to(device)
- self.checkTrace(SNLIClassifier(Config()).to(device), (premise, hypothesis),
- inputs_require_grads=False, export_import=check_export_import)
+ self.checkTrace(
+ SNLIClassifier(Config()).to(device),
+ (premise, hypothesis),
+ inputs_require_grads=False,
+ export_import=check_export_import,
+ )
@slowTest
def test_snli(self):
- self._test_snli(self, device='cpu')
+ self._test_snli(self, device="cpu")
@unittest.skipIf(not RUN_CUDA, "no CUDA")
def test_snli_cuda(self):
# XXX: export_import on CUDA modules doesn't work (#11480)
- self._test_snli(self, device='cuda', check_export_import=False)
+ self._test_snli(self, device="cuda", check_export_import=False)
@staticmethod
def _test_super_resolution(self, device, check_export_import=True):
class Net(nn.Module):
-
def __init__(self, upscale_factor):
super().__init__()
@@ -414,7 +463,7 @@ class TestModels(JitTestCase):
self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2))
self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))
self.conv3 = nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1))
- self.conv4 = nn.Conv2d(32, upscale_factor ** 2, (3, 3), (1, 1), (1, 1))
+ self.conv4 = nn.Conv2d(32, upscale_factor**2, (3, 3), (1, 1), (1, 1))
self.pixel_shuffle = nn.PixelShuffle(upscale_factor)
def forward(self, x):
@@ -425,17 +474,20 @@ class TestModels(JitTestCase):
return x
net = Net(upscale_factor=4).to(device)
- self.checkTrace(net, (torch.rand(5, 1, 32, 32, device=device),),
- export_import=check_export_import)
+ self.checkTrace(
+ net,
+ (torch.rand(5, 1, 32, 32, device=device),),
+ export_import=check_export_import,
+ )
@slowTest
def test_super_resolution(self):
- self._test_super_resolution(self, device='cpu')
+ self._test_super_resolution(self, device="cpu")
- @unittest.skipIf(not RUN_CUDA, 'no CUDA')
+ @unittest.skipIf(not RUN_CUDA, "no CUDA")
def test_super_resolution_cuda(self):
# XXX: export_import on CUDA modules doesn't work (#11480)
- self._test_super_resolution(self, device='cuda', check_export_import=False)
+ self._test_super_resolution(self, device="cuda", check_export_import=False)
@suppress_warnings
def test_time_sequence_prediction(self):
@@ -485,8 +537,7 @@ class TestModels(JitTestCase):
# disabled due to a jitter issue that will be fixed by using load/store in the compiler
with torch._jit_internal._disable_emit_hooks():
# TODO: toggle export_import once above issues are fixed
- self.checkTrace(Traced(), (torch.rand(3, 4),),
- export_import=False)
+ self.checkTrace(Traced(), (torch.rand(3, 4),), export_import=False)
@staticmethod
def _test_vae(self, device, check_export_import=True):
@@ -523,22 +574,27 @@ class TestModels(JitTestCase):
with enable_profiling_mode_for_profiling_tests():
# eval() is present because randn_like makes this nondeterministic
- self.checkTrace(VAE().to(device).eval(), (torch.rand(128, 1, 28, 28, device=device),),
- export_import=check_export_import)
+ self.checkTrace(
+ VAE().to(device).eval(),
+ (torch.rand(128, 1, 28, 28, device=device),),
+ export_import=check_export_import,
+ )
def test_vae(self):
- self._test_vae(self, device='cpu')
+ self._test_vae(self, device="cpu")
@unittest.skipIf(not RUN_CUDA, "no CUDA")
def test_vae_cuda(self):
# XXX: export_import on CUDA modules doesn't work (#11480)
- self._test_vae(self, device='cuda', check_export_import=False)
+ self._test_vae(self, device="cuda", check_export_import=False)
@slowTest
@skipIfNoTorchVision
def test_script_module_trace_resnet18(self):
x = torch.ones(1, 3, 224, 224)
- m_orig = torch.jit.trace(torchvision.models.resnet18(), torch.ones(1, 3, 224, 224))
+ m_orig = torch.jit.trace(
+ torchvision.models.resnet18(), torch.ones(1, 3, 224, 224)
+ )
m_import = self.getExportImportCopy(m_orig)
input = torch.randn(1, 3, 224, 224, requires_grad=True)
@@ -559,16 +615,24 @@ class TestModels(JitTestCase):
def test_script_module_script_resnet(self):
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
- return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
+ return nn.Conv2d(
+ in_planes, out_planes, kernel_size=1, stride=stride, bias=False
+ )
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
- return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
- padding=1, bias=False)
+ return nn.Conv2d(
+ in_planes,
+ out_planes,
+ kernel_size=3,
+ stride=stride,
+ padding=1,
+ bias=False,
+ )
class BasicBlock(torch.jit.ScriptModule):
expansion = 1
- __constants__ = ['downsample']
+ __constants__ = ["downsample"]
def __init__(self, inplanes, planes, stride=1, downsample=None):
super().__init__()
@@ -600,13 +664,14 @@ class TestModels(JitTestCase):
return out
class ResNet(torch.jit.ScriptModule):
- __constants__ = ['layer1', 'layer2', 'layer3', 'layer4']
+ __constants__ = ["layer1", "layer2", "layer3", "layer4"]
def __init__(self, block, layers, num_classes=1000):
super().__init__()
self.inplanes = 64
- self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
- bias=False)
+ self.conv1 = nn.Conv2d(
+ 3, 64, kernel_size=7, stride=2, padding=3, bias=False
+ )
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
@@ -619,7 +684,9 @@ class TestModels(JitTestCase):
for m in self.modules():
if isinstance(m, nn.Conv2d):
- nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
+ nn.init.kaiming_normal_(
+ m.weight, mode="fan_out", nonlinearity="relu"
+ )
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
@@ -679,8 +746,10 @@ class TestModels(JitTestCase):
x = torch.ones(1, 3, 224, 224)
model = torchvision.models.AlexNet()
with torch.random.fork_rng(devices=[]):
- g, outputs, inputs = torch.jit._get_trace_graph(model, x, return_inputs=True)
- self.run_pass('cse', g)
+ g, outputs, inputs = torch.jit._get_trace_graph(
+ model, x, return_inputs=True
+ )
+ self.run_pass("cse", g)
m = self.createFunctionFromGraph(g)
with torch.random.fork_rng(devices=[]):
self.assertEqual(outputs, m(*inputs))
diff --git a/test/jit/test_module_apis.py b/test/jit/test_module_apis.py
index 06769764fb..960734f5f2 100644
--- a/test/jit/test_module_apis.py
+++ b/test/jit/test_module_apis.py
@@ -1,19 +1,23 @@
# Owner(s): ["oncall: jit"]
-import torch
import os
import sys
+from typing import Any, Dict, List
+
+import torch
from torch.testing._internal.jit_utils import JitTestCase
-from typing import Dict, Any, List
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
+
class TestModuleAPIs(JitTestCase):
def test_default_state_dict_methods(self):
@@ -52,18 +56,23 @@ class TestModuleAPIs(JitTestCase):
return x
@torch.jit.export
- def _save_to_state_dict(self, destination: Dict[str, torch.Tensor],
- prefix: str, keep_vars: bool):
+ def _save_to_state_dict(
+ self, destination: Dict[str, torch.Tensor], prefix: str, keep_vars: bool
+ ):
self.customized_save_state_dict_called = True
return {"dummy": torch.ones(1)}
@torch.jit.export
- def _load_from_state_dict(self,
- state_dict: Dict[str, torch.Tensor],
- prefix: str, local_metadata: Any,
- strict: bool, missing_keys: List[str],
- unexpected_keys: List[str],
- error_msgs: List[str]):
+ def _load_from_state_dict(
+ self,
+ state_dict: Dict[str, torch.Tensor],
+ prefix: str,
+ local_metadata: Any,
+ strict: bool,
+ missing_keys: List[str],
+ unexpected_keys: List[str],
+ error_msgs: List[str],
+ ):
self.customized_load_state_dict_called = True
return
@@ -94,18 +103,23 @@ class TestModuleAPIs(JitTestCase):
return x
@torch.jit.export
- def _save_to_state_dict(self, destination: Dict[str, torch.Tensor],
- prefix: str, keep_vars: bool):
+ def _save_to_state_dict(
+ self, destination: Dict[str, torch.Tensor], prefix: str, keep_vars: bool
+ ):
self.customized_save_state_dict_called = True
return {"dummy": torch.ones(1)}
@torch.jit.export
- def _load_from_state_dict(self,
- state_dict: Dict[str, torch.Tensor],
- prefix: str, local_metadata: Any,
- strict: bool, missing_keys: List[str],
- unexpected_keys: List[str],
- error_msgs: List[str]):
+ def _load_from_state_dict(
+ self,
+ state_dict: Dict[str, torch.Tensor],
+ prefix: str,
+ local_metadata: Any,
+ strict: bool,
+ missing_keys: List[str],
+ unexpected_keys: List[str],
+ error_msgs: List[str],
+ ):
self.customized_load_state_dict_called = True
return
diff --git a/test/jit/test_module_containers.py b/test/jit/test_module_containers.py
index 62699e0958..058dbca313 100644
--- a/test/jit/test_module_containers.py
+++ b/test/jit/test_module_containers.py
@@ -2,9 +2,10 @@
import os
import sys
+from collections import OrderedDict
from typing import Any, List, Tuple
-from collections import OrderedDict
+
import torch
import torch.nn as nn
from torch.testing._internal.jit_utils import JitTestCase
@@ -13,10 +14,13 @@ from torch.testing._internal.jit_utils import JitTestCase
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
+
class TestModuleContainers(JitTestCase):
def test_sequential_intermediary_types(self):
@@ -54,11 +58,13 @@ class TestModuleContainers(JitTestCase):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
- modules = OrderedDict([
- ('one', Inner()),
- ('two', Inner2()),
- ('three', Inner3()),
- ])
+ modules = OrderedDict(
+ [
+ ("one", Inner()),
+ ("two", Inner2()),
+ ("three", Inner3()),
+ ]
+ )
self.moduledict = nn.ModuleDict(modules)
def forward(self, x, skip_name):
@@ -115,7 +121,6 @@ class TestModuleContainers(JitTestCase):
return x, x2, names, iter
-
for name in ["", "one", "two", "three"]:
inp = torch.tensor(1)
self.checkModule(M(), (inp, name))
@@ -136,7 +141,7 @@ class TestModuleContainers(JitTestCase):
x = mod(x)
return x - 5
- self.checkModule(CustomSequential(), (torch.tensor(.5),))
+ self.checkModule(CustomSequential(), (torch.tensor(0.5),))
class CustomModuleList(nn.ModuleList):
def __init__(self):
@@ -148,16 +153,19 @@ class TestModuleContainers(JitTestCase):
x = mod(x)
return x - 5
- self.checkModule(CustomModuleList(), (torch.tensor(.5),))
+ self.checkModule(CustomModuleList(), (torch.tensor(0.5),))
class CustomModuleDict(nn.ModuleDict):
def __init__(self):
super().__init__(
- OrderedDict([
- ('one', Inner()),
- ('two', nn.ReLU()),
- ('three', Inner()),
- ]))
+ OrderedDict(
+ [
+ ("one", Inner()),
+ ("two", nn.ReLU()),
+ ("three", Inner()),
+ ]
+ )
+ )
def forward(self, x):
x = x + 3
@@ -167,7 +175,7 @@ class TestModuleContainers(JitTestCase):
names.append(name)
return names, x - 5
- self.checkModule(CustomModuleDict(), (torch.tensor(.5),))
+ self.checkModule(CustomModuleDict(), (torch.tensor(0.5),))
def test_script_module_list_sequential(self):
class M(torch.jit.ScriptModule):
@@ -225,7 +233,9 @@ class TestModuleContainers(JitTestCase):
def forward(self, v):
return self.mods[-11].forward(v)
- with self.assertRaisesRegexWithHighlight(Exception, "Index -11 out of range", "self.mods[-11]"):
+ with self.assertRaisesRegexWithHighlight(
+ Exception, "Index -11 out of range", "self.mods[-11]"
+ ):
torch.jit.script(M2())
class M3(M):
@@ -233,7 +243,9 @@ class TestModuleContainers(JitTestCase):
i = 3
return self.mods[i].forward(v)
- with self.assertRaisesRegexWithHighlight(Exception, "Enumeration is supported", "self.mods[i]"):
+ with self.assertRaisesRegexWithHighlight(
+ Exception, "Enumeration is supported", "self.mods[i]"
+ ):
torch.jit.script(M3())
class M4(M):
@@ -273,17 +285,23 @@ class TestModuleContainers(JitTestCase):
self.moduledict = CustomModuleDict({"submod": self.submod})
def forward(self, inputs):
- assert self.modulelist[0] is self.submod, "__getitem__ failing for ModuleList"
+ assert (
+ self.modulelist[0] is self.submod
+ ), "__getitem__ failing for ModuleList"
assert len(self.modulelist) == 1, "__len__ failing for ModuleList"
for module in self.modulelist:
assert module is self.submod, "__iter__ failing for ModuleList"
- assert self.sequential[0] is self.submod, "__getitem__ failing for Sequential"
+ assert (
+ self.sequential[0] is self.submod
+ ), "__getitem__ failing for Sequential"
assert len(self.sequential) == 1, "__len__ failing for Sequential"
for module in self.sequential:
assert module is self.submod, "__iter__ failing for Sequential"
- assert self.moduledict["submod"] is self.submod, "__getitem__ failing for ModuleDict"
+ assert (
+ self.moduledict["submod"] is self.submod
+ ), "__getitem__ failing for ModuleDict"
assert len(self.moduledict) == 1, "__len__ failing for ModuleDict"
# note: unable to index moduledict with a string variable currently
@@ -345,12 +363,13 @@ class TestModuleContainers(JitTestCase):
super().__init__()
self.relu = torch.jit.script(torch.nn.ReLU())
self.tanh = torch.jit.script(torch.nn.Tanh())
- self.moduledict = torch.nn.ModuleDict({"relu": self.relu,
- "tanh": self.tanh})
+ self.moduledict = torch.nn.ModuleDict(
+ {"relu": self.relu, "tanh": self.tanh}
+ )
def forward(self, input):
- assert self.moduledict['relu'] is self.relu
- assert self.moduledict['tanh'] is self.tanh
+ assert self.moduledict["relu"] is self.relu
+ assert self.moduledict["tanh"] is self.tanh
return input
m = MyModule()
@@ -360,31 +379,34 @@ class TestModuleContainers(JitTestCase):
class BadModule(torch.nn.Module):
def __init__(self):
super().__init__()
- self.moduledict = torch.nn.ModuleDict({"foo": None,
- "bar": None})
+ self.moduledict = torch.nn.ModuleDict({"foo": None, "bar": None})
def forward(self, input):
- assert self.moduledict['blah'] == "blah", "this is a keyerror"
+ assert self.moduledict["blah"] == "blah", "this is a keyerror"
- with self.assertRaisesRegexWithHighlight(RuntimeError, "Key Error, blah", "self.moduledict['blah'"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "Key Error, blah", 'self.moduledict["blah"'
+ ):
b = BadModule()
torch.jit.script(b)
class AnotherBadModule(torch.nn.Module):
def __init__(self):
super().__init__()
- self.moduledict = torch.nn.ModuleDict({"foo": None,
- "bar": None})
+ self.moduledict = torch.nn.ModuleDict({"foo": None, "bar": None})
def forward(self, input):
- idx = 'blah'
+ idx = "blah"
assert self.moduledict[idx] == "blah", "this is a string literal error"
- with self.assertRaisesRegexWithHighlight(RuntimeError, "Unable to extract string literal index. "
- "ModuleDict indexing is only supported with string literals. "
- "For example, 'i = \"a\"; self.layers\\[i\\]\\(x\\)' will fail "
- "because i is not a literal.",
- "self.moduledict[idx]"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError,
+ "Unable to extract string literal index. "
+ "ModuleDict indexing is only supported with string literals. "
+ "For example, 'i = \"a\"; self.layers\\[i\\]\\(x\\)' will fail "
+ "because i is not a literal.",
+ "self.moduledict[idx]",
+ ):
b = AnotherBadModule()
torch.jit.script(b)
@@ -393,6 +415,7 @@ class TestModuleContainers(JitTestCase):
Test that an attempt to script a module with a regular list attribute
containing other modules fails with a relevant error message.
"""
+
class Mod(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -422,7 +445,9 @@ class TestModuleContainers(JitTestCase):
self.moduledict = CustomModuleDict()
def forward(self, inputs):
- assert "submod" not in self.moduledict, "__contains__ fails for ModuleDict"
+ assert (
+ "submod" not in self.moduledict
+ ), "__contains__ fails for ModuleDict"
return inputs
m = MyModule()
@@ -433,6 +458,7 @@ class TestModuleContainers(JitTestCase):
Test that a type annotation can be provided for a ModuleDict that allows
non-static indexing.
"""
+
@torch.jit.interface
class ModuleInterface(torch.nn.Module):
def forward(self, inp: Any) -> Any:
@@ -485,7 +511,9 @@ class TestModuleContainers(JitTestCase):
submodule: ModuleInterface = self.d[key]
return submodule.forward(x)
- with self.assertRaisesRegexWithHighlight(RuntimeError, r"Attribute module is not of annotated type", "self.d[key]"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, r"Attribute module is not of annotated type", "self.d[key]"
+ ):
torch.jit.script(ModWithWrongAnnotation())
def test_typed_module_list(self):
@@ -493,6 +521,7 @@ class TestModuleContainers(JitTestCase):
Test that a type annotation can be provided for a ModuleList that allows
non-static indexing.
"""
+
@torch.jit.interface
class ModuleInterface(torch.nn.Module):
def forward(self, inp: Any) -> Any:
@@ -545,7 +574,9 @@ class TestModuleContainers(JitTestCase):
submodule: ModuleInterface = self.l[idx]
return submodule.forward(x)
- with self.assertRaisesRegexWithHighlight(RuntimeError, r"Attribute 0 is not of annotated type", "self.l[idx]"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, r"Attribute 0 is not of annotated type", "self.l[idx]"
+ ):
torch.jit.script(ModWithWrongAnnotation())
def test_module_properties(self):
@@ -596,10 +627,34 @@ class TestModuleContainers(JitTestCase):
def attr(self):
return self.a + 1
- self.checkModule(ModuleWithProperties(5), (5, 6,))
- self.checkModule(ModuleWithProperties(5), (-5, -6,))
- self.checkModule(ModuleWithNoSetter(5), (5, 6,))
- self.checkModule(ModuleWithNoSetter(5), (-5, -6,))
+ self.checkModule(
+ ModuleWithProperties(5),
+ (
+ 5,
+ 6,
+ ),
+ )
+ self.checkModule(
+ ModuleWithProperties(5),
+ (
+ -5,
+ -6,
+ ),
+ )
+ self.checkModule(
+ ModuleWithNoSetter(5),
+ (
+ 5,
+ 6,
+ ),
+ )
+ self.checkModule(
+ ModuleWithNoSetter(5),
+ (
+ -5,
+ -6,
+ ),
+ )
mod = ModuleWithProperties(3)
scripted_mod = torch.jit.script(mod)
@@ -625,7 +680,6 @@ class TestModuleContainers(JitTestCase):
def forward(self, x):
return self.linear(self.linear(x))
-
class N(nn.Module):
def __init__(self):
super().__init__()
@@ -659,7 +713,9 @@ class TestModuleContainers(JitTestCase):
def __init__(self):
super().__init__()
self.module_list = nn.ModuleList([nn.Linear(1, 1) for _ in range(10)])
- self.parameter_list = nn.ParameterList([nn.Parameter(torch.zeros(1)) for _ in range(10)])
+ self.parameter_list = nn.ParameterList(
+ [nn.Parameter(torch.zeros(1)) for _ in range(10)]
+ )
def forward(self, x):
self.module_list[0]
@@ -673,7 +729,9 @@ class TestModuleContainers(JitTestCase):
def __init__(self):
super().__init__()
self.module_list = nn.ModuleList([nn.Linear(1, 1) for _ in range(10)])
- self.parameter_list = nn.ParameterList([nn.Parameter(torch.zeros(1)) for _ in range(10)])
+ self.parameter_list = nn.ParameterList(
+ [nn.Parameter(torch.zeros(1)) for _ in range(10)]
+ )
def forward(self, x):
r = x
@@ -687,9 +745,14 @@ class TestModuleContainers(JitTestCase):
class MyModule(nn.Module):
def __init__(self):
super().__init__()
- self.parameter_dict = nn.ParameterDict({k: nn.Parameter(torch.zeros(1)) for k in ['a', 'b', 'c']})
+ self.parameter_dict = nn.ParameterDict(
+ {k: nn.Parameter(torch.zeros(1)) for k in ["a", "b", "c"]}
+ )
def forward(self, x):
- return self.parameter_dict['a'] * x + self.parameter_dict['b'] * self.parameter_dict['c']
+ return (
+ self.parameter_dict["a"] * x
+ + self.parameter_dict["b"] * self.parameter_dict["c"]
+ )
self.checkModule(MyModule(), (torch.ones(1),))
diff --git a/test/jit/test_module_interface.py b/test/jit/test_module_interface.py
index f9e9aea235..4a2648b9a8 100644
--- a/test/jit/test_module_interface.py
+++ b/test/jit/test_module_interface.py
@@ -1,10 +1,11 @@
# Owner(s): ["oncall: jit"]
-from typing import List, Any
-import torch
-import torch.nn as nn
import os
import sys
+from typing import Any, List
+
+import torch
+import torch.nn as nn
from torch import Tensor
from torch.testing._internal.jit_utils import JitTestCase, make_global
@@ -12,10 +13,13 @@ from torch.testing._internal.jit_utils import JitTestCase, make_global
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
+
class OrigModule(nn.Module):
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
@@ -27,6 +31,7 @@ class OrigModule(nn.Module):
def forward(self, input: Tensor) -> Tensor:
return input + self.one(input, input) + 1
+
class NewModule(nn.Module):
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
return inp1 * inp2 + 1
@@ -34,6 +39,7 @@ class NewModule(nn.Module):
def forward(self, input: Tensor) -> Tensor:
return self.one(input, input + 1)
+
class TestModuleInterface(JitTestCase):
def test_not_submodule_interface_call(self):
@torch.jit.interface
@@ -42,7 +48,7 @@ class TestModuleInterface(JitTestCase):
pass
class TestNotModuleInterfaceCall(nn.Module):
- proxy_mod : ModuleInterface
+ proxy_mod: ModuleInterface
def __init__(self):
super().__init__()
@@ -51,7 +57,9 @@ class TestModuleInterface(JitTestCase):
def forward(self, input: Tensor) -> Tensor:
return self.proxy_mod.two(input)
- with self.assertRaisesRegexWithHighlight(RuntimeError, "object has no attribute or method", "self.proxy_mod.two"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "object has no attribute or method", "self.proxy_mod.two"
+ ):
torch.jit.script(TestNotModuleInterfaceCall())
def test_module_interface(self):
@@ -108,17 +116,37 @@ class TestModuleInterface(JitTestCase):
scripted_foo_mod = torch.jit.script(FooMod())
scripted_bar_mod = torch.jit.script(BarMod())
- self.checkScript(use_module_interface,
- ([scripted_foo_mod, scripted_bar_mod], torch.rand(3, 4),))
- self.checkScript(use_class_interface,
- ([scripted_foo_mod, scripted_bar_mod], torch.rand(3, 4),))
-
- def call_module_interface_on_other_method(mod_interface: OneTwoModule, x: Tensor) -> Tensor:
+ self.checkScript(
+ use_module_interface,
+ (
+ [scripted_foo_mod, scripted_bar_mod],
+ torch.rand(3, 4),
+ ),
+ )
+ self.checkScript(
+ use_class_interface,
+ (
+ [scripted_foo_mod, scripted_bar_mod],
+ torch.rand(3, 4),
+ ),
+ )
+
+ def call_module_interface_on_other_method(
+ mod_interface: OneTwoModule, x: Tensor
+ ) -> Tensor:
return mod_interface.forward2(x)
# ensure we error out when calling the module on a method other than the one specified by the interface.
- with self.assertRaisesRegexWithHighlight(RuntimeError, "object has no attribute or method", "mod_interface.forward2"):
- self.checkScript(call_module_interface_on_other_method, (scripted_bar_mod, torch.rand(3, 4),))
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "object has no attribute or method", "mod_interface.forward2"
+ ):
+ self.checkScript(
+ call_module_interface_on_other_method,
+ (
+ scripted_bar_mod,
+ torch.rand(3, 4),
+ ),
+ )
def test_module_doc_string(self):
@torch.jit.interface
@@ -135,7 +163,7 @@ class TestModuleInterface(JitTestCase):
r"""stuff 3"""
class TestModule(nn.Module):
- proxy_mod : TestInterface
+ proxy_mod: TestInterface
def __init__(self):
super().__init__()
@@ -178,7 +206,9 @@ class TestModuleInterface(JitTestCase):
return self.one(self.two(x), x)
# check class object is not a subtype of module interface
- with self.assertRaisesRegex(RuntimeError, "ScriptModule class can be subtype of module interface"):
+ with self.assertRaisesRegex(
+ RuntimeError, "ScriptModule class can be subtype of module interface"
+ ):
as_module_interface(Foo())
class WrongMod(nn.Module):
@@ -233,9 +263,11 @@ class TestModuleInterface(JitTestCase):
as_tensor_to_any(torch.jit.script(TensorToAnyImplB()))
as_any_to_any(torch.jit.script(AnyToAnyImpl()))
-
def test_module_interface_inheritance(self):
- with self.assertRaisesRegex(RuntimeError, "does not support inheritance yet. Please directly"):
+ with self.assertRaisesRegex(
+ RuntimeError, "does not support inheritance yet. Please directly"
+ ):
+
@torch.jit.interface
class InheritMod(nn.ReLU):
def three(self, x: Tensor) -> Tensor:
@@ -251,7 +283,7 @@ class TestModuleInterface(JitTestCase):
pass
class TestModule(nn.Module):
- proxy_mod : ModuleInterface
+ proxy_mod: ModuleInterface
def __init__(self):
super().__init__()
@@ -269,7 +301,9 @@ class TestModuleInterface(JitTestCase):
self.assertEqual(scripted_mod(input), input * (input + 1) + 1)
# module swap with non-scripted module should throw error
- with self.assertRaisesRegex(RuntimeError, "a ScriptModule with non-scripted module"):
+ with self.assertRaisesRegex(
+ RuntimeError, "a ScriptModule with non-scripted module"
+ ):
scripted_mod.proxy_mod = NewModule()
def test_module_swap_wrong_module(self):
@@ -286,7 +320,7 @@ class TestModuleInterface(JitTestCase):
return input + 1
class TestModule(nn.Module):
- proxy_mod : ModuleInterface
+ proxy_mod: ModuleInterface
def __init__(self):
super().__init__()
@@ -310,7 +344,7 @@ class TestModuleInterface(JitTestCase):
pass
class TestModule(nn.Module):
- proxy_mod : ModuleInterface
+ proxy_mod: ModuleInterface
def __init__(self):
super().__init__()
@@ -358,9 +392,11 @@ class TestModuleInterface(JitTestCase):
# proxy mod is swapped with the new ScriptModule that share the same JIT type, should succeed.
scripted_no_module_interface.proxy_mod = torch.jit.script(OrigModule())
# proxy_mod is neither a module interface or have the same JIT type, should fail
- with self.assertRaisesRegex(RuntimeError,
- r"Expected a value of type '__torch__.jit.test_module_interface.OrigModule \(.*\)' " +
- r"for field 'proxy_mod', but found '__torch__.jit.test_module_interface.NewModule \(.*\)'"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ r"Expected a value of type '__torch__.jit.test_module_interface.OrigModule \(.*\)' "
+ + r"for field 'proxy_mod', but found '__torch__.jit.test_module_interface.NewModule \(.*\)'",
+ ):
scripted_no_module_interface.proxy_mod = torch.jit.script(NewModule())
def test_script_module_as_interface_swap(self):
@@ -391,7 +427,7 @@ class TestModuleInterface(JitTestCase):
return self.one(input, input + 1)
class TestNNModuleWithScriptModule(nn.Module):
- proxy_mod : ModuleInterface
+ proxy_mod: ModuleInterface
def __init__(self):
super().__init__()
@@ -432,7 +468,7 @@ class TestModuleInterface(JitTestCase):
pass
class TestModule(torch.nn.Module):
- proxy_mod : ModInterface
+ proxy_mod: ModInterface
def __init__(self):
super().__init__()
@@ -480,7 +516,7 @@ class TestModuleInterface(JitTestCase):
pass
class TestModule(torch.nn.Module):
- proxy_mod : ModInterface
+ proxy_mod: ModInterface
def __init__(self):
super().__init__()
@@ -523,7 +559,7 @@ class TestModuleInterface(JitTestCase):
pass
class TestModule(torch.nn.Module):
- proxy_mod : ModInterface
+ proxy_mod: ModInterface
def __init__(self):
super().__init__()
@@ -568,7 +604,7 @@ class TestModuleInterface(JitTestCase):
pass
class TestModule(torch.nn.Module):
- proxy_mod : ModInterface
+ proxy_mod: ModInterface
def __init__(self):
super().__init__()
@@ -583,7 +619,9 @@ class TestModuleInterface(JitTestCase):
m = torch.jit.script(TestModule())
m.eval()
- with self.assertRaisesRegex(RuntimeError, "Freezing does not support SetAttr on an interface type."):
+ with self.assertRaisesRegex(
+ RuntimeError, "Freezing does not support SetAttr on an interface type."
+ ):
mf = torch._C._freeze_module(m._c, freezeInterfaces=True)
def test_freeze_module_with_interface_and_fork(self):
@@ -610,7 +648,7 @@ class TestModuleInterface(JitTestCase):
pass
class TestModule(torch.nn.Module):
- proxy_mod : ModInterface
+ proxy_mod: ModInterface
def __init__(self):
super().__init__()
@@ -644,7 +682,7 @@ class TestModuleInterface(JitTestCase):
pass
class TestModule(nn.Module):
- proxy_mod : ModuleInterface
+ proxy_mod: ModuleInterface
def __init__(self):
super().__init__()
diff --git a/test/jit/test_modules.py b/test/jit/test_modules.py
index 07c0bb5052..b620e11704 100644
--- a/test/jit/test_modules.py
+++ b/test/jit/test_modules.py
@@ -1,18 +1,22 @@
# Owner(s): ["oncall: jit"]
-import torch
import os
import sys
+
+import torch
from torch.testing._internal.jit_utils import JitTestCase
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
+
class TestModules(JitTestCase):
def test_script_module_with_constants_list(self):
diff --git a/test/jit/test_op_decompositions.py b/test/jit/test_op_decompositions.py
index 6b4569cd6e..1de6258632 100644
--- a/test/jit/test_op_decompositions.py
+++ b/test/jit/test_op_decompositions.py
@@ -4,10 +4,13 @@ import torch
from torch.testing import FileCheck
from torch.testing._internal.jit_utils import JitTestCase
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
+
class TestOpDecompositions(JitTestCase):
def test_op_decomposition(self):
@@ -31,7 +34,9 @@ class TestOpDecompositions(JitTestCase):
def square_decomp(x):
return torch.pow(x, 2)
- torch.jit._register_decomposition(torch.ops.aten.square.default, square_decomp.graph)
+ torch.jit._register_decomposition(
+ torch.ops.aten.square.default, square_decomp.graph
+ )
torch._C._jit_pass_run_decompositions(foo.graph)
FileCheck().check_not("aten::square").check("aten::pow").run(foo.graph)
x = torch.rand([4])
diff --git a/test/jit/test_optimize_for_mobile_preserve_debug_info.py b/test/jit/test_optimize_for_mobile_preserve_debug_info.py
index 78d3fae593..9ccc796c92 100644
--- a/test/jit/test_optimize_for_mobile_preserve_debug_info.py
+++ b/test/jit/test_optimize_for_mobile_preserve_debug_info.py
@@ -3,8 +3,9 @@
import torch
import torch._C
import torch.nn.functional as F
-from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import skipIfNoXNNPACK
+from torch.testing._internal.jit_utils import JitTestCase
+
class TestOptimizeForMobilePreserveDebugInfo(JitTestCase):
def check_replacement(
@@ -133,10 +134,8 @@ class TestOptimizeForMobilePreserveDebugInfo(JitTestCase):
"prepacked::linear_clamp_run": "aten::linear",
"prepacked::conv2d_clamp_prepack": "aten::conv2d",
"prepacked::conv2d_clamp_run": "aten::conv2d",
- "prepacked::conv2d_transpose_clamp_prepack":
- "aten::conv_transpose2d",
- "prepacked::conv2d_transpose_clamp_run":
- "aten::conv_transpose2d",
+ "prepacked::conv2d_transpose_clamp_prepack": "aten::conv_transpose2d",
+ "prepacked::conv2d_transpose_clamp_run": "aten::conv_transpose2d",
},
jit_pass=torch._C._jit_pass_insert_prepacked_ops,
)
@@ -147,7 +146,7 @@ class TestOptimizeForMobilePreserveDebugInfo(JitTestCase):
model=torch.jit.trace(torch.nn.Linear(5, 4), torch.rand(3, 2, 5)),
replacements={
"prepacked::linear_clamp_prepack": "aten::linear",
- "prepacked::linear_clamp_run": "aten::linear"
+ "prepacked::linear_clamp_run": "aten::linear",
},
jit_pass=torch._C._jit_pass_insert_prepacked_ops,
)
@@ -223,11 +222,9 @@ class TestOptimizeForMobilePreserveDebugInfo(JitTestCase):
self.check_replacement(
model=model,
replacements={
- "prepacked::linear_clamp_prepack":
- "prepacked::linear_clamp_prepack",
+ "prepacked::linear_clamp_prepack": "prepacked::linear_clamp_prepack",
"prepacked::linear_clamp_run": linear_activation_kind,
- "prepacked::conv2d_clamp_prepack":
- "prepacked::conv2d_clamp_prepack",
+ "prepacked::conv2d_clamp_prepack": "prepacked::conv2d_clamp_prepack",
"prepacked::conv2d_clamp_run": conv2d_activation_kind,
},
jit_pass=torch._C._jit_pass_fuse_clamp_w_prepacked_linear_conv,
@@ -239,7 +236,7 @@ class TestOptimizeForMobilePreserveDebugInfo(JitTestCase):
linear_activation=F.hardtanh,
linear_activation_kind="aten::hardtanh",
conv2d_activation=F.hardtanh_,
- conv2d_activation_kind="aten::hardtanh_"
+ conv2d_activation_kind="aten::hardtanh_",
)
@skipIfNoXNNPACK
@@ -248,7 +245,7 @@ class TestOptimizeForMobilePreserveDebugInfo(JitTestCase):
linear_activation=F.hardtanh_,
linear_activation_kind="aten::hardtanh_",
conv2d_activation=F.hardtanh,
- conv2d_activation_kind="aten::hardtanh"
+ conv2d_activation_kind="aten::hardtanh",
)
@skipIfNoXNNPACK
@@ -257,7 +254,7 @@ class TestOptimizeForMobilePreserveDebugInfo(JitTestCase):
linear_activation=F.relu,
linear_activation_kind="aten::relu",
conv2d_activation=F.relu_,
- conv2d_activation_kind="aten::relu_"
+ conv2d_activation_kind="aten::relu_",
)
@skipIfNoXNNPACK
@@ -266,5 +263,5 @@ class TestOptimizeForMobilePreserveDebugInfo(JitTestCase):
linear_activation=F.relu_,
linear_activation_kind="aten::relu_",
conv2d_activation=F.relu,
- conv2d_activation_kind="aten::relu"
+ conv2d_activation_kind="aten::relu",
)
diff --git a/test/jit/test_parametrization.py b/test/jit/test_parametrization.py
index 8882a431f2..41aff00a30 100644
--- a/test/jit/test_parametrization.py
+++ b/test/jit/test_parametrization.py
@@ -2,16 +2,19 @@
import torch
-from torch import nn
import torch.nn.utils.parametrize as parametrize
+from torch import nn
from torch.testing._internal.jit_utils import JitTestCase
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
+
class TestParametrization(JitTestCase):
# Define some parametrization
@@ -29,7 +32,7 @@ class TestParametrization(JitTestCase):
# Check the tracing works. Because traced functions cannot be called
# directly, we run the comparison on the activations.
- traced_model = torch.jit.trace_module(model, {'forward': x})
+ traced_model = torch.jit.trace_module(model, {"forward": x})
y_hat = traced_model(x)
self.assertEqual(y, y_hat)
@@ -39,10 +42,9 @@ class TestParametrization(JitTestCase):
self.assertEqual(y, y_hat)
# Check the tracing throws an error when caching
- with self.assertRaisesRegex(RuntimeError,
- 'Cannot trace a model while caching'):
+ with self.assertRaisesRegex(RuntimeError, "Cannot trace a model while caching"):
with parametrize.cached():
- traced_model = torch.jit.trace_module(model, {'forward': x})
+ traced_model = torch.jit.trace_module(model, {"forward": x})
def test_scriptable(self):
# TODO: Need to fix the scripting in parametrizations
@@ -65,5 +67,5 @@ class TestParametrization(JitTestCase):
self.assertEqual(y, y_hat)
# Check the scripting process throws an error when caching
- with self.assertRaisesRegex(RuntimeError, 'Caching is not implemented'):
+ with self.assertRaisesRegex(RuntimeError, "Caching is not implemented"):
scripted_model = torch.jit.trace_module(model)
diff --git a/test/jit/test_pdt.py b/test/jit/test_pdt.py
index 5fa39b8cac..43225ebb5e 100644
--- a/test/jit/test_pdt.py
+++ b/test/jit/test_pdt.py
@@ -2,18 +2,22 @@
import os
import sys
+from typing import Any, Dict, List, NamedTuple, Optional, Tuple # noqa: F401
+
import torch
-from torch.testing._internal.jit_utils import JitTestCase, make_global
from torch.jit._monkeytype_config import _IS_MONKEYTYPE_INSTALLED
-from typing import List, Dict, Tuple, Any, Optional, NamedTuple # noqa: F401
from torch.testing._internal.common_utils import NoTest
+from torch.testing._internal.jit_utils import JitTestCase, make_global
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if not _IS_MONKEYTYPE_INSTALLED:
- print("monkeytype is not installed. Skipping tests for Profile-Directed Typing", file=sys.stderr)
+ print(
+ "monkeytype is not installed. Skipping tests for Profile-Directed Typing",
+ file=sys.stderr,
+ )
JitTestCase = NoTest # type: ignore[misc, assignment] # noqa: F811
if __name__ == "__main__":
@@ -23,10 +27,12 @@ if __name__ == "__main__":
"instead."
)
+
class TestPDT(JitTestCase):
"""
A suite of tests for profile directed typing in TorchScript.
"""
+
def test_nn_module(self):
class TestPDTModel(torch.nn.Module):
def forward(self, x) -> Any:
@@ -39,8 +45,14 @@ class TestPDT(JitTestCase):
make_global(TestPDTModel)
pdt_model = TestPDTModel()
- inp: List[Tuple[Any, ...]] = [(20, ), (2.7, ), (False, ), ]
- scripted_pdt_model = torch.jit.script(pdt_model, example_inputs={pdt_model: inp})
+ inp: List[Tuple[Any, ...]] = [
+ (20,),
+ (2.7,),
+ (False,),
+ ]
+ scripted_pdt_model = torch.jit.script(
+ pdt_model, example_inputs={pdt_model: inp}
+ )
self.assertEqual(scripted_pdt_model(50), pdt_model(50))
self.assertEqual(scripted_pdt_model(1.8), pdt_model(1.8))
self.assertTrue(scripted_pdt_model(True), pdt_model(True))
@@ -63,8 +75,10 @@ class TestPDT(JitTestCase):
make_global(NestedPDTInner, NestedModulePDTWrapper)
inner_pdt_model = NestedPDTInner()
wrapped_pdt_model = NestedModulePDTWrapper(inner_pdt_model)
- inp: List[Tuple[Any, ...]] = [(20, ), (False, )]
- scripted_pdt_model = torch.jit.script(wrapped_pdt_model, example_inputs={wrapped_pdt_model: inp})
+ inp: List[Tuple[Any, ...]] = [(20,), (False,)]
+ scripted_pdt_model = torch.jit.script(
+ wrapped_pdt_model, example_inputs={wrapped_pdt_model: inp}
+ )
self.assertEqual(scripted_pdt_model(30), wrapped_pdt_model(30))
self.assertEqual(scripted_pdt_model(1.9), wrapped_pdt_model(1.9))
self.assertTrue(scripted_pdt_model(True), wrapped_pdt_model(True))
@@ -87,10 +101,18 @@ class TestPDT(JitTestCase):
make_global(NestedModulePDTInner, NestedModulePDTOuter)
inner_pdt_model = NestedModulePDTInner()
outer_pdt_model = NestedModulePDTOuter(inner_pdt_model)
- inner_input: List[Tuple[Any, ...]] = [(10, 10), (1.9, 20), ]
- outer_input: List[Tuple[Any, ...]] = [(20, ), (False, )]
- scripted_pdt_model = torch.jit.script(outer_pdt_model, example_inputs={inner_pdt_model: inner_input,
- outer_pdt_model: outer_input, })
+ inner_input: List[Tuple[Any, ...]] = [
+ (10, 10),
+ (1.9, 20),
+ ]
+ outer_input: List[Tuple[Any, ...]] = [(20,), (False,)]
+ scripted_pdt_model = torch.jit.script(
+ outer_pdt_model,
+ example_inputs={
+ inner_pdt_model: inner_input,
+ outer_pdt_model: outer_input,
+ },
+ )
self.assertEqual(scripted_pdt_model(30), outer_pdt_model(30))
self.assertEqual(scripted_pdt_model(1.9), outer_pdt_model(1.9))
self.assertTrue(scripted_pdt_model(True), outer_pdt_model(True))
@@ -109,8 +131,10 @@ class TestPDT(JitTestCase):
make_global(NestedFunctionInForward)
pdt_model = NestedFunctionInForward()
- inp: List[Tuple[Any, ...]] = [(-1, ), (False, )]
- scripted_pdt_model = torch.jit.script(pdt_model, example_inputs={pdt_model: inp})
+ inp: List[Tuple[Any, ...]] = [(-1,), (False,)]
+ scripted_pdt_model = torch.jit.script(
+ pdt_model, example_inputs={pdt_model: inp}
+ )
self.assertEqual(scripted_pdt_model(30), pdt_model(30))
self.assertEqual(scripted_pdt_model(True), pdt_model(True))
@@ -126,14 +150,26 @@ class TestPDT(JitTestCase):
else:
return -1
-
make_global(TestModelWithExport)
pdt_model = TestModelWithExport()
- inp: List[Tuple[Any, ...]] = [(20, 10, ), (2.7, 8.9, ), ]
- scripted_pdt_model = torch.jit.script(pdt_model, example_inputs={pdt_model.fn: inp})
+ inp: List[Tuple[Any, ...]] = [
+ (
+ 20,
+ 10,
+ ),
+ (
+ 2.7,
+ 8.9,
+ ),
+ ]
+ scripted_pdt_model = torch.jit.script(
+ pdt_model, example_inputs={pdt_model.fn: inp}
+ )
self.assertEqual(scripted_pdt_model.fn(10, 90), pdt_model.fn(10, 90))
self.assertEqual(scripted_pdt_model.fn(1.8, 2.2), pdt_model.fn(1.8, 2.2))
- self.assertTrue(scripted_pdt_model.fn(torch.ones(1), 2), pdt_model.fn(torch.ones(1), 2))
+ self.assertTrue(
+ scripted_pdt_model.fn(torch.ones(1), 2), pdt_model.fn(torch.ones(1), 2)
+ )
def test_class_methods(self):
class PDTModel:
@@ -142,10 +178,34 @@ class TestPDT(JitTestCase):
make_global(PDTModel)
pdt_model = PDTModel()
- inp: List[Tuple[Any, ...]] = [([10, 20, ], ), ]
- scripted_pdt_model = torch.jit.script(PDTModel, example_inputs={pdt_model.test_sum: inp})
+ inp: List[Tuple[Any, ...]] = [
+ (
+ [
+ 10,
+ 20,
+ ],
+ ),
+ ]
+ scripted_pdt_model = torch.jit.script(
+ PDTModel, example_inputs={pdt_model.test_sum: inp}
+ )
script_model = scripted_pdt_model()
- self.assertEqual(script_model.test_sum([10, 20, 30, ], ), pdt_model.test_sum([10, 20, 30, ], ))
+ self.assertEqual(
+ script_model.test_sum(
+ [
+ 10,
+ 20,
+ 30,
+ ],
+ ),
+ pdt_model.test_sum(
+ [
+ 10,
+ 20,
+ 30,
+ ],
+ ),
+ )
def test_class_with_multiple_methods(self):
class PDTModelWithManyMethods:
@@ -160,14 +220,64 @@ class TestPDT(JitTestCase):
make_global(PDTModelWithManyMethods)
pdt_model = PDTModelWithManyMethods()
- list_inp: List[Tuple[Any, ...]] = [([1.2, 2.3, ], ), ]
- str_inp: List[Tuple[Any, ...]] = [("abc", "b", ), ]
- scripted_pdt_model = torch.jit.script(PDTModelWithManyMethods, example_inputs={pdt_model.test_list_to_dict: list_inp,
- pdt_model.test_substring: str_inp})
+ list_inp: List[Tuple[Any, ...]] = [
+ (
+ [
+ 1.2,
+ 2.3,
+ ],
+ ),
+ ]
+ str_inp: List[Tuple[Any, ...]] = [
+ (
+ "abc",
+ "b",
+ ),
+ ]
+ scripted_pdt_model = torch.jit.script(
+ PDTModelWithManyMethods,
+ example_inputs={
+ pdt_model.test_list_to_dict: list_inp,
+ pdt_model.test_substring: str_inp,
+ },
+ )
script_model = scripted_pdt_model()
- self.assertEqual(script_model.test_list_to_dict([1.1, 2.2, 3.3, ], ), pdt_model.test_list_to_dict([1.1, 2.2, 3.3, ], ))
- self.assertEqual(script_model.test_substring("helloworld", "world", ), pdt_model.test_substring("helloworld", "world", ))
- self.assertEqual(script_model.test_substring("helloworld", "def", ), pdt_model.test_substring("helloworld", "def", ))
+ self.assertEqual(
+ script_model.test_list_to_dict(
+ [
+ 1.1,
+ 2.2,
+ 3.3,
+ ],
+ ),
+ pdt_model.test_list_to_dict(
+ [
+ 1.1,
+ 2.2,
+ 3.3,
+ ],
+ ),
+ )
+ self.assertEqual(
+ script_model.test_substring(
+ "helloworld",
+ "world",
+ ),
+ pdt_model.test_substring(
+ "helloworld",
+ "world",
+ ),
+ )
+ self.assertEqual(
+ script_model.test_substring(
+ "helloworld",
+ "def",
+ ),
+ pdt_model.test_substring(
+ "helloworld",
+ "def",
+ ),
+ )
def test_multiple_class_with_same_method(self):
class PDTModelOne:
@@ -181,16 +291,69 @@ class TestPDT(JitTestCase):
make_global(PDTModelOne, PDTModelTwo)
pdt_model_one = PDTModelOne()
pdt_model_two = PDTModelTwo()
- dict_inp: List[Tuple[Any, ...]] = [({1.2: True, 2.3: False, }, 1.2), ]
- list_inp: List[Tuple[Any, ...]] = [(["abc", "b", ], "c"), ]
- scripted_pdt_model_one = torch.jit.script(PDTModelOne, example_inputs={pdt_model_one.test_find: dict_inp})
- scripted_pdt_model_two = torch.jit.script(PDTModelTwo, example_inputs={pdt_model_two.test_find: list_inp})
-
- script_model_one, script_model_two = scripted_pdt_model_one(), scripted_pdt_model_two()
- self.assertEqual(script_model_one.test_find({1.1: True, 2.2: True, 3.3: False, }, 4.4),
- pdt_model_one.test_find({1.1: True, 2.2: True, 3.3: False, }, 4.4))
- self.assertEqual(script_model_two.test_find(["hello", "world", ], "world"),
- pdt_model_two.test_find(["hello", "world", ], "world"))
+ dict_inp: List[Tuple[Any, ...]] = [
+ (
+ {
+ 1.2: True,
+ 2.3: False,
+ },
+ 1.2,
+ ),
+ ]
+ list_inp: List[Tuple[Any, ...]] = [
+ (
+ [
+ "abc",
+ "b",
+ ],
+ "c",
+ ),
+ ]
+ scripted_pdt_model_one = torch.jit.script(
+ PDTModelOne, example_inputs={pdt_model_one.test_find: dict_inp}
+ )
+ scripted_pdt_model_two = torch.jit.script(
+ PDTModelTwo, example_inputs={pdt_model_two.test_find: list_inp}
+ )
+
+ script_model_one, script_model_two = (
+ scripted_pdt_model_one(),
+ scripted_pdt_model_two(),
+ )
+ self.assertEqual(
+ script_model_one.test_find(
+ {
+ 1.1: True,
+ 2.2: True,
+ 3.3: False,
+ },
+ 4.4,
+ ),
+ pdt_model_one.test_find(
+ {
+ 1.1: True,
+ 2.2: True,
+ 3.3: False,
+ },
+ 4.4,
+ ),
+ )
+ self.assertEqual(
+ script_model_two.test_find(
+ [
+ "hello",
+ "world",
+ ],
+ "world",
+ ),
+ pdt_model_two.test_find(
+ [
+ "hello",
+ "world",
+ ],
+ "world",
+ ),
+ )
def test_pdt(self):
def test_sum(a, b):
@@ -218,7 +381,9 @@ class TestPDT(JitTestCase):
return torch.complex(real, img)
make_global(test_args_complex)
- scripted_fn_complex = torch.jit.script(test_args_complex, example_inputs=[(torch.rand(3, 4), torch.rand(3, 4))])
+ scripted_fn_complex = torch.jit.script(
+ test_args_complex, example_inputs=[(torch.rand(3, 4), torch.rand(3, 4))]
+ )
arg1, arg2 = torch.rand(3, 4), torch.rand(3, 4)
self.assertEqual(scripted_fn_complex(arg1, arg2), test_args_complex(arg1, arg2))
@@ -248,25 +413,49 @@ class TestPDT(JitTestCase):
make_global(test_list_and_tuple)
- scripted_fn_float_list_input = torch.jit.script(test_list_and_tuple, example_inputs=[([4.9, 8.9],)])
- self.assertEqual(scripted_fn_float_list_input([11.9, 7.6]), test_list_and_tuple([11.9, 7.6]))
-
- scripted_fn_bool_list_input = torch.jit.script(test_list_and_tuple, example_inputs=[([True, False, True],)])
- self.assertEqual(scripted_fn_bool_list_input([True, True, True]), test_list_and_tuple([True, True, True]))
-
- scripted_fn_int_list_input = torch.jit.script(test_list_and_tuple, example_inputs=[([3, 4, 5], )])
- self.assertEqual(scripted_fn_int_list_input([1, 2, 3]), test_list_and_tuple([1, 2, 3]))
-
- scripted_fn_float_tuple_input = torch.jit.script(test_list_and_tuple, example_inputs=[((4.9, 8.9),)])
- self.assertEqual(scripted_fn_float_tuple_input((11.9, 7.6)), test_list_and_tuple((11.9, 7.6)))
-
- scripted_fn_bool_tuple_input = torch.jit.script(test_list_and_tuple,
- example_inputs=[((True, False, True),)])
- self.assertEqual(scripted_fn_bool_tuple_input((True, True, True)),
- test_list_and_tuple((True, True, True)))
-
- scripted_fn_int_tuple_input = torch.jit.script(test_list_and_tuple, example_inputs=[((3, 4, 5), )])
- self.assertEqual(scripted_fn_int_tuple_input((1, 2, 3)), test_list_and_tuple((1, 2, 3)))
+ scripted_fn_float_list_input = torch.jit.script(
+ test_list_and_tuple, example_inputs=[([4.9, 8.9],)]
+ )
+ self.assertEqual(
+ scripted_fn_float_list_input([11.9, 7.6]), test_list_and_tuple([11.9, 7.6])
+ )
+
+ scripted_fn_bool_list_input = torch.jit.script(
+ test_list_and_tuple, example_inputs=[([True, False, True],)]
+ )
+ self.assertEqual(
+ scripted_fn_bool_list_input([True, True, True]),
+ test_list_and_tuple([True, True, True]),
+ )
+
+ scripted_fn_int_list_input = torch.jit.script(
+ test_list_and_tuple, example_inputs=[([3, 4, 5],)]
+ )
+ self.assertEqual(
+ scripted_fn_int_list_input([1, 2, 3]), test_list_and_tuple([1, 2, 3])
+ )
+
+ scripted_fn_float_tuple_input = torch.jit.script(
+ test_list_and_tuple, example_inputs=[((4.9, 8.9),)]
+ )
+ self.assertEqual(
+ scripted_fn_float_tuple_input((11.9, 7.6)), test_list_and_tuple((11.9, 7.6))
+ )
+
+ scripted_fn_bool_tuple_input = torch.jit.script(
+ test_list_and_tuple, example_inputs=[((True, False, True),)]
+ )
+ self.assertEqual(
+ scripted_fn_bool_tuple_input((True, True, True)),
+ test_list_and_tuple((True, True, True)),
+ )
+
+ scripted_fn_int_tuple_input = torch.jit.script(
+ test_list_and_tuple, example_inputs=[((3, 4, 5),)]
+ )
+ self.assertEqual(
+ scripted_fn_int_tuple_input((1, 2, 3)), test_list_and_tuple((1, 2, 3))
+ )
def test_nested_list_and_tuple(self):
def test_nested_list(inp):
@@ -282,43 +471,207 @@ class TestPDT(JitTestCase):
make_global(test_nested_list, test_nested_tuple)
- list_inp = [[1, 2, 3, ], [5, 6, 7, ]]
- scripted_fn = torch.jit.script(test_nested_list, example_inputs=[(list_inp, ), ])
- inp = [[0, 4, 7, ], [8, 11, ], [6, -1, -20, ]]
- self.assertEqual(scripted_fn(inp, ), test_nested_list(inp, ))
-
- list_inp = ([1, 2, 3, ], [5, 6, 7, ])
- scripted_fn = torch.jit.script(test_nested_list, example_inputs=[(list_inp, ), ])
- inp = ([0, 4, 7, ], [8, 11, ], [6, -1, -20, ])
- self.assertEqual(scripted_fn(inp, ), test_nested_list(inp, ))
-
- tup_inp = [(1.0, 2.6, 3.7, ), (5.7, 6.1, 1.7, )]
- scripted_fn = torch.jit.script(test_nested_tuple, example_inputs=[(tup_inp, ), ])
- inp = [(1.0, 4.1, 7.4, ), (4.8, 1.1, -1.2, ), (6.3, -1.3, -2.0, )]
- self.assertEqual(scripted_fn(inp, ), test_nested_tuple(inp, ))
-
- tup_inp = ((True, False, True, ), (False, False, False, ))
- scripted_fn = torch.jit.script(test_nested_tuple, example_inputs=[(tup_inp, ), ])
- inp = ((True, True, True, ), (False, False, True, ))
- self.assertEqual(scripted_fn(inp, ), test_nested_tuple(inp, ))
+ list_inp = [
+ [
+ 1,
+ 2,
+ 3,
+ ],
+ [
+ 5,
+ 6,
+ 7,
+ ],
+ ]
+ scripted_fn = torch.jit.script(
+ test_nested_list,
+ example_inputs=[
+ (list_inp,),
+ ],
+ )
+ inp = [
+ [
+ 0,
+ 4,
+ 7,
+ ],
+ [
+ 8,
+ 11,
+ ],
+ [
+ 6,
+ -1,
+ -20,
+ ],
+ ]
+ self.assertEqual(
+ scripted_fn(
+ inp,
+ ),
+ test_nested_list(
+ inp,
+ ),
+ )
+
+ list_inp = (
+ [
+ 1,
+ 2,
+ 3,
+ ],
+ [
+ 5,
+ 6,
+ 7,
+ ],
+ )
+ scripted_fn = torch.jit.script(
+ test_nested_list,
+ example_inputs=[
+ (list_inp,),
+ ],
+ )
+ inp = (
+ [
+ 0,
+ 4,
+ 7,
+ ],
+ [
+ 8,
+ 11,
+ ],
+ [
+ 6,
+ -1,
+ -20,
+ ],
+ )
+ self.assertEqual(
+ scripted_fn(
+ inp,
+ ),
+ test_nested_list(
+ inp,
+ ),
+ )
+
+ tup_inp = [
+ (
+ 1.0,
+ 2.6,
+ 3.7,
+ ),
+ (
+ 5.7,
+ 6.1,
+ 1.7,
+ ),
+ ]
+ scripted_fn = torch.jit.script(
+ test_nested_tuple,
+ example_inputs=[
+ (tup_inp,),
+ ],
+ )
+ inp = [
+ (
+ 1.0,
+ 4.1,
+ 7.4,
+ ),
+ (
+ 4.8,
+ 1.1,
+ -1.2,
+ ),
+ (
+ 6.3,
+ -1.3,
+ -2.0,
+ ),
+ ]
+ self.assertEqual(
+ scripted_fn(
+ inp,
+ ),
+ test_nested_tuple(
+ inp,
+ ),
+ )
+
+ tup_inp = (
+ (
+ True,
+ False,
+ True,
+ ),
+ (
+ False,
+ False,
+ False,
+ ),
+ )
+ scripted_fn = torch.jit.script(
+ test_nested_tuple,
+ example_inputs=[
+ (tup_inp,),
+ ],
+ )
+ inp = (
+ (
+ True,
+ True,
+ True,
+ ),
+ (
+ False,
+ False,
+ True,
+ ),
+ )
+ self.assertEqual(
+ scripted_fn(
+ inp,
+ ),
+ test_nested_tuple(
+ inp,
+ ),
+ )
def test_pdt_dict(self):
def test_dict(a):
- return a['foo']
+ return a["foo"]
def test_dict_int_list(a):
return a[1]
make_global(test_dict, test_dict_int_list)
- str_bool_inp = {'foo' : True, 'bar': False}
+ str_bool_inp = {"foo": True, "bar": False}
scripted_fn = torch.jit.script(test_dict, example_inputs=[(str_bool_inp,)])
- self.assertEqual(scripted_fn({'foo' : False, 'bar': True}, ), test_dict({'foo' : False, 'bar': True}, ))
-
- str_list_inp = {0 : [True, False], 1: [False, True]}
- scripted_fn = torch.jit.script(test_dict_int_list, example_inputs=[(str_list_inp,)])
- self.assertEqual(scripted_fn({0 : [False, False], 1: [True, True]}, ),
- test_dict_int_list({0 : [False, False], 1: [True, True]}, ))
+ self.assertEqual(
+ scripted_fn(
+ {"foo": False, "bar": True},
+ ),
+ test_dict(
+ {"foo": False, "bar": True},
+ ),
+ )
+
+ str_list_inp = {0: [True, False], 1: [False, True]}
+ scripted_fn = torch.jit.script(
+ test_dict_int_list, example_inputs=[(str_list_inp,)]
+ )
+ self.assertEqual(
+ scripted_fn(
+ {0: [False, False], 1: [True, True]},
+ ),
+ test_dict_int_list(
+ {0: [False, False], 1: [True, True]},
+ ),
+ )
def test_any(self):
def test_multiple_types(a):
@@ -337,20 +690,36 @@ class TestPDT(JitTestCase):
make_global(test_multiple_types, test_multiple_type_refinement)
- scripted_fn = torch.jit.script(test_multiple_types, example_inputs=[(1,), ("abc", ), (8.9,), ([3, 4, 5], )])
+ scripted_fn = torch.jit.script(
+ test_multiple_types, example_inputs=[(1,), ("abc",), (8.9,), ([3, 4, 5],)]
+ )
self.assertEqual(scripted_fn(10), test_multiple_types(10))
self.assertEqual(scripted_fn("def"), test_multiple_types("def"))
self.assertEqual(scripted_fn(7.89999), test_multiple_types(7.89999))
self.assertEqual(scripted_fn([10, 11, 14]), test_multiple_types([10, 11, 14]))
- scripted_fn = torch.jit.script(test_multiple_type_refinement, example_inputs=[(1,), ("abc", ), (8.9,),
- ([3, 4, 5],), (True, ), ({"a": True}, ), ])
+ scripted_fn = torch.jit.script(
+ test_multiple_type_refinement,
+ example_inputs=[
+ (1,),
+ ("abc",),
+ (8.9,),
+ ([3, 4, 5],),
+ (True,),
+ ({"a": True},),
+ ],
+ )
self.assertEqual(scripted_fn(10), test_multiple_type_refinement(10))
self.assertEqual(scripted_fn("def"), test_multiple_type_refinement("def"))
self.assertEqual(scripted_fn(7.89999), test_multiple_type_refinement(7.89999))
- self.assertEqual(scripted_fn([10, 11, 14]), test_multiple_type_refinement([10, 11, 14]))
+ self.assertEqual(
+ scripted_fn([10, 11, 14]), test_multiple_type_refinement([10, 11, 14])
+ )
self.assertEqual(scripted_fn(False), test_multiple_type_refinement(False))
- self.assertEqual(scripted_fn({"abc" : True, "def": False}), test_multiple_type_refinement({"abc" : True, "def": False}))
+ self.assertEqual(
+ scripted_fn({"abc": True, "def": False}),
+ test_multiple_type_refinement({"abc": True, "def": False}),
+ )
def test_class_as_profiled_types(self):
class UserDefinedClass:
@@ -369,9 +738,33 @@ class TestPDT(JitTestCase):
make_global(UserDefinedClass, test_model)
user_class = UserDefinedClass()
- scripted_fn = torch.jit.script(test_model, example_inputs=[(10, user_class, ), (10.9, user_class, ), ])
- self.assertEqual(scripted_fn(100, user_class, ), test_model(100, user_class))
- self.assertEqual(scripted_fn(1.9, user_class, ), test_model(1.9, user_class))
+ scripted_fn = torch.jit.script(
+ test_model,
+ example_inputs=[
+ (
+ 10,
+ user_class,
+ ),
+ (
+ 10.9,
+ user_class,
+ ),
+ ],
+ )
+ self.assertEqual(
+ scripted_fn(
+ 100,
+ user_class,
+ ),
+ test_model(100, user_class),
+ )
+ self.assertEqual(
+ scripted_fn(
+ 1.9,
+ user_class,
+ ),
+ test_model(1.9, user_class),
+ )
def test_class_with_args_as_profiled_types(self):
class ClassWithArgs:
@@ -391,8 +784,26 @@ class TestPDT(JitTestCase):
make_global(ClassWithArgs, test_model_with_args)
user_class = ClassWithArgs(False)
- scripted_fn = torch.jit.script(test_model_with_args, example_inputs=[(10, user_class, ), (10.9, user_class, ), ])
- self.assertEqual(scripted_fn(100, ClassWithArgs(True), ), test_model_with_args(100, ClassWithArgs(True)))
+ scripted_fn = torch.jit.script(
+ test_model_with_args,
+ example_inputs=[
+ (
+ 10,
+ user_class,
+ ),
+ (
+ 10.9,
+ user_class,
+ ),
+ ],
+ )
+ self.assertEqual(
+ scripted_fn(
+ 100,
+ ClassWithArgs(True),
+ ),
+ test_model_with_args(100, ClassWithArgs(True)),
+ )
def test_nn_parameter_as_arg(self):
class TestNNParameter(torch.nn.Module):
@@ -408,7 +819,14 @@ class TestPDT(JitTestCase):
make_global(TestNNParameter)
pdt_model = TestNNParameter()
- scripted_fn = torch.jit.script(pdt_model, example_inputs={pdt_model: [(10, ), ], })
+ scripted_fn = torch.jit.script(
+ pdt_model,
+ example_inputs={
+ pdt_model: [
+ (10,),
+ ],
+ },
+ )
self.assertEqual(scripted_fn(20), pdt_model(20))
def test_fx_tracing_with_typing(self):
@@ -422,7 +840,19 @@ class TestPDT(JitTestCase):
make_global(FXModel, FXModelOutput)
pdt_model = FXModel()
- scripted_fn = torch.jit.script(pdt_model, example_inputs={pdt_model: [([10, 20, ], ), ], })
+ scripted_fn = torch.jit.script(
+ pdt_model,
+ example_inputs={
+ pdt_model: [
+ (
+ [
+ 10,
+ 20,
+ ],
+ ),
+ ],
+ },
+ )
self.assertEqual(scripted_fn([20]), pdt_model([20]))
def test_nonetype_as_optional_of_type(self):
@@ -434,11 +864,34 @@ class TestPDT(JitTestCase):
make_global(test_none)
- scripted_fn = torch.jit.script(test_none, example_inputs=[(None, ), (10.6, )])
- self.assertEqual(scripted_fn(30.9, ), test_none(30.9, ))
-
- scripted_fn = torch.jit.script(test_none, example_inputs=[(None, ), (10, )])
- self.assertEqual(scripted_fn(2, ), test_none(2, ))
-
- scripted_fn = torch.jit.script(test_none, example_inputs=[(None, ), (torch.Tensor(1), )])
- self.assertEqual(scripted_fn(torch.ones(1), ), test_none(torch.ones(1), ))
+ scripted_fn = torch.jit.script(test_none, example_inputs=[(None,), (10.6,)])
+ self.assertEqual(
+ scripted_fn(
+ 30.9,
+ ),
+ test_none(
+ 30.9,
+ ),
+ )
+
+ scripted_fn = torch.jit.script(test_none, example_inputs=[(None,), (10,)])
+ self.assertEqual(
+ scripted_fn(
+ 2,
+ ),
+ test_none(
+ 2,
+ ),
+ )
+
+ scripted_fn = torch.jit.script(
+ test_none, example_inputs=[(None,), (torch.Tensor(1),)]
+ )
+ self.assertEqual(
+ scripted_fn(
+ torch.ones(1),
+ ),
+ test_none(
+ torch.ones(1),
+ ),
+ )
diff --git a/test/jit/test_peephole.py b/test/jit/test_peephole.py
index e79fbf6504..d78c4fb91e 100644
--- a/test/jit/test_peephole.py
+++ b/test/jit/test_peephole.py
@@ -1,17 +1,20 @@
# Owner(s): ["oncall: jit"]
+import unittest
+from typing import Callable, List
+
import torch
-from torch.testing._internal.jit_utils import JitTestCase, RUN_CUDA, _inline_everything
from torch import nn
from torch.testing import FileCheck
-from typing import Callable, List
+from torch.testing._internal.jit_utils import _inline_everything, JitTestCase, RUN_CUDA
-import unittest
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
class TestPeephole(JitTestCase):
def test_peephole_with_writes(self):
@@ -62,11 +65,11 @@ class TestPeephole(JitTestCase):
tf = torch.jit.trace(f, (a, b))
FileCheck().check("type_as").run(str(tf.graph))
- self.run_pass('peephole', tf.graph)
+ self.run_pass("peephole", tf.graph)
FileCheck().check_not("type_as").run(str(tf.graph))
tf2 = torch.jit.trace(f, (a, c))
s = str(tf2.graph)
- self.run_pass('peephole', tf2.graph)
+ self.run_pass("peephole", tf2.graph)
self.assertEqual(s, str(s))
def test_peephole_dynamic(self):
@@ -83,7 +86,7 @@ class TestPeephole(JitTestCase):
def foo(x, y, z):
return len([x, y, z])
- self.run_pass('peephole', foo.graph)
+ self.run_pass("peephole", foo.graph)
FileCheck().check("value=3").check_next("return").run(foo.graph)
@torch.jit.script
@@ -93,7 +96,7 @@ class TestPeephole(JitTestCase):
li.append(x)
return len([x, y, z])
- self.run_pass('peephole', foo.graph)
+ self.run_pass("peephole", foo.graph)
FileCheck().check_not("aten::len").run(foo.graph)
@torch.jit.script
@@ -102,7 +105,7 @@ class TestPeephole(JitTestCase):
return li[1], li[-2]
FileCheck().check("aten::__getitem__").run(foo.graph)
- self.run_pass('peephole', foo.graph)
+ self.run_pass("peephole", foo.graph)
FileCheck().check_not("aten::__getitem__").run(foo.graph)
@torch.jit.script
@@ -110,7 +113,7 @@ class TestPeephole(JitTestCase):
li = [x, y, z]
return li[-7]
- self.run_pass('peephole', foo.graph)
+ self.run_pass("peephole", foo.graph)
FileCheck().check("aten::__getitem__").run(foo.graph)
@torch.jit.script
@@ -120,25 +123,25 @@ class TestPeephole(JitTestCase):
li.append(x)
return li[-2]
- self.run_pass('peephole', foo.graph)
+ self.run_pass("peephole", foo.graph)
FileCheck().check("aten::__getitem__").run(foo.graph)
@unittest.skipIf(not RUN_CUDA, "cpp tests require CUDA")
def test_peephole_cuda(self):
- a = torch.tensor([0.4], device='cpu')
- b = torch.tensor([0.7], device='cuda')
- c = torch.tensor([0.7], device='cuda')
+ a = torch.tensor([0.4], device="cpu")
+ b = torch.tensor([0.7], device="cuda")
+ c = torch.tensor([0.7], device="cuda")
def f(x, y):
return x.type_as(y)
trace = torch.jit.trace(f, (a, c))
s = str(trace.graph)
- self.run_pass('peephole', trace.graph)
+ self.run_pass("peephole", trace.graph)
self.assertEqual(s, str(trace.graph))
trace = torch.jit.trace(f, (b, c))
- self.run_pass('peephole', trace.graph)
- self.run_pass('dce', trace.graph)
+ self.run_pass("peephole", trace.graph)
+ self.run_pass("dce", trace.graph)
FileCheck().check_not("type_as").run(str(trace.graph))
@_inline_everything
@@ -152,7 +155,7 @@ class TestPeephole(JitTestCase):
return refine(torch.tensor(4))
FileCheck().check("prim::unchecked_cast").run(test.graph)
- self.run_pass('peephole', test.graph)
+ self.run_pass("peephole", test.graph)
FileCheck().check_not("prim::unchecked_cast").run(test.graph)
# refinement not optimzied out
@@ -166,7 +169,7 @@ class TestPeephole(JitTestCase):
self.checkScript(is_int_tensor, (torch.tensor(2),))
self.checkScript(is_int_tensor, (torch.tensor(2.5),))
graph = torch.jit.script(is_int_tensor).graph
- self.run_pass('peephole', graph)
+ self.run_pass("peephole", graph)
FileCheck().check("prim::unchecked_cast").run(graph)
def test_short_circuit_optimization(self):
@@ -174,8 +177,11 @@ class TestPeephole(JitTestCase):
def const_expressions(x):
# type: (int) -> Tuple[bool, bool]
return x == 1 and False, x == 1 or True
- self.run_pass('constant_propagation', const_expressions.graph)
- FileCheck().check_not("prim::If").check_not("aten::eq").run(const_expressions.graph)
+
+ self.run_pass("constant_propagation", const_expressions.graph)
+ FileCheck().check_not("prim::If").check_not("aten::eq").run(
+ const_expressions.graph
+ )
self.assertEqual(const_expressions(1), (False, True))
@torch.jit.script
@@ -183,15 +189,18 @@ class TestPeephole(JitTestCase):
# type: (int) -> Tuple[bool, bool]
return x == 1 and True, x == 1 or False
- self.run_pass('peephole', redundant_expressions.graph)
+ self.run_pass("peephole", redundant_expressions.graph)
self.assertEqual(redundant_expressions(1), (True, True))
self.assertEqual(redundant_expressions(0), (False, False))
# and True / or False are removed from graph
- FileCheck().check("aten::eq").check_not("prim::If").run(redundant_expressions.graph)
+ FileCheck().check("aten::eq").check_not("prim::If").run(
+ redundant_expressions.graph
+ )
def test_conv_dim_folding(self):
modules = [nn.Conv1d, nn.Conv2d, nn.Conv3d]
for mod in modules:
+
class ConvDim(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -233,7 +242,6 @@ class TestPeephole(JitTestCase):
FileCheck().check_count("aten::sub", 2, exactly=True).run(op_graph)
FileCheck().check_count("aten::rsub", 0, exactly=True).run(op_graph)
-
def test_normalized_is_op(self):
def convertible_is_op(x: bool, y: bool):
return x is True, False is x, x is y
@@ -558,7 +566,7 @@ class TestPeephole(JitTestCase):
def foo4():
x = torch.zeros([2, 2])
- return x + 0.
+ return x + 0.0
funcs = foo1, foo2, foo3, foo4
inps = (torch.ones([2]),), (), (), ()
@@ -582,7 +590,7 @@ class TestPeephole(JitTestCase):
self.assertEqual(func(torch.ones([2, 2])), func_s(torch.ones([2, 2])))
def func(x):
- return (x + 0.) - 5
+ return (x + 0.0) - 5
func_s = torch.jit.script(func)
inp = next(func_s.graph.inputs())
@@ -640,6 +648,7 @@ class TestPeephole(JitTestCase):
return z
else:
return z2
+
out = next(foo.graph.findNode("prim::If").outputs())
out.setType(torch._C.OptionalType(torch._C.IntType.get()))
self.run_pass("peephole", foo.graph)
@@ -665,12 +674,13 @@ class TestPeephole(JitTestCase):
_6 = torch.add(1 * torch.sub(_3, 3) // 1, 1) / 1
return [_5, int(_6)]
- FileCheck().check("aten::add").check("aten::sub") \
- .check("aten::mul").check("aten::floordiv") \
- .check("aten::div").run(foo.graph)
+ FileCheck().check("aten::add").check("aten::sub").check("aten::mul").check(
+ "aten::floordiv"
+ ).check("aten::div").run(foo.graph)
self.run_pass("peephole", foo.graph)
- FileCheck().check("graph").check("):") \
- .check_next("ListConstruct").check_next("return").run(foo.graph)
+ FileCheck().check("graph").check("):").check_next("ListConstruct").check_next(
+ "return"
+ ).run(foo.graph)
self.assertEqual(foo(0, 1, 2, 3), [1, 3])
def test_peephole_dict_getitem_simple(self):
@@ -687,9 +697,9 @@ class TestPeephole(JitTestCase):
@torch.jit.script
def foo(a: int, b: int):
- d = {'0': a, '1': b}
- x = d['1']
- y = d['0']
+ d = {"0": a, "1": b}
+ x = d["1"]
+ y = d["0"]
return x, y
self.run_pass("peephole", foo.graph)
@@ -815,14 +825,14 @@ class TestPeephole(JitTestCase):
graph = torch.jit.script(foo).graph
self.run_pass("peephole", graph)
FileCheck().check_not("aten::slice").run(graph)
- self.checkScript(foo, (3, ))
+ self.checkScript(foo, (3,))
def test_peephole_slice_one_empty_arg(self):
def check_helper(fn: Callable[[int], None]) -> None:
graph = torch.jit.script(fn).graph
self.run_pass("peephole", graph)
FileCheck().check_not("aten::slice").run(graph)
- self.checkScript(fn, (3, ))
+ self.checkScript(fn, (3,))
def foo(x: int):
return [1, 2, x, 4, 5, 6, 7][1::2]
@@ -844,7 +854,7 @@ class TestPeephole(JitTestCase):
graph = torch.jit.script(fn).graph
self.run_pass("peephole", graph)
FileCheck().check_not("aten::slice").run(graph)
- self.checkScript(fn, (3, ))
+ self.checkScript(fn, (3,))
def foo(x: int):
return [1, 2, x, 4, 5, 6, 7][::2]
diff --git a/test/jit/test_profiler.py b/test/jit/test_profiler.py
index 5389751a5b..4bc45b7832 100644
--- a/test/jit/test_profiler.py
+++ b/test/jit/test_profiler.py
@@ -9,12 +9,15 @@ from torch.testing._internal.common_utils import skipIfTorchDynamo
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
-from torch.testing._internal.jit_utils import JitTestCase, warmup_backward, FileCheck
+from torch.testing._internal.jit_utils import FileCheck, JitTestCase, warmup_backward
+
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
@skipIfTorchDynamo()
class TestProfiler(JitTestCase):
@@ -58,8 +61,9 @@ class TestProfiler(JitTestCase):
# item & add should not get pulled into the fusion group -
# we expect to see Fusion Group (item / add) Fusion Group in ir dump
- FileCheck().check("TensorExpr").check("Scalar = aten::item").check_next("Tensor = aten::add").check("TensorExpr").run(g)
-
+ FileCheck().check("TensorExpr").check("Scalar = aten::item").check_next(
+ "Tensor = aten::add"
+ ).check("TensorExpr").run(g)
@torch.jit.script
def non_const_dtype(x, y, cond: bool):
@@ -70,7 +74,9 @@ class TestProfiler(JitTestCase):
non_const_dtype(x, x, True)
g = torch.jit.last_executed_optimized_graph()
# because dtype is non-const, sum should not get pulled into the Fusion Group
- FileCheck().check("TensorExpr").check("TensorExpr").check_not("aten::sum").run(g)
+ FileCheck().check("TensorExpr").check("TensorExpr").check_not("aten::sum").run(
+ g
+ )
def test_specialize_backward(self):
def test_fuse(a, b):
@@ -118,13 +124,15 @@ class TestProfiler(JitTestCase):
d = c * b
return d
- x = torch.tensor([.5])
+ x = torch.tensor([0.5])
for _ in range(3):
test_fuse(x, x)
g = torch.jit.last_executed_optimized_graph()
# Types should remain specialized for typecheck outputs & fusion outputs
- FileCheck().check("Double(").check_same("prim::TypeCheck").check_same("\n").check("Double").check_same("TensorExpr").run(g)
+ FileCheck().check("Double(").check_same("prim::TypeCheck").check_same(
+ "\n"
+ ).check("Double").check_same("TensorExpr").run(g)
# other outputs should not be specialized
FileCheck().check("Tensor = prim::If").run(g)
@@ -201,7 +209,9 @@ class TestProfiler(JitTestCase):
foo(x, y)
foo(x, y)
g = torch.jit.last_executed_optimized_graph()
- FileCheck().check("CallFunction").check_next("Tensor = prim::TupleUnpack").run(g)
+ FileCheck().check("CallFunction").check_next("Tensor = prim::TupleUnpack").run(
+ g
+ )
def test_autograd_fallback_graph(self):
@torch.jit.script
diff --git a/test/jit/test_python_builtins.py b/test/jit/test_python_builtins.py
index 34fd2fe036..2269631b4d 100644
--- a/test/jit/test_python_builtins.py
+++ b/test/jit/test_python_builtins.py
@@ -1,13 +1,13 @@
# Owner(s): ["oncall: jit"]
import os
+import random
import sys
import tempfile
-import random
from textwrap import dedent
import torch
-from torch.testing._internal.jit_utils import JitTestCase, execWrapper
+from torch.testing._internal.jit_utils import execWrapper, JitTestCase
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
@@ -20,14 +20,17 @@ if __name__ == "__main__":
"instead."
)
+
def get_fn(file_name, script_path):
import importlib.util
+
spec = importlib.util.spec_from_file_location(file_name, script_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
fn = module.fn
return fn
+
class TestPythonBuiltinOP(JitTestCase):
def test_add(self):
def func(a, b):
@@ -48,16 +51,18 @@ class TestPythonBuiltinOP(JitTestCase):
self.checkScript(func, (a, b), optimize=True)
def test_matmul_py3(self):
- code = dedent("""
+ code = dedent(
+ """
def fn(a, b):
return a @ b
- """)
+ """
+ )
with tempfile.TemporaryDirectory() as tmp_dir:
- script_path = os.path.join(tmp_dir, 'script.py')
- with open(script_path, 'w') as f:
+ script_path = os.path.join(tmp_dir, "script.py")
+ with open(script_path, "w") as f:
f.write(code)
- fn = get_fn('test_matmul_py3', script_path)
+ fn = get_fn("test_matmul_py3", script_path)
a = torch.rand(4, 3, requires_grad=True)
b = torch.rand(3, 2, requires_grad=True)
@@ -65,18 +70,18 @@ class TestPythonBuiltinOP(JitTestCase):
def test_pow(self):
def func(a, b):
- return a ** b
+ return a**b
def func2(a, b, c, d):
- return c + a ** b ** d
+ return c + a**b**d
def func3(a, b):
# type: (int, float) -> float
- return a ** b
+ return a**b
def func4():
# type: () -> float
- return 2 ** -2
+ return 2**-2
def func5(x, y):
return x.item() ** y.item()
@@ -90,7 +95,12 @@ class TestPythonBuiltinOP(JitTestCase):
self.checkScript(func3, (4, -0.5), optimize=True)
self.checkScript(func4, ())
- inputs = [torch.tensor(2), torch.tensor(-2), torch.tensor(.5), torch.tensor(.2)]
+ inputs = [
+ torch.tensor(2),
+ torch.tensor(-2),
+ torch.tensor(0.5),
+ torch.tensor(0.2),
+ ]
for x in inputs:
for y in inputs:
if x < 0:
@@ -100,7 +110,7 @@ class TestPythonBuiltinOP(JitTestCase):
def test_triple(self):
def func(x):
- return 3. * x
+ return 3.0 * x
x = torch.rand(1, dtype=torch.float, requires_grad=True)
self.checkScript(func, [x], optimize=True)
@@ -154,22 +164,36 @@ class TestPythonBuiltinOP(JitTestCase):
def test_stepped_tuple_slicing(self):
def check_slicing_tuple(slicing, tuple_type, tuple):
- template = dedent("""
+ template = dedent(
+ """
def func(x):
# type: ({}) -> Any
return x{}
- """)
+ """
+ )
self._check_code(template.format(tuple_type, slicing), "func", [tuple])
check_slicing_tuple("[-3:3:2]", "Tuple[int, int, int]", (0, 1, 2))
check_slicing_tuple("[::55]", "Tuple[int, int, int, int, int]", (0, 1, 2, 3, 4))
check_slicing_tuple("[:4:4]", "Tuple[int, int, int, int, int]", (0, 1, 2, 3, 4))
- check_slicing_tuple("[::-1]", "Tuple[int, int, int, int, int, int, int]", (0, 1, 2, 3, 4, 5, 6))
- check_slicing_tuple("[7:5:2]", "Tuple[int, int, int, int, int, int, int]", (0, 1, 2, 3, 4, 5, 6))
- check_slicing_tuple("[5:7:-2]", "Tuple[int, int, int, int, int, int, int]", (0, 1, 2, 3, 4, 5, 6))
+ check_slicing_tuple(
+ "[::-1]", "Tuple[int, int, int, int, int, int, int]", (0, 1, 2, 3, 4, 5, 6)
+ )
+ check_slicing_tuple(
+ "[7:5:2]", "Tuple[int, int, int, int, int, int, int]", (0, 1, 2, 3, 4, 5, 6)
+ )
+ check_slicing_tuple(
+ "[5:7:-2]",
+ "Tuple[int, int, int, int, int, int, int]",
+ (0, 1, 2, 3, 4, 5, 6),
+ )
check_slicing_tuple("[::-2]", "Tuple[int, int, int, int, int]", (0, 1, 2, 3, 4))
- check_slicing_tuple("[:4:-3]", "Tuple[int, int, int, int, int, int]", (0, 1, 2, 3, 4, 5))
- check_slicing_tuple("[3::-2]", "Tuple[int, int, int, int, int]", (0, 1, 2, 3, 4))
+ check_slicing_tuple(
+ "[:4:-3]", "Tuple[int, int, int, int, int, int]", (0, 1, 2, 3, 4, 5)
+ )
+ check_slicing_tuple(
+ "[3::-2]", "Tuple[int, int, int, int, int]", (0, 1, 2, 3, 4)
+ )
def test_index(self):
def consec(size, start=0):
@@ -177,10 +201,12 @@ class TestPythonBuiltinOP(JitTestCase):
return torch.arange(numel).view(size)
def check_indexing(indexing, tensor):
- template = dedent("""
+ template = dedent(
+ """
def func(x):
return x{}
- """)
+ """
+ )
self._check_code(template.format(indexing), "func", [tensor])
@@ -188,62 +214,66 @@ class TestPythonBuiltinOP(JitTestCase):
value1 = torch.tensor(value1)
value2 = torch.tensor(value2)
- template = dedent("""
+ template = dedent(
+ """
def func(x, value1, value2):
i = int(value1)
j = int(value2)
return x{}
- """)
+ """
+ )
- self._check_code(template.format(indexing), "func", [tensor, value1, value2])
+ self._check_code(
+ template.format(indexing), "func", [tensor, value1, value2]
+ )
# basic slices
- check_indexing('[0]', consec((3, 3)))
- check_indexing('[1]', consec((3, 3), 10))
- check_indexing('[2]', consec((3, 3), 19))
- check_indexing('[2]', consec((3,)))
- check_indexing('[-1]', consec((3, 3), 19))
- check_indexing('[0:2]', consec((3, 3, 3)))
- check_indexing('[1:-1]', consec((3, 3, 3)))
- check_indexing('[-3:-1]', consec((6, 3)))
- check_indexing('[1:]', consec((3, 3)))
- check_indexing('[:1]', consec((3, 3)))
- check_indexing('[:]', consec((3, 2)))
+ check_indexing("[0]", consec((3, 3)))
+ check_indexing("[1]", consec((3, 3), 10))
+ check_indexing("[2]", consec((3, 3), 19))
+ check_indexing("[2]", consec((3,)))
+ check_indexing("[-1]", consec((3, 3), 19))
+ check_indexing("[0:2]", consec((3, 3, 3)))
+ check_indexing("[1:-1]", consec((3, 3, 3)))
+ check_indexing("[-3:-1]", consec((6, 3)))
+ check_indexing("[1:]", consec((3, 3)))
+ check_indexing("[:1]", consec((3, 3)))
+ check_indexing("[:]", consec((3, 2)))
# multi-dim: indexes
- check_indexing('[0, 1]', consec((3, 3)))
- check_indexing('[0, 1]', consec((3, 3, 2)))
- check_indexing('[1, 0, 2]', consec((3, 3, 3)))
- check_indexing('[2, -1]', consec((3, 3)))
+ check_indexing("[0, 1]", consec((3, 3)))
+ check_indexing("[0, 1]", consec((3, 3, 2)))
+ check_indexing("[1, 0, 2]", consec((3, 3, 3)))
+ check_indexing("[2, -1]", consec((3, 3)))
# multi-dim: mixed slicing and indexing
- check_indexing('[0, 1:2]', consec((3, 3)))
- check_indexing('[0, :1]', consec((3, 3, 2)))
- check_indexing('[1, 2:]', consec((3, 3, 3)))
- check_indexing('[-1, 1:, 0]', consec((3, 3, 3, 3)))
- check_indexing('[1:, -1, 0]', consec((3, 3, 3, 3)))
- check_indexing('[-1, 2:, 1:2]', consec((3, 3, 3, 3)))
- check_indexing('[-1, 1:, 0]', consec((3, 3, 3, 3)))
- check_indexing('[-1, :, 0, 2]', consec((3, 3, 3, 3)))
+ check_indexing("[0, 1:2]", consec((3, 3)))
+ check_indexing("[0, :1]", consec((3, 3, 2)))
+ check_indexing("[1, 2:]", consec((3, 3, 3)))
+ check_indexing("[-1, 1:, 0]", consec((3, 3, 3, 3)))
+ check_indexing("[1:, -1, 0]", consec((3, 3, 3, 3)))
+ check_indexing("[-1, 2:, 1:2]", consec((3, 3, 3, 3)))
+ check_indexing("[-1, 1:, 0]", consec((3, 3, 3, 3)))
+ check_indexing("[-1, :, 0, 2]", consec((3, 3, 3, 3)))
# zero-sized slices
- check_indexing('[0:0]', consec((2, 2)))
- check_indexing('[0:0, 1]', consec((3, 3)))
+ check_indexing("[0:0]", consec((2, 2)))
+ check_indexing("[0:0, 1]", consec((3, 3)))
# trivial expression usage
- check_indexing('[1+1]', consec((3, 3)))
- check_indexing('[1:(0 + 2)]', consec((3, 3, 3)))
+ check_indexing("[1+1]", consec((3, 3)))
+ check_indexing("[1:(0 + 2)]", consec((3, 3, 3)))
# None for new dimensions
- check_indexing('[None, 0]', consec((3, 3)))
- check_indexing('[1, None]', consec((3, 3), 10))
- check_indexing('[None, None, 2]', consec((3, 3), 19))
- check_indexing('[None, 2, None]', consec((3,)))
- check_indexing('[0:2, None]', consec((3, 3, 3)))
- check_indexing('[None, 1:-1]', consec((3, 3, 3)))
- check_indexing('[None, -3:-1, None]', consec((6, 3)))
- check_indexing('[-1, None, 2:, None, 1:2]', consec((3, 3, 3, 3)))
- check_indexing('[None, -1, None, 2:, None, 1:2, None]', consec((3, 3, 3, 3)))
+ check_indexing("[None, 0]", consec((3, 3)))
+ check_indexing("[1, None]", consec((3, 3), 10))
+ check_indexing("[None, None, 2]", consec((3, 3), 19))
+ check_indexing("[None, 2, None]", consec((3,)))
+ check_indexing("[0:2, None]", consec((3, 3, 3)))
+ check_indexing("[None, 1:-1]", consec((3, 3, 3)))
+ check_indexing("[None, -3:-1, None]", consec((6, 3)))
+ check_indexing("[-1, None, 2:, None, 1:2]", consec((3, 3, 3, 3)))
+ check_indexing("[None, -1, None, 2:, None, 1:2, None]", consec((3, 3, 3, 3)))
# dynamic expression usage
check_dynamic_indexing("[i + j]", consec((3, 3)), 0, 1)
@@ -257,10 +287,12 @@ class TestPythonBuiltinOP(JitTestCase):
def check_indexing(indexing, tensor, **kwargs):
indices_dict = kwargs
- template = dedent("""
+ template = dedent(
+ """
def func(x{formals}):
return x{expr}
- """)
+ """
+ )
formals = []
values = []
@@ -268,17 +300,18 @@ class TestPythonBuiltinOP(JitTestCase):
formals.append(formal)
values.append(value)
- formals = ''.join(map(', {}'.format, formals))
+ formals = "".join(map(", {}".format, formals))
inputs = [tensor] + values
- self._check_code(template.format(formals=formals, expr=indexing),
- "func", inputs)
+ self._check_code(
+ template.format(formals=formals, expr=indexing), "func", inputs
+ )
# Indexing with tensor (basic)
- check_indexing('[i]', consec((3, 3)), i=torch.tensor([0]))
- check_indexing('[i]', consec((3, 3)), i=torch.tensor(1))
- check_indexing('[i]', consec((3, 3)), i=torch.tensor([-2]))
- check_indexing('[i]', consec((3, 3), 2), i=torch.tensor([0, 0]))
- check_indexing('[i]', consec((3, 3, 2, 2)), i=torch.tensor([0, -2, 1]))
+ check_indexing("[i]", consec((3, 3)), i=torch.tensor([0]))
+ check_indexing("[i]", consec((3, 3)), i=torch.tensor(1))
+ check_indexing("[i]", consec((3, 3)), i=torch.tensor([-2]))
+ check_indexing("[i]", consec((3, 3), 2), i=torch.tensor([0, 0]))
+ check_indexing("[i]", consec((3, 3, 2, 2)), i=torch.tensor([0, -2, 1]))
# NB: indexing with tensors and indexing with sequences can be implemented
# in a very similar way (sequences are converted to tensors), so only one
@@ -290,49 +323,49 @@ class TestPythonBuiltinOP(JitTestCase):
inp = consec((4, 8, 5))
to_check = [
# [[0, 1, 3]]
- ['[i]', {'i': [0, 1, 3]}],
+ ["[i]", {"i": [0, 1, 3]}],
# [[0, 2], [1, 3]]
- ['[i, j]', {'i': [0, 2], 'j': [1, 3]}],
+ ["[i, j]", {"i": [0, 2], "j": [1, 3]}],
# [[[0, 1], [0, 1]], [[0, 1], [0, 1]]]
- ['[i, j]', {'i': [[0, 1], [0, 1]], 'j': [[0, 1], [0, 1]]}],
+ ["[i, j]", {"i": [[0, 1], [0, 1]], "j": [[0, 1], [0, 1]]}],
# [[0, 2], [1, 3], [1, 1]]
- ['[i, j, k]', {'i': [0, 2], 'j': [1, 3], 'k': [1, 1]}],
+ ["[i, j, k]", {"i": [0, 2], "j": [1, 3], "k": [1, 1]}],
# [[0, 2], 1, [1, 1]]
- ['[i, j, k]', {'i': [0, 2], 'j': 1, 'k': [1, 1]}],
+ ["[i, j, k]", {"i": [0, 2], "j": 1, "k": [1, 1]}],
# [:, :, [0, 3, 4]]
- ['[:, :, i]', {'i': [0, 3, 4]}],
+ ["[:, :, i]", {"i": [0, 3, 4]}],
# [:, [2, 4, 5, 7], 2:4]
- ['[:, i, 2:4]', {'i': [0, 2, 3]}],
+ ["[:, i, 2:4]", {"i": [0, 2, 3]}],
# [[2, 3], :, :]
- ['[i, :, :]', {'i': [2, 3]}],
+ ["[i, :, :]", {"i": [2, 3]}],
# [:, [0, 2, 3], [1, 3, 4]]
- ['[:, i, j]', {'i': [0, 2, 3], 'j': [1, 3, 4]}],
+ ["[:, i, j]", {"i": [0, 2, 3], "j": [1, 3, 4]}],
# [:, [0], [1, 2, 4]]
- ['[:, i, j]', {'i': [0], 'j': [1, 2, 4]}],
+ ["[:, i, j]", {"i": [0], "j": [1, 2, 4]}],
# [:, [0, 1, 3], [4]]
- ['[:, i, j]', {'i': [0, 1, 3], 'j': [4]}],
+ ["[:, i, j]", {"i": [0, 1, 3], "j": [4]}],
# [:, [[0, 1], [1, 0]], [[2, 3]]]
- ['[:, i, j]', {'i': [[0, 1], [1, 0]], 'j': [[2, 3]]}],
+ ["[:, i, j]", {"i": [[0, 1], [1, 0]], "j": [[2, 3]]}],
# [:, [[0, 1], [2, 3]], [[0]]]
- ['[:, i, j]', {'i': [[0, 1], [2, 3]], 'j': [[0]]}],
+ ["[:, i, j]", {"i": [[0, 1], [2, 3]], "j": [[0]]}],
# [:, [[5, 6]], [[0, 3], [4, 4]]]
- ['[:, i, j]', {'i': [[5, 6]], 'j': [[0, 3], [4, 4]]}],
+ ["[:, i, j]", {"i": [[5, 6]], "j": [[0, 3], [4, 4]]}],
# [[0, 2, 3], [1, 3, 4], :]
- ['[i, j, :]', {'i': [0, 2, 3], 'j': [1, 3, 4]}],
+ ["[i, j, :]", {"i": [0, 2, 3], "j": [1, 3, 4]}],
# [0, [1, 2, 4], :]
- ['[i, j, :]', {'i': 0, 'j': [1, 2, 4]}],
+ ["[i, j, :]", {"i": 0, "j": [1, 2, 4]}],
# [[0, 1, 3], 4, :]
- ['[i, j, :]', {'i': [0, 1, 3], 'j': 4}],
+ ["[i, j, :]", {"i": [0, 1, 3], "j": 4}],
# [[[0, 1], [1, 0]], [[2, 1], [3, 5]], :]
- ['[i, j, :]', {'i': [[0, 1], [1, 0]], 'j': [[2, 1], [3, 5]]}],
+ ["[i, j, :]", {"i": [[0, 1], [1, 0]], "j": [[2, 1], [3, 5]]}],
# [[[0, 1], [1, 0]], [[2, 3]], :]
- ['[i, j, :]', {'i': [[0, 1], [1, 0]], 'j': [[2, 3]]}],
+ ["[i, j, :]", {"i": [[0, 1], [1, 0]], "j": [[2, 3]]}],
# [[[0, 1], [2, 3]], [[0]], :]
- ['[i, j, :]', {'i': [[0, 1], [2, 3]], 'j': [[0]]}],
+ ["[i, j, :]", {"i": [[0, 1], [2, 3]], "j": [[0]]}],
# [[[2, 1]], [[0, 3], [4, 4]], :]
- ['[i, j, :]', {'i': [[2, 1]], 'j': [[0, 3], [4, 4]]}],
+ ["[i, j, :]", {"i": [[2, 1]], "j": [[0, 3], [4, 4]]}],
# [[[2]], [[0, 3], [4, 1]], 0:2]
- ['[i, j, 0:2]', {'i': [[2]], 'j': [[0, 3], [4, 1]]}],
+ ["[i, j, 0:2]", {"i": [[2]], "j": [[0, 3], [4, 1]]}],
]
for expr, argdict in to_check:
@@ -372,29 +405,35 @@ class TestPythonBuiltinOP(JitTestCase):
for _ in range(100):
indices = [random.choice(vals) for _ in range(4)]
indices[random.randint(0, len(indices) - 1)] = "..."
- test_str = dedent("""
+ test_str = dedent(
+ """
def f():
x = torch.ones(10, 9, 8, 7, 6)
return x{indices}.shape
- """.format(indices=indices))
- test_str = test_str.replace(r"'", r'')
+ """.format(
+ indices=indices
+ )
+ )
+ test_str = test_str.replace(r"'", r"")
scope = {}
execWrapper(test_str, globals(), scope)
cu = torch.jit.CompilationUnit(test_str)
res1 = cu.f()
- res2 = scope['f']()
+ res2 = scope["f"]()
self.assertEqual(res1, res2)
def test_inf(self):
@torch.jit.script
def foo(a):
- return a < float('inf')
+ return a < float("inf")
+
s = torch.rand(1)
self.assertTrue(foo(s))
@torch.jit.script
def bar(a):
- return a > float('-inf')
+ return a > float("-inf")
+
s = torch.rand(1)
self.assertTrue(foo(s))
@@ -414,19 +453,22 @@ class TestPythonBuiltinOP(JitTestCase):
def test_str_to_float(self):
@torch.jit.script
def foo(a):
- return 0.5 == float('0.5 hello')
+ return 0.5 == float("0.5 hello")
+
s = torch.rand(1)
with self.assertRaisesRegex(RuntimeError, "could not convert string to float"):
self.assertTrue(foo(s))
@torch.jit.script
def foo(a):
- return 0.5 == float('0.5')
+ return 0.5 == float("0.5")
+
s = torch.rand(1)
self.assertTrue(foo(s))
@torch.jit.script
def foo(a):
- return 0. == float('0')
+ return 0.0 == float("0")
+
s = torch.rand(1)
self.assertTrue(foo(s))
diff --git a/test/jit/test_python_ir.py b/test/jit/test_python_ir.py
index f3a17e3062..ff00823f11 100644
--- a/test/jit/test_python_ir.py
+++ b/test/jit/test_python_ir.py
@@ -1,22 +1,26 @@
# Owner(s): ["oncall: jit"]
+import unittest
+
+import numpy as np
import torch
from torch.testing import FileCheck
-from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import IS_MACOS
+from torch.testing._internal.jit_utils import JitTestCase
-import numpy as np
-import unittest
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
class TestPythonIr(JitTestCase):
def test_param_strides(self):
def trace_me(arg):
return arg
+
t = torch.zeros(1, 3, 16, 16)
traced = torch.jit.trace(trace_me, t)
value = list(traced.graph.param_node().outputs())[0]
@@ -78,8 +82,12 @@ class TestPythonIr(JitTestCase):
g = foo.graph
muls = g.findAllNodes("aten::mul")
- scalar_muls = filter(lambda x: x.matches("aten::mul(Tensor self, Scalar other) -> Tensor"), muls)
- mul_constant_int = filter(lambda x: isinstance(list(x.inputs())[1].toIValue(), int), scalar_muls)
+ scalar_muls = filter(
+ lambda x: x.matches("aten::mul(Tensor self, Scalar other) -> Tensor"), muls
+ )
+ mul_constant_int = filter(
+ lambda x: isinstance(list(x.inputs())[1].toIValue(), int), scalar_muls
+ )
for mul in mul_constant_int:
with g.insert_point_guard(mul):
outputs = g.insertGraph(unrolled_mul.graph, list(mul.inputs()))
diff --git a/test/jit/test_recursive_script.py b/test/jit/test_recursive_script.py
index 6ade2c1b3e..0dfa00adc6 100644
--- a/test/jit/test_recursive_script.py
+++ b/test/jit/test_recursive_script.py
@@ -5,25 +5,31 @@ import re
import sys
import types
import typing
-import typing_extensions
-from typing import List, Dict, Optional, Tuple
+from collections import OrderedDict
+from typing import Dict, List, Optional, Tuple
import torch
import torch.jit.frontend
import torch.nn as nn
+import typing_extensions
from torch import Tensor
from torch.testing import FileCheck
-from collections import OrderedDict
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
-from torch.testing._internal.jit_utils import JitTestCase, _tmp_donotuse_dont_inline_everything
+from torch.testing._internal.jit_utils import (
+ _tmp_donotuse_dont_inline_everything,
+ JitTestCase,
+)
+
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
class TestRecursiveScript(JitTestCase):
def test_inferred_nonetype(self):
@@ -87,7 +93,9 @@ class TestRecursiveScript(JitTestCase):
return self.fn(x)
m = M(fn)
- with self.assertRaisesRegexWithHighlight(RuntimeError, "failed to compile", "i_dont_exist"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "failed to compile", "i_dont_exist"
+ ):
torch.jit.script(m)
def test_init_error(self):
@@ -119,12 +127,12 @@ class TestRecursiveScript(JitTestCase):
# sm1 was created while m had training = True
self.assertTrue(sm1.training)
- self.assertEqual(sm1.training, sm1._c.getattr('training'))
+ self.assertEqual(sm1.training, sm1._c.getattr("training"))
self.assertEqual(sm1(), 2)
# sm2 was created after m was eval'ed
self.assertFalse(sm2.training)
- self.assertEqual(sm2.training, sm2._c.getattr('training'))
+ self.assertEqual(sm2.training, sm2._c.getattr("training"))
self.assertEqual(sm2(), 0)
def test_module_name(self):
@@ -165,7 +173,7 @@ class TestRecursiveScript(JitTestCase):
def test_constants_with_final(self):
class M1(torch.nn.Module):
- x : torch.jit.Final[int]
+ x: torch.jit.Final[int]
def __init__(self):
super().__init__()
@@ -177,7 +185,7 @@ class TestRecursiveScript(JitTestCase):
self.checkModule(M1(), (torch.randn(2, 2),))
class M2(torch.nn.Module):
- x : typing_extensions.Final[int]
+ x: typing_extensions.Final[int]
def __init__(self):
super().__init__()
@@ -189,7 +197,7 @@ class TestRecursiveScript(JitTestCase):
self.checkModule(M2(), (torch.randn(2, 2),))
class M3(torch.nn.Module):
- x : typing.Final[int]
+ x: typing.Final[int]
def __init__(self):
super().__init__()
@@ -206,12 +214,15 @@ class TestRecursiveScript(JitTestCase):
def unscriptable(self):
return "a" + 200
-
class TestModule(torch.nn.Module):
def forward(self, x):
return MyScriptClass()
- with self.assertRaisesRegexWithHighlight(torch.jit.frontend.FrontendError, "Cannot instantiate class", "MyScriptClass"):
+ with self.assertRaisesRegexWithHighlight(
+ torch.jit.frontend.FrontendError,
+ "Cannot instantiate class",
+ "MyScriptClass",
+ ):
t = torch.jit.script(TestModule())
def test_method_call(self):
@@ -246,13 +257,13 @@ class TestRecursiveScript(JitTestCase):
print(m)
f = FileCheck()
- f.check('MyModule')
- f.check('Conv2d')
- f.check('Linear')
- f.check('Submodule')
+ f.check("MyModule")
+ f.check("Conv2d")
+ f.check("Linear")
+ f.check("Submodule")
f.run(out[0])
- self.assertEqual(m.original_name, 'MyModule')
+ self.assertEqual(m.original_name, "MyModule")
def test_dir(self):
def test_module_dir(mod):
@@ -260,8 +271,17 @@ class TestRecursiveScript(JitTestCase):
scripted_mod = torch.jit.script(mod)
dir_scripted = set(dir(scripted_mod))
# set not currently copied over
- ignore_set = ["training", "__delitem__", "__setitem__", "clear", "items",
- "keys", "pop", "update", "values"]
+ ignore_set = [
+ "training",
+ "__delitem__",
+ "__setitem__",
+ "clear",
+ "items",
+ "keys",
+ "pop",
+ "update",
+ "values",
+ ]
for attr in dir_set:
if attr in ignore_set:
continue
@@ -283,7 +303,9 @@ class TestRecursiveScript(JitTestCase):
linear = nn.Linear(10, 10)
test_module_dir(nn.Sequential(conv, linear))
- test_module_dir(nn.ModuleDict(OrderedDict([("conv", conv), ("linear", linear)])))
+ test_module_dir(
+ nn.ModuleDict(OrderedDict([("conv", conv), ("linear", linear)]))
+ )
def test_class_compile(self):
def other_fn(a: int, b: Tensor) -> Tensor:
@@ -296,7 +318,6 @@ class TestRecursiveScript(JitTestCase):
def helper(self, a):
return self.x + a + other_fn(self.x, a)
-
class N(torch.nn.Module):
def forward(self, x):
b = B(x)
@@ -411,7 +432,7 @@ class TestRecursiveScript(JitTestCase):
def test_module_basic(self):
class Other(torch.nn.Module):
- __constants__ = ['x']
+ __constants__ = ["x"]
def __init__(self, x):
super().__init__()
@@ -426,7 +447,6 @@ class TestRecursiveScript(JitTestCase):
def forward(self, t):
return t + self.x + self.param
-
class M(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -439,7 +459,7 @@ class TestRecursiveScript(JitTestCase):
def test_module_function_export(self):
class Other(torch.nn.Module):
- __constants__ = ['x']
+ __constants__ = ["x"]
def __init__(self, x):
super().__init__()
@@ -453,7 +473,6 @@ class TestRecursiveScript(JitTestCase):
def forward(self, t):
return t + self.x + self.param
-
class M(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -473,9 +492,7 @@ class TestRecursiveScript(JitTestCase):
def __init__(self):
super().__init__()
self.sequential = nn.Sequential(
- Inner(),
- Inner(),
- nn.Sequential(Inner(), Inner())
+ Inner(), Inner(), nn.Sequential(Inner(), Inner())
)
self.module_list = nn.ModuleList([Inner(), Inner()])
@@ -511,12 +528,14 @@ class TestRecursiveScript(JitTestCase):
self.sequential = nn.Sequential(
SeluButReluWhenScripted(),
SeluButReluWhenScripted(),
- nn.Sequential(SeluButReluWhenScripted(), shared, SeluButReluWhenScripted()),
+ nn.Sequential(
+ SeluButReluWhenScripted(), shared, SeluButReluWhenScripted()
+ ),
shared,
)
- self.module_list = nn.ModuleList([SeluButReluWhenScripted(),
- shared,
- SeluButReluWhenScripted()])
+ self.module_list = nn.ModuleList(
+ [SeluButReluWhenScripted(), shared, SeluButReluWhenScripted()]
+ )
def forward(self, x):
for mod in self.module_list:
@@ -553,7 +572,8 @@ class TestRecursiveScript(JitTestCase):
self.assertEqual(obj(1, 2), 3)
self.assertEqual(obj(1, 2, 3, 4), 10)
with self.assertRaisesRegex(
- torch.jit.frontend.NotSupportedError, expected_regex="can't take variable number of arguments"
+ torch.jit.frontend.NotSupportedError,
+ expected_regex="can't take variable number of arguments",
):
torch.jit.script(obj)
@@ -568,7 +588,10 @@ class TestRecursiveScript(JitTestCase):
self.assertEqual(jit_obj(1, 2), 3)
with self.assertRaisesRegex(
- RuntimeError, expected_regex=re.escape("expected at most 2 argument(s) but received 4 argument(s)")
+ RuntimeError,
+ expected_regex=re.escape(
+ "expected at most 2 argument(s) but received 4 argument(s)"
+ ),
):
jit_obj(1, 2, 3, 4)
@@ -598,27 +621,26 @@ class TestRecursiveScript(JitTestCase):
def __getstate__(self):
return (self.a, self.inner)
-
untyped_values = (
- ('my_dict', {"I": "am", "a test": "test"}),
- ('my_float', 2.3),
- ('my_int', 99),
- ('my_bool', False),
- ('my_tuple', (1, 2, 3, 4)),
- ('my_list', [(1, 2), (3, 4)]),
+ ("my_dict", {"I": "am", "a test": "test"}),
+ ("my_float", 2.3),
+ ("my_int", 99),
+ ("my_bool", False),
+ ("my_tuple", (1, 2, 3, 4)),
+ ("my_list", [(1, 2), (3, 4)]),
# ('my_tensor', torch.randn(2, 2)),
- ('my_int_list', [1, 2, 3, 4]),
+ ("my_int_list", [1, 2, 3, 4]),
# ('my_tensor_list', [torch.ones(2, 2) + i for i in range(4)]),
- ('my_bool_list', [True, True, False, True]),
- ('my_float_list', [1., 2., 3., 4.]),
- ('my_str_list', ['hello', 'bye']),
+ ("my_bool_list", [True, True, False, True]),
+ ("my_float_list", [1.0, 2.0, 3.0, 4.0]),
+ ("my_str_list", ["hello", "bye"]),
)
typed_values = (
- ('my_empty_list', []),
- ('my_empty_dict', {}),
- ('my_none', None),
- ('my_object', Foo()),
- ('my_object2', SFoo()),
+ ("my_empty_list", []),
+ ("my_empty_dict", {}),
+ ("my_none", None),
+ ("my_object", Foo()),
+ ("my_object2", SFoo()),
)
class M(torch.nn.Module):
@@ -659,11 +681,11 @@ class TestRecursiveScript(JitTestCase):
# since there's no string frontend for Python classes (so the `define`)
# trick doesn't work.
M.__annotations__ = {
- 'my_empty_list': List[int],
- 'my_empty_dict': Dict[str, int],
- 'my_none': Optional[int],
- 'my_object': Foo,
- 'my_object2': SFoo,
+ "my_empty_list": List[int],
+ "my_empty_dict": Dict[str, int],
+ "my_none": Optional[int],
+ "my_object": Foo,
+ "my_object2": SFoo,
}
m = M()
@@ -694,7 +716,7 @@ class TestRecursiveScript(JitTestCase):
return self.encoder(x)
m = M()
- self.checkModule(m, (torch.randn(5, 5), ))
+ self.checkModule(m, (torch.randn(5, 5),))
def test_inner_traced_module(self):
class Dummy(nn.Module):
@@ -715,12 +737,13 @@ class TestRecursiveScript(JitTestCase):
dummy = torch.jit.trace(Dummy(), torch.randn(1, 2))
dummies = nn.ModuleList([dummy])
model = Model(dummies)
- self.checkModule(model, (torch.rand(5, 5), ))
+ self.checkModule(model, (torch.rand(5, 5),))
def test_script_loaded_module(self):
"""
Test that we can hold a loaded ScriptModule as a submodule.
"""
+
class Dummy(nn.Module):
def forward(self, x):
return x
@@ -736,7 +759,7 @@ class TestRecursiveScript(JitTestCase):
def forward(self, input):
return self.encoder(input)
- self.checkModule(ContainsLoaded(), (torch.rand(2, 3), ))
+ self.checkModule(ContainsLoaded(), (torch.rand(2, 3),))
def test_optional_module(self):
class Dummy(nn.Module):
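
A minimal sketch (editorial, not part of the patch) of the recursive-scripting behaviour the test_recursive_script.py changes above exercise: scripting the outer module compiles its submodules automatically and keeps their parameters, so eager and scripted outputs match. The tiny Inner/Outer modules here are illustrative only.

import torch
import torch.nn as nn

class Inner(nn.Module):
    def forward(self, x):
        return torch.relu(x)

class Outer(nn.Module):
    def __init__(self):
        super().__init__()
        # submodules are compiled recursively when the outer module is scripted
        self.seq = nn.Sequential(Inner(), nn.Linear(4, 4))

    def forward(self, x):
        return self.seq(x)

eager = Outer()
scripted = torch.jit.script(eager)   # Inner and Linear are scripted along with Outer
x = torch.randn(2, 4)
assert torch.allclose(scripted(x), eager(x))
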
diff --git a/test/jit/test_remove_mutation.py b/test/jit/test_remove_mutation.py
index 3d76f20144..89f963e45b 100644
--- a/test/jit/test_remove_mutation.py
+++ b/test/jit/test_remove_mutation.py
@@ -2,20 +2,23 @@
import os
import sys
+from typing import List
import torch
from torch.testing import FileCheck
-from typing import List
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
-from torch.testing._internal.jit_utils import JitTestCase, freeze_rng_state
+from torch.testing._internal.jit_utils import freeze_rng_state, JitTestCase
+
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
class TestRemoveMutation(JitTestCase):
def test_aten_inplace(self):
@@ -26,7 +29,7 @@ class TestRemoveMutation(JitTestCase):
fn = torch.jit.script(test_not_new_alias)
graph = fn.graph
- self.run_pass('remove_mutation', graph)
+ self.run_pass("remove_mutation", graph)
FileCheck().check("aten::add_").run(graph)
self.assertEqual(fn(torch.ones([2, 2])), test_not_new_alias(torch.ones([2, 2])))
@@ -38,7 +41,7 @@ class TestRemoveMutation(JitTestCase):
# there is no functional equivalent of x[0] = ...
fn = torch.jit.script(test_no_lowering)
graph = fn.graph
- self.run_pass('remove_mutation', graph)
+ self.run_pass("remove_mutation", graph)
FileCheck().check("aten::copy_").run(graph)
self.assertEqual(fn(), test_no_lowering())
@@ -50,7 +53,7 @@ class TestRemoveMutation(JitTestCase):
fn = torch.jit.script(test_move_before_not_valid)
graph = fn.graph
- self.run_pass('remove_mutation', graph)
+ self.run_pass("remove_mutation", graph)
FileCheck().check("aten::add_").run(graph)
self.assertEqual(fn(), test_move_before_not_valid())
@@ -63,7 +66,7 @@ class TestRemoveMutation(JitTestCase):
fn = torch.jit.script(test_successful)
graph = fn.graph
- self.run_pass('remove_mutation', graph)
+ self.run_pass("remove_mutation", graph)
FileCheck().check_not("aten::add_").run(graph)
self.assertEqual(test_successful(), fn())
@@ -77,7 +80,7 @@ class TestRemoveMutation(JitTestCase):
fn = torch.jit.script(test_intermediary_use)
graph = fn.graph
FileCheck().check_count("aten::add_", 2).run(graph)
- self.run_pass('remove_mutation', graph)
+ self.run_pass("remove_mutation", graph)
# Unable to remove the second add_ because of the y = x + 4 use
# In the future we could duplicate the value of x as a temporary and replace
# its intermediary use (so long as aliasing is safe)
@@ -96,7 +99,7 @@ class TestRemoveMutation(JitTestCase):
out_eager = foo(torch.tensor(5), True)
foo_script = torch.jit.script(foo)
FileCheck().check("aten::add_").run(foo_script.graph)
- self.run_pass('remove_mutation', foo_script.graph)
+ self.run_pass("remove_mutation", foo_script.graph)
FileCheck().check_not("aten::add_").run(foo_script.graph)
self.assertEqual(out_eager, foo_script(torch.tensor(5), True))
@@ -113,8 +116,8 @@ class TestRemoveMutation(JitTestCase):
y = x.add_(2)
return y, li
- self.run_pass('inline', foo.graph)
- self.run_pass('remove_mutation', foo.graph)
+ self.run_pass("inline", foo.graph)
+ self.run_pass("remove_mutation", foo.graph)
FileCheck().check("aten::add_").run(foo.graph)
@torch.jit.script
@@ -126,8 +129,8 @@ class TestRemoveMutation(JitTestCase):
z = x.add_(2)
return z
- self.run_pass('inline', foo.graph)
- self.run_pass('remove_mutation', foo.graph)
+ self.run_pass("inline", foo.graph)
+ self.run_pass("remove_mutation", foo.graph)
FileCheck().check("aten::add_").run(foo.graph)
def test_special_mapped_op(self):
@@ -140,7 +143,7 @@ class TestRemoveMutation(JitTestCase):
fn = torch.jit.script(test_successful)
graph = fn.graph
- self.run_pass('remove_mutation', graph)
+ self.run_pass("remove_mutation", graph)
FileCheck().check_not("aten::zero_").check_not("aten::fill_").run(graph)
self.assertEqual(test_successful(), fn())
@@ -154,8 +157,8 @@ class TestRemoveMutation(JitTestCase):
fn = torch.jit.script(test_successful)
graph = fn.graph
- self.run_pass('remove_mutation', graph)
- FileCheck().check_not('aten::fill_').run(graph)
+ self.run_pass("remove_mutation", graph)
+ FileCheck().check_not("aten::fill_").run(graph)
def normal():
# NOTE: For some unknown reason, the
@@ -167,7 +170,7 @@ class TestRemoveMutation(JitTestCase):
fn = torch.jit.script(normal)
graph = fn.graph
- self.run_pass('remove_mutation', graph)
+ self.run_pass("remove_mutation", graph)
FileCheck().check_not("normal_").run(graph)
with freeze_rng_state():
out_eager = normal()
@@ -181,10 +184,12 @@ class TestRemoveMutation(JitTestCase):
fn = torch.jit.script(successful_remove)
graph = fn.graph
- self.run_pass('loop_unrolling', graph)
- self.run_pass('remove_mutation', graph)
- self.run_pass('constant_propagation', graph)
- FileCheck().check("graph").check_next("Constant").check_next("return").run(graph)
+ self.run_pass("loop_unrolling", graph)
+ self.run_pass("remove_mutation", graph)
+ self.run_pass("constant_propagation", graph)
+ FileCheck().check("graph").check_next("Constant").check_next("return").run(
+ graph
+ )
self.assertEqual(successful_remove(), successful_remove())
def intermediary_use():
@@ -196,14 +201,14 @@ class TestRemoveMutation(JitTestCase):
fn = torch.jit.script(intermediary_use)
graph = fn.graph
FileCheck().check("append").run(graph)
- self.run_pass('remove_mutation', graph)
+ self.run_pass("remove_mutation", graph)
# it is possible to remove the append here but we don't currently have the logic for it
FileCheck().check_not("append").run(graph)
self.assertEqual(intermediary_use(), fn())
def test_lists_insert(self):
def successful_remove():
- a : List[int] = []
+ a: List[int] = []
a.insert(0, 1)
a.insert(0, 2)
a.insert(-10, 3)
@@ -215,7 +220,9 @@ class TestRemoveMutation(JitTestCase):
graph = fn.graph
torch._C._jit_pass_remove_mutation(graph)
torch._C._jit_pass_constant_propagation(graph)
- FileCheck().check("graph").check_next("Constant").check_next("return").run(graph)
+ FileCheck().check("graph").check_next("Constant").check_next("return").run(
+ graph
+ )
self.assertEqual(successful_remove(), fn())
def test_list_indexing_removal(self):
@@ -271,6 +278,7 @@ class TestRemoveMutation(JitTestCase):
def test_common_pytorch_list_ops(self):
for op in ["cat", "stack", "vstack", "hstack", "dstack"]:
+
class OpMod(torch.nn.Module):
def __init__(self, op):
super().__init__()
@@ -285,7 +293,7 @@ class TestRemoveMutation(JitTestCase):
torch_op = getattr(torch, op)
mod = OpMod(torch_op)
mod_script = torch.jit.script(mod)
- self.run_pass('remove_mutation', mod_script.forward.graph)
+ self.run_pass("remove_mutation", mod_script.forward.graph)
FileCheck().check_not("aten::add_").run(mod_script.forward.graph)
self.assertEqual(mod(), mod_script())
@@ -299,7 +307,6 @@ class TestRemoveMutation(JitTestCase):
self.assertEqual(sums, [ten.sum() for ten in result])
-
@torch.jit.script
def test_multiple_uses():
x = torch.tensor([1, 2, 3, 4])
@@ -307,5 +314,5 @@ class TestRemoveMutation(JitTestCase):
y = [x, x]
return torch.cat(y), y
- self.run_pass('remove_mutation', mod_script.forward.graph)
+ self.run_pass("remove_mutation", mod_script.forward.graph)
FileCheck().check("aten::add_").run(test_multiple_uses.graph)
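
For orientation, a minimal sketch of the remove_mutation pattern that the test_remove_mutation.py hunks above keep exercising; it uses the same pass entry point and FileCheck calls seen in the diff, with a small illustrative function of my own.

import torch
from torch.testing import FileCheck

def returns_fresh_tensor():
    x = torch.tensor([2, 2])
    x.add_(1)   # in-place add on a tensor created inside the function
    return x

scripted = torch.jit.script(returns_fresh_tensor)
graph = scripted.graph
torch._C._jit_pass_remove_mutation(graph)         # same pass the tests invoke via self.run_pass
FileCheck().check_not("aten::add_").run(graph)    # the in-place op has been functionalized
assert torch.equal(returns_fresh_tensor(), scripted())
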
diff --git a/test/jit/test_save_load.py b/test/jit/test_save_load.py
index 0774b2477f..d16f039798 100644
--- a/test/jit/test_save_load.py
+++ b/test/jit/test_save_load.py
@@ -8,12 +8,12 @@ from typing import NamedTuple, Optional
import torch
from torch import Tensor
-from torch.testing._internal.common_utils import TemporaryFileName, skipIfTorchDynamo
+from torch.testing._internal.common_utils import skipIfTorchDynamo, TemporaryFileName
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
-from torch.testing._internal.jit_utils import JitTestCase, clear_class_registry
+from torch.testing._internal.jit_utils import clear_class_registry, JitTestCase
if __name__ == "__main__":
@@ -439,7 +439,7 @@ class TestSaveLoad(JitTestCase):
global FooTuple # see [local resolution in python]
class FooTuple(NamedTuple):
- a: 'int'
+ a: "int"
class MyModule(torch.nn.Module):
def forward(self, x: FooTuple) -> torch.Tensor:
@@ -608,7 +608,6 @@ class TestSaveLoad(JitTestCase):
self.assertTrue(m_params["bar.bias"].is_cpu)
self.assertTrue(m_loaded_params["bar.bias"].is_cpu)
-
def test_save_load_with_saved_traced_inputs(self):
"""
Check that saving and loading with traced inputs works as expected
@@ -637,14 +636,18 @@ class TestSaveLoad(JitTestCase):
# Validate that with no input specified the traced inputs are stored
traced_module = torch.jit.trace(module, input_tensor)
traced_inputs = list(traced_module.graph.inputs())
- self.assertEqual(traced_module._c._retrieve_traced_inputs()['forward'], [input_tensor])
+ self.assertEqual(
+ traced_module._c._retrieve_traced_inputs()["forward"], [input_tensor]
+ )
with TemporaryFileName() as fname:
path = pathlib.Path(fname)
traced_module.save(path)
loaded_module = torch.jit.load(path, _restore_shapes=True)
loaded_inputs = list(loaded_module.graph.inputs())
self.assertEqual(traced_inputs[1].type(), loaded_inputs[1].type())
- self.assertEqual(traced_inputs[1].type().sizes(), loaded_inputs[1].type().sizes())
+ self.assertEqual(
+ traced_inputs[1].type().sizes(), loaded_inputs[1].type().sizes()
+ )
# Validate that if no shapes are requested previous functionality remains
loaded_module = torch.jit.load(path)
loaded_inputs = list(loaded_module.graph.inputs())
@@ -672,7 +675,7 @@ class TestSaveLoad(JitTestCase):
"1000": (
torch.tensor([0]),
torch.tensor([], dtype=torch.int64),
- torch.tensor([])
+ torch.tensor([]),
)
}
traced_inputs, loaded_inputs = get_loaded_inputs(input1)
@@ -683,28 +686,32 @@ class TestSaveLoad(JitTestCase):
"1000": (
torch.tensor([0]),
torch.tensor([1500000, 1500004], dtype=torch.int64),
- torch.tensor([2.0, 3.0])
+ torch.tensor([2.0, 3.0]),
)
}
traced_inputs, loaded_inputs = get_loaded_inputs(input2)
self.assertEqual(traced_inputs[1].type(), loaded_inputs[1].type())
# Testing list
- input3 = [torch.tensor([0]),
- torch.tensor([1500000, 1500004], dtype=torch.int64),
- torch.tensor([2.0, 3.0])]
+ input3 = [
+ torch.tensor([0]),
+ torch.tensor([1500000, 1500004], dtype=torch.int64),
+ torch.tensor([2.0, 3.0]),
+ ]
traced_inputs, loaded_inputs = get_loaded_inputs(input3)
self.assertEqual(traced_inputs[1].type(), loaded_inputs[1].type())
# Testing list of dict of list
- input4 = [{
- "1000": (
- torch.tensor([0]),
- torch.tensor([1500000, 1500004], dtype=torch.int64),
- torch.tensor([2.0, 3.0])
- )
- }]
+ input4 = [
+ {
+ "1000": (
+ torch.tensor([0]),
+ torch.tensor([1500000, 1500004], dtype=torch.int64),
+ torch.tensor([2.0, 3.0]),
+ )
+ }
+ ]
traced_inputs, loaded_inputs = get_loaded_inputs(input4)
self.assertEqual(traced_inputs[1].type(), loaded_inputs[1].type())
@@ -715,14 +722,17 @@ class TestSaveLoad(JitTestCase):
Check if the model with string > 4GB can be loaded.
"""
import psutil
+
if psutil.virtual_memory().available < 60 * 1024 * 1024 * 1024:
# Profiled the test execution, and got this number to be safe to run the test
- self.skipTest("Doesn't have enough memory to run test_save_load_large_string_attribute")
+ self.skipTest(
+ "Doesn't have enough memory to run test_save_load_large_string_attribute"
+ )
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
- self.x = "x" * (2 ** 32 + 1)
+ self.x = "x" * (2**32 + 1)
def forward(self, i) -> int:
return len(self.x) + i.numel()
@@ -793,12 +803,8 @@ class TestSaveLoadFlatbuffer(JitTestCase):
class ContainsBoth(torch.nn.Module):
def __init__(self):
super().__init__()
- self.add_module(
- "second", torch.jit.load(second_saved_module)
- )
- self.add_module(
- "first", torch.jit.load(first_saved_module)
- )
+ self.add_module("second", torch.jit.load(second_saved_module))
+ self.add_module("first", torch.jit.load(first_saved_module))
def forward(self, x):
x = self.first(x)
@@ -846,12 +852,8 @@ class TestSaveLoadFlatbuffer(JitTestCase):
class ContainsBoth(torch.nn.Module):
def __init__(self):
super().__init__()
- self.add_module(
- "second", torch.jit.load(second_saved_module)
- )
- self.add_module(
- "first", torch.jit.load(first_saved_module)
- )
+ self.add_module("second", torch.jit.load(second_saved_module))
+ self.add_module("first", torch.jit.load(first_saved_module))
def forward(self, x):
x = self.first(x)
@@ -931,12 +933,8 @@ class TestSaveLoadFlatbuffer(JitTestCase):
class ContainsBoth(torch.nn.Module):
def __init__(self):
super().__init__()
- self.add_module(
- "second", torch.jit.load(second_saved_module)
- )
- self.add_module(
- "first", torch.jit.load(first_saved_module)
- )
+ self.add_module("second", torch.jit.load(second_saved_module))
+ self.add_module("first", torch.jit.load(first_saved_module))
def forward(self, x):
x = self.first(x)
@@ -1035,12 +1033,8 @@ class TestSaveLoadFlatbuffer(JitTestCase):
class ContainsBoth(torch.nn.Module):
def __init__(self):
super().__init__()
- self.add_module(
- "second", torch.jit.load(second_saved_module)
- )
- self.add_module(
- "first", torch.jit.load(first_saved_module)
- )
+ self.add_module("second", torch.jit.load(second_saved_module))
+ self.add_module("first", torch.jit.load(first_saved_module))
def forward(self, x):
x, named_tuple_1 = self.first(x)
@@ -1118,18 +1112,18 @@ class TestSaveLoadFlatbuffer(JitTestCase):
first_script_module = torch.jit.script(Foo())
first_saved_module = io.BytesIO()
- torch.jit.save_jit_module_to_flatbuffer(
- first_script_module, first_saved_module)
+ torch.jit.save_jit_module_to_flatbuffer(first_script_module, first_saved_module)
first_saved_module.seek(0)
- ff_info = torch.jit._serialization.get_flatbuffer_module_info(first_saved_module)
- self.assertEqual(ff_info['bytecode_version'], 9)
- self.assertEqual(ff_info['operator_version'], 1)
- self.assertEqual(ff_info['type_names'], set())
- self.assertEqual(ff_info['opname_to_num_args'], {'aten::linear': 3})
-
- self.assertEqual(len(ff_info['function_names']), 1)
- self.assertTrue(next(iter(ff_info['function_names'])).endswith('forward'))
+ ff_info = torch.jit._serialization.get_flatbuffer_module_info(
+ first_saved_module
+ )
+ self.assertEqual(ff_info["bytecode_version"], 9)
+ self.assertEqual(ff_info["operator_version"], 1)
+ self.assertEqual(ff_info["type_names"], set())
+ self.assertEqual(ff_info["opname_to_num_args"], {"aten::linear": 3})
+ self.assertEqual(len(ff_info["function_names"]), 1)
+ self.assertTrue(next(iter(ff_info["function_names"])).endswith("forward"))
def test_save_load_params_buffers_submodules(self):
"""
@@ -1179,7 +1173,6 @@ class TestSaveLoadFlatbuffer(JitTestCase):
self.assertEqual(m_name, loaded_name)
self.assertEqual(m_buffer, loaded_buffer)
-
def test_save_load_with_extra_files(self):
"""
Check that parameters, buffers, and submodules are the same after loading.
@@ -1194,7 +1187,8 @@ class TestSaveLoadFlatbuffer(JitTestCase):
extra_files = {"abc.json": b"[1,2,3]"}
script_module_io = script_module._save_to_buffer_for_lite_interpreter(
- _extra_files=extra_files, _use_flatbuffer=True)
+ _extra_files=extra_files, _use_flatbuffer=True
+ )
re_extra_files = {}
torch._C._get_model_extra_files_from_buffer(script_module_io, re_extra_files)
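
A small sketch of the extra-files round trip that the test_save_load.py hunk above reformats. It goes through the public torch.jit.save/torch.jit.load path rather than the lite-interpreter buffer used in the test, and it assumes the retrieved values come back as bytes.

import io
import torch

class M(torch.nn.Module):
    def forward(self, x):
        return x + 1

buf = io.BytesIO()
torch.jit.save(torch.jit.script(M()), buf, _extra_files={"abc.json": b"[1,2,3]"})
buf.seek(0)

extra = {"abc.json": ""}                           # keys select which files to read back
loaded = torch.jit.load(buf, _extra_files=extra)
assert extra["abc.json"] == b"[1,2,3]"
assert int(loaded(torch.zeros(1)).item()) == 1
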
diff --git a/test/jit/test_save_load_for_op_version.py b/test/jit/test_save_load_for_op_version.py
index 328f65684a..d9eb62bc42 100644
--- a/test/jit/test_save_load_for_op_version.py
+++ b/test/jit/test_save_load_for_op_version.py
@@ -1,20 +1,21 @@
# Owner(s): ["oncall: jit"]
-from itertools import product as product
import io
import os
import sys
-import hypothesis.strategies as st
-from hypothesis import example, settings, given
+from itertools import product as product
from typing import Union
+import hypothesis.strategies as st
+
import torch
+from hypothesis import example, given, settings
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
-from torch.testing._internal.jit_utils import JitTestCase
from torch.jit.mobile import _load_for_lite_interpreter
+from torch.testing._internal.jit_utils import JitTestCase
if __name__ == "__main__":
raise RuntimeError(
@@ -23,6 +24,7 @@ if __name__ == "__main__":
"instead."
)
+
class TestSaveLoadForOpVersion(JitTestCase):
# Helper that returns the module after saving and loading
def _save_load_module(self, m):
@@ -53,7 +55,6 @@ class TestSaveLoadForOpVersion(JitTestCase):
node_count = sum(str(n).count(kind) for n in m.graph.nodes())
self.assertEqual(node_count, count)
-
"""
Tests that verify Torchscript remaps aten::div(_) from versions 0-3
to call either aten::true_divide(_), if an input is a float type,
@@ -62,16 +63,21 @@ class TestSaveLoadForOpVersion(JitTestCase):
div behavior has not yet been updated.
"""
- @settings(max_examples=10, deadline=200000) # A total of 10 examples will be generated
+ @settings(
+ max_examples=10, deadline=200000
+ ) # A total of 10 examples will be generated
@given(
- sample_input=st.tuples(st.integers(min_value=5, max_value=199), st.floats(min_value=5.0, max_value=199.0))
+ sample_input=st.tuples(
+ st.integers(min_value=5, max_value=199),
+ st.floats(min_value=5.0, max_value=199.0),
+ )
) # Generate a pair (integer, float)
@example((2, 3, 2.0, 3.0)) # Ensure this example will be covered
def test_versioned_div_tensor(self, sample_input):
def historic_div(self, other):
if self.is_floating_point() or other.is_floating_point():
return self.true_divide(other)
- return self.divide(other, rounding_mode='trunc')
+ return self.divide(other, rounding_mode="trunc")
# Tensor x Tensor
class MyModule(torch.nn.Module):
@@ -85,7 +91,9 @@ class TestSaveLoadForOpVersion(JitTestCase):
# Loads historic module
try:
v3_mobile_module = _load_for_lite_interpreter(
- pytorch_test_dir + "/cpp/jit/upgrader_models/test_versioned_div_tensor_v2.ptl")
+ pytorch_test_dir
+ + "/cpp/jit/upgrader_models/test_versioned_div_tensor_v2.ptl"
+ )
except Exception as e:
self.skipTest("Failed to load fixture!")
@@ -108,16 +116,21 @@ class TestSaveLoadForOpVersion(JitTestCase):
_helper(v3_mobile_module, historic_div)
_helper(current_mobile_module, torch.div)
- @settings(max_examples=10, deadline=200000) # A total of 10 examples will be generated
+ @settings(
+ max_examples=10, deadline=200000
+ ) # A total of 10 examples will be generated
@given(
- sample_input=st.tuples(st.integers(min_value=5, max_value=199), st.floats(min_value=5.0, max_value=199.0))
+ sample_input=st.tuples(
+ st.integers(min_value=5, max_value=199),
+ st.floats(min_value=5.0, max_value=199.0),
+ )
) # Generate a pair (integer, float)
@example((2, 3, 2.0, 3.0)) # Ensure this example will be covered
def test_versioned_div_tensor_inplace(self, sample_input):
def historic_div_(self, other):
if self.is_floating_point() or other.is_floating_point():
return self.true_divide_(other)
- return self.divide_(other, rounding_mode='trunc')
+ return self.divide_(other, rounding_mode="trunc")
class MyModule(torch.nn.Module):
def forward(self, a, b):
@@ -126,7 +139,9 @@ class TestSaveLoadForOpVersion(JitTestCase):
try:
v3_mobile_module = _load_for_lite_interpreter(
- pytorch_test_dir + "/cpp/jit/upgrader_models/test_versioned_div_tensor_inplace_v2.ptl")
+ pytorch_test_dir
+ + "/cpp/jit/upgrader_models/test_versioned_div_tensor_inplace_v2.ptl"
+ )
except Exception as e:
self.skipTest("Failed to load fixture!")
@@ -151,16 +166,25 @@ class TestSaveLoadForOpVersion(JitTestCase):
a = torch.tensor((val_a,))
_helper(current_mobile_module, torch.Tensor.div_)
- @settings(max_examples=10, deadline=200000) # A total of 10 examples will be generated
+ @settings(
+ max_examples=10, deadline=200000
+ ) # A total of 10 examples will be generated
@given(
- sample_input=st.tuples(st.integers(min_value=5, max_value=199), st.floats(min_value=5.0, max_value=199.0))
+ sample_input=st.tuples(
+ st.integers(min_value=5, max_value=199),
+ st.floats(min_value=5.0, max_value=199.0),
+ )
) # Generate a pair (integer, float)
@example((2, 3, 2.0, 3.0)) # Ensure this example will be covered
def test_versioned_div_tensor_out(self, sample_input):
def historic_div_out(self, other, out):
- if self.is_floating_point() or other.is_floating_point() or out.is_floating_point():
+ if (
+ self.is_floating_point()
+ or other.is_floating_point()
+ or out.is_floating_point()
+ ):
return torch.true_divide(self, other, out=out)
- return torch.divide(self, other, out=out, rounding_mode='trunc')
+ return torch.divide(self, other, out=out, rounding_mode="trunc")
class MyModule(torch.nn.Module):
def forward(self, a, b, out):
@@ -168,7 +192,9 @@ class TestSaveLoadForOpVersion(JitTestCase):
try:
v3_mobile_module = _load_for_lite_interpreter(
- pytorch_test_dir + "/cpp/jit/upgrader_models/test_versioned_div_tensor_out_v2.ptl")
+ pytorch_test_dir
+ + "/cpp/jit/upgrader_models/test_versioned_div_tensor_out_v2.ptl"
+ )
except Exception as e:
self.skipTest("Failed to load fixture!")
@@ -179,6 +205,7 @@ class TestSaveLoadForOpVersion(JitTestCase):
b = torch.tensor((val_b,))
for out in (torch.empty((1,)), torch.empty((1,), dtype=torch.long)):
+
def _helper(m, fn):
fn_result = None
if fn is torch.div:
@@ -196,9 +223,14 @@ class TestSaveLoadForOpVersion(JitTestCase):
_helper(v3_mobile_module, historic_div_out)
_helper(current_mobile_module, torch.div)
- @settings(max_examples=10, deadline=200000) # A total of 10 examples will be generated
+ @settings(
+ max_examples=10, deadline=200000
+ ) # A total of 10 examples will be generated
@given(
- sample_input=st.tuples(st.integers(min_value=5, max_value=199), st.floats(min_value=5.0, max_value=199.0))
+ sample_input=st.tuples(
+ st.integers(min_value=5, max_value=199),
+ st.floats(min_value=5.0, max_value=199.0),
+ )
) # Generate a pair (integer, float)
@example((2, 3, 2.0, 3.0)) # Ensure this example will be covered
def test_versioned_div_scalar(self, sample_input):
@@ -208,7 +240,7 @@ class TestSaveLoadForOpVersion(JitTestCase):
def historic_div_scalar_int(self, other: int):
if self.is_floating_point():
return torch.true_divide(self, other)
- return torch.divide(self, other, rounding_mode='trunc')
+ return torch.divide(self, other, rounding_mode="trunc")
class MyModuleFloat(torch.nn.Module):
def forward(self, a, b: float):
@@ -220,9 +252,13 @@ class TestSaveLoadForOpVersion(JitTestCase):
try:
v3_mobile_module_float = _load_for_lite_interpreter(
- pytorch_test_dir + "/jit/fixtures/test_versioned_div_scalar_float_v2.ptl")
+ pytorch_test_dir
+ + "/jit/fixtures/test_versioned_div_scalar_float_v2.ptl"
+ )
v3_mobile_module_int = _load_for_lite_interpreter(
- pytorch_test_dir + "/cpp/jit/upgrader_models/test_versioned_div_scalar_int_v2.ptl")
+ pytorch_test_dir
+ + "/cpp/jit/upgrader_models/test_versioned_div_scalar_int_v2.ptl"
+ )
except Exception as e:
self.skipTest("Failed to load fixture!")
@@ -249,9 +285,14 @@ class TestSaveLoadForOpVersion(JitTestCase):
_helper(v3_mobile_module_int, historic_div_scalar_int)
_helper(current_mobile_module_int, torch.div)
- @settings(max_examples=10, deadline=200000) # A total of 10 examples will be generated
+ @settings(
+ max_examples=10, deadline=200000
+ ) # A total of 10 examples will be generated
@given(
- sample_input=st.tuples(st.integers(min_value=5, max_value=199), st.floats(min_value=5.0, max_value=199.0))
+ sample_input=st.tuples(
+ st.integers(min_value=5, max_value=199),
+ st.floats(min_value=5.0, max_value=199.0),
+ )
) # Generate a pair (integer, float)
@example((2, 3, 2.0, 3.0)) # Ensure this example will be covered
def test_versioned_div_scalar_reciprocal(self, sample_input):
@@ -261,7 +302,7 @@ class TestSaveLoadForOpVersion(JitTestCase):
def historic_div_scalar_int_reciprocal(self, other: int):
if self.is_floating_point():
return other / self
- return torch.divide(other, self, rounding_mode='trunc')
+ return torch.divide(other, self, rounding_mode="trunc")
class MyModuleFloat(torch.nn.Module):
def forward(self, a, b: float):
@@ -273,9 +314,13 @@ class TestSaveLoadForOpVersion(JitTestCase):
try:
v3_mobile_module_float = _load_for_lite_interpreter(
- pytorch_test_dir + "/cpp/jit/upgrader_models/test_versioned_div_scalar_reciprocal_float_v2.ptl")
+ pytorch_test_dir
+ + "/cpp/jit/upgrader_models/test_versioned_div_scalar_reciprocal_float_v2.ptl"
+ )
v3_mobile_module_int = _load_for_lite_interpreter(
- pytorch_test_dir + "/cpp/jit/upgrader_models/test_versioned_div_scalar_reciprocal_int_v2.ptl")
+ pytorch_test_dir
+ + "/cpp/jit/upgrader_models/test_versioned_div_scalar_reciprocal_int_v2.ptl"
+ )
except Exception as e:
self.skipTest("Failed to load fixture!")
@@ -311,9 +356,14 @@ class TestSaveLoadForOpVersion(JitTestCase):
_helper(v3_mobile_module_int, current_mobile_module_int)
_helper(current_mobile_module_int, torch.div)
- @settings(max_examples=10, deadline=200000) # A total of 10 examples will be generated
+ @settings(
+ max_examples=10, deadline=200000
+ ) # A total of 10 examples will be generated
@given(
- sample_input=st.tuples(st.integers(min_value=5, max_value=199), st.floats(min_value=5.0, max_value=199.0))
+ sample_input=st.tuples(
+ st.integers(min_value=5, max_value=199),
+ st.floats(min_value=5.0, max_value=199.0),
+ )
) # Generate a pair (integer, float)
@example((2, 3, 2.0, 3.0)) # Ensure this example will be covered
def test_versioned_div_scalar_inplace(self, sample_input):
@@ -324,7 +374,7 @@ class TestSaveLoadForOpVersion(JitTestCase):
if self.is_floating_point():
return self.true_divide_(other)
- return self.divide_(other, rounding_mode='trunc')
+ return self.divide_(other, rounding_mode="trunc")
class MyModuleFloat(torch.nn.Module):
def forward(self, a, b: float):
@@ -338,9 +388,13 @@ class TestSaveLoadForOpVersion(JitTestCase):
try:
v3_mobile_module_float = _load_for_lite_interpreter(
- pytorch_test_dir + "/cpp/jit/upgrader_models/test_versioned_div_scalar_inplace_float_v2.ptl")
+ pytorch_test_dir
+ + "/cpp/jit/upgrader_models/test_versioned_div_scalar_inplace_float_v2.ptl"
+ )
v3_mobile_module_int = _load_for_lite_interpreter(
- pytorch_test_dir + "/cpp/jit/upgrader_models/test_versioned_div_scalar_inplace_int_v2.ptl")
+ pytorch_test_dir
+ + "/cpp/jit/upgrader_models/test_versioned_div_scalar_inplace_int_v2.ptl"
+ )
except Exception as e:
self.skipTest("Failed to load fixture!")
@@ -378,14 +432,16 @@ class TestSaveLoadForOpVersion(JitTestCase):
try:
v3_mobile_module = _load_for_lite_interpreter(
- pytorch_test_dir + "/cpp/jit/upgrader_models/test_versioned_div_scalar_scalar_v2.ptl")
+ pytorch_test_dir
+ + "/cpp/jit/upgrader_models/test_versioned_div_scalar_scalar_v2.ptl"
+ )
except Exception as e:
self.skipTest("Failed to load fixture!")
current_mobile_module = self._save_load_mobile_module(MyModule)
def _helper(m, fn):
- vals = (5., 3, 2., 7)
+ vals = (5.0, 3, 2.0, 7)
m_result = m(*vals)
fn_result = fn(*vals)
for mr, hr in zip(m_result, fn_result):
@@ -395,13 +451,16 @@ class TestSaveLoadForOpVersion(JitTestCase):
def test_versioned_linspace(self):
class Module(torch.nn.Module):
- def forward(self, a: Union[int, float, complex], b: Union[int, float, complex]):
+ def forward(
+ self, a: Union[int, float, complex], b: Union[int, float, complex]
+ ):
c = torch.linspace(a, b, steps=5)
d = torch.linspace(a, b, steps=100)
return c, d
scripted_module = torch.jit.load(
- pytorch_test_dir + "/jit/fixtures/test_versioned_linspace_v7.ptl")
+ pytorch_test_dir + "/jit/fixtures/test_versioned_linspace_v7.ptl"
+ )
buffer = io.BytesIO(scripted_module._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
@@ -410,7 +469,7 @@ class TestSaveLoadForOpVersion(JitTestCase):
current_mobile_module = self._save_load_mobile_module(Module)
sample_inputs = ((3, 10), (-10, 10), (4.0, 6.0), (3 + 4j, 4 + 5j))
- for (a, b) in sample_inputs:
+ for a, b in sample_inputs:
(output_with_step, output_without_step) = v7_mobile_module(a, b)
(current_with_step, current_without_step) = current_mobile_module(a, b)
# when no step is given, should have used 100
@@ -422,10 +481,17 @@ class TestSaveLoadForOpVersion(JitTestCase):
def test_versioned_linspace_out(self):
class Module(torch.nn.Module):
- def forward(self, a: Union[int, float, complex], b: Union[int, float, complex], out: torch.Tensor):
+ def forward(
+ self,
+ a: Union[int, float, complex],
+ b: Union[int, float, complex],
+ out: torch.Tensor,
+ ):
return torch.linspace(a, b, steps=100, out=out)
- model_path = pytorch_test_dir + "/jit/fixtures/test_versioned_linspace_out_v7.ptl"
+ model_path = (
+ pytorch_test_dir + "/jit/fixtures/test_versioned_linspace_out_v7.ptl"
+ )
loaded_model = torch.jit.load(model_path)
buffer = io.BytesIO(loaded_model._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
@@ -433,12 +499,32 @@ class TestSaveLoadForOpVersion(JitTestCase):
current_mobile_module = self._save_load_mobile_module(Module)
sample_inputs = (
- (3, 10, torch.empty((100,), dtype=torch.int64), torch.empty((100,), dtype=torch.int64)),
- (-10, 10, torch.empty((100,), dtype=torch.int64), torch.empty((100,), dtype=torch.int64)),
- (4.0, 6.0, torch.empty((100,), dtype=torch.float64), torch.empty((100,), dtype=torch.float64)),
- (3 + 4j, 4 + 5j, torch.empty((100,), dtype=torch.complex64), torch.empty((100,), dtype=torch.complex64)),
+ (
+ 3,
+ 10,
+ torch.empty((100,), dtype=torch.int64),
+ torch.empty((100,), dtype=torch.int64),
+ ),
+ (
+ -10,
+ 10,
+ torch.empty((100,), dtype=torch.int64),
+ torch.empty((100,), dtype=torch.int64),
+ ),
+ (
+ 4.0,
+ 6.0,
+ torch.empty((100,), dtype=torch.float64),
+ torch.empty((100,), dtype=torch.float64),
+ ),
+ (
+ 3 + 4j,
+ 4 + 5j,
+ torch.empty((100,), dtype=torch.complex64),
+ torch.empty((100,), dtype=torch.complex64),
+ ),
)
- for (start, end, out_for_old, out_for_new) in sample_inputs:
+ for start, end, out_for_old, out_for_new in sample_inputs:
output = v7_mobile_module(start, end, out_for_old)
output_current = current_mobile_module(start, end, out_for_new)
# when no step is given, should have used 100
@@ -448,13 +534,16 @@ class TestSaveLoadForOpVersion(JitTestCase):
def test_versioned_logspace(self):
class Module(torch.nn.Module):
- def forward(self, a: Union[int, float, complex], b: Union[int, float, complex]):
+ def forward(
+ self, a: Union[int, float, complex], b: Union[int, float, complex]
+ ):
c = torch.logspace(a, b, steps=5)
d = torch.logspace(a, b, steps=100)
return c, d
scripted_module = torch.jit.load(
- pytorch_test_dir + "/jit/fixtures/test_versioned_logspace_v8.ptl")
+ pytorch_test_dir + "/jit/fixtures/test_versioned_logspace_v8.ptl"
+ )
buffer = io.BytesIO(scripted_module._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
@@ -463,7 +552,7 @@ class TestSaveLoadForOpVersion(JitTestCase):
current_mobile_module = self._save_load_mobile_module(Module)
sample_inputs = ((3, 10), (-10, 10), (4.0, 6.0), (3 + 4j, 4 + 5j))
- for (a, b) in sample_inputs:
+ for a, b in sample_inputs:
(output_with_step, output_without_step) = v8_mobile_module(a, b)
(current_with_step, current_without_step) = current_mobile_module(a, b)
# when no step is given, should have used 100
@@ -475,10 +564,17 @@ class TestSaveLoadForOpVersion(JitTestCase):
def test_versioned_logspace_out(self):
class Module(torch.nn.Module):
- def forward(self, a: Union[int, float, complex], b: Union[int, float, complex], out: torch.Tensor):
+ def forward(
+ self,
+ a: Union[int, float, complex],
+ b: Union[int, float, complex],
+ out: torch.Tensor,
+ ):
return torch.logspace(a, b, steps=100, out=out)
- model_path = pytorch_test_dir + "/jit/fixtures/test_versioned_logspace_out_v8.ptl"
+ model_path = (
+ pytorch_test_dir + "/jit/fixtures/test_versioned_logspace_out_v8.ptl"
+ )
loaded_model = torch.jit.load(model_path)
buffer = io.BytesIO(loaded_model._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
@@ -486,12 +582,32 @@ class TestSaveLoadForOpVersion(JitTestCase):
current_mobile_module = self._save_load_mobile_module(Module)
sample_inputs = (
- (3, 10, torch.empty((100,), dtype=torch.int64), torch.empty((100,), dtype=torch.int64)),
- (-10, 10, torch.empty((100,), dtype=torch.int64), torch.empty((100,), dtype=torch.int64)),
- (4.0, 6.0, torch.empty((100,), dtype=torch.float64), torch.empty((100,), dtype=torch.float64)),
- (3 + 4j, 4 + 5j, torch.empty((100,), dtype=torch.complex64), torch.empty((100,), dtype=torch.complex64)),
+ (
+ 3,
+ 10,
+ torch.empty((100,), dtype=torch.int64),
+ torch.empty((100,), dtype=torch.int64),
+ ),
+ (
+ -10,
+ 10,
+ torch.empty((100,), dtype=torch.int64),
+ torch.empty((100,), dtype=torch.int64),
+ ),
+ (
+ 4.0,
+ 6.0,
+ torch.empty((100,), dtype=torch.float64),
+ torch.empty((100,), dtype=torch.float64),
+ ),
+ (
+ 3 + 4j,
+ 4 + 5j,
+ torch.empty((100,), dtype=torch.complex64),
+ torch.empty((100,), dtype=torch.complex64),
+ ),
)
- for (start, end, out_for_old, out_for_new) in sample_inputs:
+ for start, end, out_for_old, out_for_new in sample_inputs:
output = v8_mobile_module(start, end, out_for_old)
output_current = current_mobile_module(start, end, out_for_new)
# when no step is given, should have used 100
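
For context on the historic_div helpers in the hunks above, a short illustration (assumed to run on a current build) of the two division semantics the versioned fixtures distinguish: the old integer aten::div truncated, while today's default performs true division.

import torch

a = torch.tensor([7])
b = torch.tensor([2])

trunc = torch.divide(a, b, rounding_mode="trunc")  # historic integer-tensor behaviour -> tensor([3])
true_div = torch.div(a, b)                         # current default -> tensor([3.5000])
print(trunc, true_div)
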
diff --git a/test/jit/test_script_profile.py b/test/jit/test_script_profile.py
index 1a1e8dbb34..4b67df2ed1 100644
--- a/test/jit/test_script_profile.py
+++ b/test/jit/test_script_profile.py
@@ -11,10 +11,13 @@ pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
+
class Sequence(nn.Module):
def __init__(self):
@@ -38,8 +41,8 @@ class Sequence(nn.Module):
outputs = torch.cat(outputs, dim=1)
return outputs
-class TestScriptProfile(JitTestCase):
+class TestScriptProfile(JitTestCase):
def test_basic(self):
seq = torch.jit.script(Sequence())
p = torch.jit._ScriptProfile()
@@ -57,6 +60,7 @@ class TestScriptProfile(JitTestCase):
@torch.jit.script
def fn():
_ = seq(torch.rand((10, 100)))
+
fn()
p.disable()
@@ -83,7 +87,7 @@ class TestScriptProfile(JitTestCase):
seq = Sequence()
@torch.jit.script
- def fn(max : int):
+ def fn(max: int):
_ = seq(torch.rand((10, max)))
p = torch.jit._ScriptProfile()
diff --git a/test/jit/test_scriptmod_ann.py b/test/jit/test_scriptmod_ann.py
index 3a9f2fd4d2..5d9856744d 100644
--- a/test/jit/test_scriptmod_ann.py
+++ b/test/jit/test_scriptmod_ann.py
@@ -3,22 +3,24 @@
import os
import sys
import warnings
+from typing import Dict, List, Optional
import torch
-from typing import List, Dict, Optional
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
-class TestScriptModuleInstanceAttributeTypeAnnotation(JitTestCase):
+class TestScriptModuleInstanceAttributeTypeAnnotation(JitTestCase):
# NB: There are no tests for `Tuple` or `NamedTuple` here. In fact,
# reassigning a non-empty Tuple to an attribute previously typed
# as containing an empty Tuple SHOULD fail. See note in `_check.py`
@@ -81,7 +83,6 @@ class TestScriptModuleInstanceAttributeTypeAnnotation(JitTestCase):
def test_annotated_class_level_annotation_only(self):
class M(torch.nn.Module):
-
x: List[int]
def __init__(self):
@@ -96,10 +97,8 @@ class TestScriptModuleInstanceAttributeTypeAnnotation(JitTestCase):
self.checkModule(M(), ([1, 2, 3],))
assert len(w) == 0
-
def test_annotated_class_level_annotation_and_init_annotation(self):
class M(torch.nn.Module):
-
x: List[int]
def __init__(self):
@@ -116,7 +115,6 @@ class TestScriptModuleInstanceAttributeTypeAnnotation(JitTestCase):
def test_annotated_class_level_jit_annotation(self):
class M(torch.nn.Module):
-
x: List[int]
def __init__(self):
@@ -141,12 +139,15 @@ class TestScriptModuleInstanceAttributeTypeAnnotation(JitTestCase):
self.x = x
return 1
- with self.assertRaisesRegexWithHighlight(RuntimeError,
- "Tried to set nonexistent attribute",
- "self.x = x"):
- with self.assertWarnsRegex(UserWarning, "doesn't support "
- "instance-level annotations on "
- "empty non-base types"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "Tried to set nonexistent attribute", "self.x = x"
+ ):
+ with self.assertWarnsRegex(
+ UserWarning,
+ "doesn't support "
+ "instance-level annotations on "
+ "empty non-base types",
+ ):
torch.jit.script(M())
def test_annotated_empty_dict(self):
@@ -159,12 +160,15 @@ class TestScriptModuleInstanceAttributeTypeAnnotation(JitTestCase):
self.x = x
return 1
- with self.assertRaisesRegexWithHighlight(RuntimeError,
- "Tried to set nonexistent attribute",
- "self.x = x"):
- with self.assertWarnsRegex(UserWarning, "doesn't support "
- "instance-level annotations on "
- "empty non-base types"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "Tried to set nonexistent attribute", "self.x = x"
+ ):
+ with self.assertWarnsRegex(
+ UserWarning,
+ "doesn't support "
+ "instance-level annotations on "
+ "empty non-base types",
+ ):
torch.jit.script(M())
def test_annotated_empty_optional(self):
@@ -177,12 +181,15 @@ class TestScriptModuleInstanceAttributeTypeAnnotation(JitTestCase):
self.x = x
return 1
- with self.assertRaisesRegexWithHighlight(RuntimeError,
- "Wrong type for attribute assignment",
- "self.x = x"):
- with self.assertWarnsRegex(UserWarning, "doesn't support "
- "instance-level annotations on "
- "empty non-base types"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "Wrong type for attribute assignment", "self.x = x"
+ ):
+ with self.assertWarnsRegex(
+ UserWarning,
+ "doesn't support "
+ "instance-level annotations on "
+ "empty non-base types",
+ ):
torch.jit.script(M())
def test_annotated_with_jit_empty_list(self):
@@ -195,12 +202,15 @@ class TestScriptModuleInstanceAttributeTypeAnnotation(JitTestCase):
self.x = x
return 1
- with self.assertRaisesRegexWithHighlight(RuntimeError,
- "Tried to set nonexistent attribute",
- "self.x = x"):
- with self.assertWarnsRegex(UserWarning, "doesn't support "
- "instance-level annotations on "
- "empty non-base types"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "Tried to set nonexistent attribute", "self.x = x"
+ ):
+ with self.assertWarnsRegex(
+ UserWarning,
+ "doesn't support "
+ "instance-level annotations on "
+ "empty non-base types",
+ ):
torch.jit.script(M())
def test_annotated_with_jit_empty_dict(self):
@@ -213,12 +223,15 @@ class TestScriptModuleInstanceAttributeTypeAnnotation(JitTestCase):
self.x = x
return 1
- with self.assertRaisesRegexWithHighlight(RuntimeError,
- "Tried to set nonexistent attribute",
- "self.x = x"):
- with self.assertWarnsRegex(UserWarning, "doesn't support "
- "instance-level annotations on "
- "empty non-base types"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "Tried to set nonexistent attribute", "self.x = x"
+ ):
+ with self.assertWarnsRegex(
+ UserWarning,
+ "doesn't support "
+ "instance-level annotations on "
+ "empty non-base types",
+ ):
torch.jit.script(M())
def test_annotated_with_jit_empty_optional(self):
@@ -231,12 +244,15 @@ class TestScriptModuleInstanceAttributeTypeAnnotation(JitTestCase):
self.x = x
return 1
- with self.assertRaisesRegexWithHighlight(RuntimeError,
- "Wrong type for attribute assignment",
- "self.x = x"):
- with self.assertWarnsRegex(UserWarning, "doesn't support "
- "instance-level annotations on "
- "empty non-base types"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "Wrong type for attribute assignment", "self.x = x"
+ ):
+ with self.assertWarnsRegex(
+ UserWarning,
+ "doesn't support "
+ "instance-level annotations on "
+ "empty non-base types",
+ ):
torch.jit.script(M())
def test_annotated_with_torch_jit_import(self):
@@ -251,10 +267,13 @@ class TestScriptModuleInstanceAttributeTypeAnnotation(JitTestCase):
self.x = x
return 1
- with self.assertRaisesRegexWithHighlight(RuntimeError,
- "Wrong type for attribute assignment",
- "self.x = x"):
- with self.assertWarnsRegex(UserWarning, "doesn't support "
- "instance-level annotations on "
- "empty non-base types"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "Wrong type for attribute assignment", "self.x = x"
+ ):
+ with self.assertWarnsRegex(
+ UserWarning,
+ "doesn't support "
+ "instance-level annotations on "
+ "empty non-base types",
+ ):
torch.jit.script(M())
diff --git a/test/jit/test_slice.py b/test/jit/test_slice.py
index ceb3c3b48e..3f4763ff1a 100644
--- a/test/jit/test_slice.py
+++ b/test/jit/test_slice.py
@@ -2,19 +2,22 @@
import os
import sys
+from typing import List
import torch
-from typing import List
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
+
# Tests that Python slice class is supported in TorchScript
class TestSlice(JitTestCase):
@@ -22,7 +25,9 @@ class TestSlice(JitTestCase):
def slice_kwarg(x: List[int]):
return x[slice(1, stop=2)]
- with self.assertRaisesRegex(RuntimeError, "Slice does not accept any keyword arguments"):
+ with self.assertRaisesRegex(
+ RuntimeError, "Slice does not accept any keyword arguments"
+ ):
torch.jit.script(slice_kwarg)
def test_slice_three_nones(self):
@@ -46,11 +51,13 @@ class TestSlice(JitTestCase):
def test_slice_stop_only(self):
def fn(x: List[int]):
return x[slice(5)]
+
self.checkScript(fn, (range(10),))
def test_slice_stop_only_with_nones(self):
def fn(x: List[int]):
return x[slice(None, 5, None)]
+
self.checkScript(fn, (range(10),))
def test_slice_start_stop(self):
@@ -136,8 +143,8 @@ class TestSlice(JitTestCase):
num_outputs = {len(x.output().type().elements()) for x in slices}
# there should be only one tupleSlice with length of 2
self.assertTrue(num_outputs == {2})
- self.run_pass('lower_all_tuples', tuple_graph)
- self.assertTrue('Tuple' not in str(tuple_graph))
+ self.run_pass("lower_all_tuples", tuple_graph)
+ self.assertTrue("Tuple" not in str(tuple_graph))
def test_module_list_slicing(self):
class Bar(torch.nn.Module):
diff --git a/test/jit/test_sparse.py b/test/jit/test_sparse.py
index 00102ccc1c..6dc9f0b8b6 100644
--- a/test/jit/test_sparse.py
+++ b/test/jit/test_sparse.py
@@ -1,8 +1,9 @@
# Owner(s): ["oncall: jit"]
import io
-import torch
import unittest
+
+import torch
from torch.testing._internal.common_utils import IS_WINDOWS, TEST_MKL
from torch.testing._internal.jit_utils import JitTestCase
@@ -70,9 +71,7 @@ class TestSparse(JitTestCase):
self.a = torch.rand(4, 4).to_sparse_csr()
self.b = torch.rand(4, 4).to_sparse_csr()
-
def forward(self, x):
-
return x.matmul(self.a).matmul(self.b)
x = torch.rand(4, 4).to_sparse_csr()
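
A brief sketch of the CSR layout the scripted module above consumes. The accessor calls are the standard sparse-CSR API; availability may depend on the build, as the MKL/Windows guards imported in this file suggest.

import torch

dense = torch.rand(4, 4)
csr = dense.to_sparse_csr()
print(csr.crow_indices(), csr.col_indices(), csr.values())  # the three CSR components
assert torch.allclose(csr.to_dense(), dense)
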
diff --git a/test/jit/test_string_formatting.py b/test/jit/test_string_formatting.py
index e739de3be2..016c28e739 100644
--- a/test/jit/test_string_formatting.py
+++ b/test/jit/test_string_formatting.py
@@ -2,65 +2,76 @@
import os
import sys
+from typing import List
import torch
-from typing import List
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
-class TestStringFormatting(JitTestCase):
+class TestStringFormatting(JitTestCase):
def test_modulo_operator(self):
def fn(dividend: int, divisor: int) -> int:
return dividend % divisor
+
self.checkScript(fn, (5, 2))
def test_string_interpolation_with_string_placeholder_and_string_variable(self):
def fn(arg1: str):
return "%s in template" % arg1
+
self.checkScript(fn, ("foo",))
- def test_string_interpolation_with_string_placeholder_and_format_string_variable(self):
+ def test_string_interpolation_with_string_placeholder_and_format_string_variable(
+ self,
+ ):
def fn(arg1: str):
return arg1 % "foo"
+
self.checkScript(fn, ("%s in template",))
def test_string_interpolation_with_double_percent_in_string(self):
def fn(arg1: str):
return "%s in template %%" % arg1
+
self.checkScript(fn, ("foo",))
def test_string_interpolation_with_percent_in_string(self):
@torch.jit.script
def fn(arg1: str) -> str:
- return "%s in template %" % arg1 # noqa: F501
+ return "%s in template %" % arg1 # noqa: F501
- with self.assertRaisesRegexWithHighlight(RuntimeError,
- "Incomplete format specifier",
- "\"%s in template %\" % arg1"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "Incomplete format specifier", '"%s in template %" % arg1'
+ ):
fn("foo")
def test_string_interpolation_with_string_placeholder_and_digit_variable(self):
def fn(arg1: int) -> str:
return "%s in template" % arg1
+
self.checkScript(fn, (1,))
def test_string_interpolation_with_digit_placeholder_and_digit_variable(self):
def fn(arg1: int) -> str:
return "%d in template" % arg1
+
self.checkScript(fn, (1,))
def test_string_interpolation_with_alternate_digit_placeholder(self):
def fn(arg1: int) -> str:
return "%i in template" % arg1
+
self.checkScript(fn, (1,))
def test_string_interpolation_with_digit_placeholder_and_string_variable(self):
@@ -68,9 +79,11 @@ class TestStringFormatting(JitTestCase):
def fn(arg1: str) -> str:
return "%d in template" % arg1
- with self.assertRaisesRegexWithHighlight(RuntimeError,
- "%d requires a number for formatting, but got String",
- "\"%d in template\" % arg1"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError,
+ "%d requires a number for formatting, but got String",
+ '"%d in template" % arg1',
+ ):
fn("1")
def test_string_interpolation_with_exponent_placeholder_and_string_variable(self):
@@ -78,39 +91,51 @@ class TestStringFormatting(JitTestCase):
def fn(arg1: str) -> str:
return "%e in template" % arg1
- with self.assertRaisesRegexWithHighlight(RuntimeError,
- "%e requires a number for formatting, but got String",
- "\"%e in template\" % arg1"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError,
+ "%e requires a number for formatting, but got String",
+ '"%e in template" % arg1',
+ ):
fn("1")
- def test_string_interpolation_with_lowercase_exponent_placeholder_and_digit_variable(self):
+ def test_string_interpolation_with_lowercase_exponent_placeholder_and_digit_variable(
+ self,
+ ):
def fn(arg1: int) -> str:
return "%e in template" % arg1
+
self.checkScript(fn, (1,))
- def test_string_interpolation_with_capital_exponent_placeholder_and_digit_variable(self):
+ def test_string_interpolation_with_capital_exponent_placeholder_and_digit_variable(
+ self,
+ ):
def fn(arg1: int) -> str:
return "%E in template" % arg1
+
self.checkScript(fn, (1,))
def test_string_interpolation_with_float_placeholder_and_float_variable(self):
def fn(arg1: float) -> str:
return "%f in template" % arg1
+
self.checkScript(fn, (1.0,))
def test_string_interpolation_with_float_placeholder_and_digit_variable(self):
def fn(arg1: int) -> str:
return "%f in template" % arg1
+
self.checkScript(fn, (1,))
def test_string_interpolation_with_char_placeholder_and_char_variable(self):
def fn(arg1: str) -> str:
return "%c in template" % arg1
+
self.checkScript(fn, ("a",))
def test_string_interpolation_with_char_placeholder_and_digit_variable(self):
def fn(arg1: int) -> str:
return "%c in template" % arg1
+
self.checkScript(fn, (97,))
def test_string_interpolation_with_char_placeholder_and_true_string_variable(self):
@@ -118,19 +143,23 @@ class TestStringFormatting(JitTestCase):
def fn(arg1: str) -> str:
return "%c in template" % arg1
- with self.assertRaisesRegexWithHighlight(RuntimeError,
- "%c requires an int or char for formatting, but got String",
- "\"%c in template\" % arg1"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError,
+ "%c requires an int or char for formatting, but got String",
+ '"%c in template" % arg1',
+ ):
fn("foo")
def test_string_interpolation_with_multiple_placeholders(self):
def fn(arg1: str, arg2: int, arg3: float) -> str:
return "%s %d %f in template" % (arg1, arg2, arg3)
+
self.checkScript(fn, ("foo", 1, 1))
def test_string_interpolation_with_subscript(self):
def fn(arg1: List[str]) -> str:
return "%s in template" % arg1[0]
+
self.checkScript(fn, (["foo", "bar"],))
def test_string_interpolation_with_too_few_arguments(self):
@@ -138,27 +167,33 @@ class TestStringFormatting(JitTestCase):
def fn(arg1: str) -> str:
return "%s %s in template" % arg1
- with self.assertRaisesRegexWithHighlight(RuntimeError,
- "Too few arguments for format string",
- "\"%s %s in template\" % arg1"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError,
+ "Too few arguments for format string",
+ '"%s %s in template" % arg1',
+ ):
fn("foo")
def test_string_interpolation_with_too_many_arguments(self):
@torch.jit.script
def fn(arg1: str, arg2: str) -> str:
- return "%s in template" % (arg1, arg2) # noqa: F507
+ return "%s in template" % (arg1, arg2) # noqa: F507
- with self.assertRaisesRegexWithHighlight(RuntimeError,
- "Too many arguments for format string",
- "\"%s in template\" % (arg1, arg2"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError,
+ "Too many arguments for format string",
+ '"%s in template" % (arg1, arg2',
+ ):
fn("foo", "bar")
def test_string_interpolation_with_unknown_format_specifier(self):
@torch.jit.script
def fn(arg1: str) -> str:
- return "%a in template" % arg1 # noqa: F501
+ return "%a in template" % arg1 # noqa: F501
- with self.assertRaisesRegexWithHighlight(RuntimeError,
- "The specifier %a is not supported in TorchScript format strings",
- "\"%a in template\" % arg1"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError,
+ "The specifier %a is not supported in TorchScript format strings",
+ '"%a in template" % arg1',
+ ):
fn("foo")
diff --git a/test/jit/test_symbolic_shape_analysis.py b/test/jit/test_symbolic_shape_analysis.py
index bd3b6d7eca..913eaea124 100644
--- a/test/jit/test_symbolic_shape_analysis.py
+++ b/test/jit/test_symbolic_shape_analysis.py
@@ -3,29 +3,36 @@
import operator
import unittest
from textwrap import dedent
+from typing import Any, List
import torch
from torch import nn, Tensor
from torch.testing import FileCheck
from torch.testing._internal.common_methods_invocations import sample_inputs_cat_concat
from torch.testing._internal.common_utils import make_tensor
-from torch.testing._internal.jit_utils import JitTestCase, execWrapper
-from typing import List, Any
+from torch.testing._internal.jit_utils import execWrapper, JitTestCase
+
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
# XXX: still in prototype
class TestSymbolicShapeAnalysis(JitTestCase):
def setUp(self):
super(JitTestCase, self).setUp()
- self.prev_symbolic_shapes_test_enabled = torch._C._jit_symbolic_shapes_test_mode_enabled()
+ self.prev_symbolic_shapes_test_enabled = (
+ torch._C._jit_symbolic_shapes_test_mode_enabled()
+ )
torch._C._jit_set_symbolic_shapes_test_mode(True)
def tearDown(self):
- torch._C._jit_set_symbolic_shapes_test_mode(self.prev_symbolic_shapes_test_enabled)
+ torch._C._jit_set_symbolic_shapes_test_mode(
+ self.prev_symbolic_shapes_test_enabled
+ )
def test_shape_analysis(self):
@torch.jit.script
@@ -115,7 +122,9 @@ class TestSymbolicShapeAnalysis(JitTestCase):
def neg_to_one(li):
return [elem if elem >= 0 else -1 for elem in li]
- self.assertEqual(neg_to_one(view.output().type().symbolic_sizes()), [-1, 3, 2, -1])
+ self.assertEqual(
+ neg_to_one(view.output().type().symbolic_sizes()), [-1, 3, 2, -1]
+ )
if_out = next(foo.graph.findNode("prim::If").outputs())
self.assertEqual(neg_to_one(if_out.type().symbolic_sizes()), [-1, 3, -1, -1])
@@ -135,9 +144,7 @@ class TestSymbolicShapeAnalysis(JitTestCase):
y = x.mul_(2)
return y
- unary_ops = [
- mul_inplace
- ]
+ unary_ops = [mul_inplace]
for fn in unary_ops:
# t = torch.jit.trace(fn, torch.rand([4, 4])) # For some reason tracing is erroring out.
t = torch.jit.script(fn)
@@ -202,7 +209,9 @@ class TestSymbolicShapeAnalysis(JitTestCase):
inputs[1].setType(inputs[1].type().with_sizes([5, 8, sym1]))
torch._C._jit_pass_propagate_shapes_on_graph(graph)
- self.assertEqual(next(graph.outputs()).type().symbolic_sizes(), [5, 8, sym1])
+ self.assertEqual(
+ next(graph.outputs()).type().symbolic_sizes(), [5, 8, sym1]
+ )
def test_adaptive_avg_pool2d(self):
inps = [
@@ -227,25 +236,105 @@ class TestSymbolicShapeAnalysis(JitTestCase):
self.checkShapeAnalysis(out_size, fn.graph, assert_propagation=True)
def test_conv_deconv(self):
- for inp_shape, weight_shape, bias, stride, padding, output_padding, dilation, groups, mod in [
- ([32, 6, 10], [16, 3, 3], None, 2, 2, 1, 1, 2, torch.nn.functional.conv1d),
- ([32, 16, 10], [16, 3, 3], None, 2, 2, 1, 1, 2, torch.nn.functional.conv_transpose1d),
- ([1, 32, 5, 10], [30, 16, 3, 3], None, [2, 2], [0, 0], 0, 1, 2, torch.nn.functional.conv2d),
- ([1, 30, 5, 10], [30, 16, 3, 3], None, [2, 2], [0, 0], 0, 1, 2, torch.nn.functional.conv_transpose2d),
- ([3, 14, 10, 66, 55], [2, 7, 7, 4, 4], None, 1, 1, 2, 1, 2, torch.nn.functional.conv3d),
- ([3, 2, 10, 66, 55], [2, 7, 7, 4, 4], None, 1, 1, 0, 1, 2, torch.nn.functional.conv_transpose3d)]:
+ for (
+ inp_shape,
+ weight_shape,
+ bias,
+ stride,
+ padding,
+ output_padding,
+ dilation,
+ groups,
+ mod,
+ ) in [
+ ([32, 6, 10], [16, 3, 3], None, 2, 2, 1, 1, 2, torch.nn.functional.conv1d),
+ (
+ [32, 16, 10],
+ [16, 3, 3],
+ None,
+ 2,
+ 2,
+ 1,
+ 1,
+ 2,
+ torch.nn.functional.conv_transpose1d,
+ ),
+ (
+ [1, 32, 5, 10],
+ [30, 16, 3, 3],
+ None,
+ [2, 2],
+ [0, 0],
+ 0,
+ 1,
+ 2,
+ torch.nn.functional.conv2d,
+ ),
+ (
+ [1, 30, 5, 10],
+ [30, 16, 3, 3],
+ None,
+ [2, 2],
+ [0, 0],
+ 0,
+ 1,
+ 2,
+ torch.nn.functional.conv_transpose2d,
+ ),
+ (
+ [3, 14, 10, 66, 55],
+ [2, 7, 7, 4, 4],
+ None,
+ 1,
+ 1,
+ 2,
+ 1,
+ 2,
+ torch.nn.functional.conv3d,
+ ),
+ (
+ [3, 2, 10, 66, 55],
+ [2, 7, 7, 4, 4],
+ None,
+ 1,
+ 1,
+ 0,
+ 1,
+ 2,
+ torch.nn.functional.conv_transpose3d,
+ ),
+ ]:
inp = torch.rand(inp_shape)
weight = torch.rand(weight_shape)
- if mod in [torch.nn.functional.conv1d, torch.nn.functional.conv2d, torch.nn.functional.conv3d]:
+ if mod in [
+ torch.nn.functional.conv1d,
+ torch.nn.functional.conv2d,
+ torch.nn.functional.conv3d,
+ ]:
res = mod(inp, weight, bias, stride, padding, dilation, groups).size()
else:
- res = mod(inp, weight, bias, stride, padding, output_padding, dilation, groups).size()
+ res = mod(
+ inp, weight, bias, stride, padding, output_padding, dilation, groups
+ ).size()
def foo(inp, weight):
- if mod in [torch.nn.functional.conv1d, torch.nn.functional.conv2d, torch.nn.functional.conv3d]:
+ if mod in [
+ torch.nn.functional.conv1d,
+ torch.nn.functional.conv2d,
+ torch.nn.functional.conv3d,
+ ]:
return mod(inp, weight, bias, stride, padding, dilation, groups)
else:
- return mod(inp, weight, bias, stride, padding, output_padding, dilation, groups)
+ return mod(
+ inp,
+ weight,
+ bias,
+ stride,
+ padding,
+ output_padding,
+ dilation,
+ groups,
+ )
fn = torch.jit.trace(foo, (inp, weight))
torch._C._jit_erase_non_input_shape_information(fn.graph)
@@ -280,33 +369,58 @@ class TestSymbolicShapeAnalysis(JitTestCase):
]
for inp in inps:
- funcs_template = dedent('''
+ funcs_template = dedent(
+ """
def func():
return torch.arange({args})
- ''')
+ """
+ )
inp_s = str(inp)[1:-1] # remove tuple parens
funcs_str = funcs_template.format(args=inp_s)
scope = {}
execWrapper(funcs_str, globals(), scope)
cu = torch.jit.CompilationUnit(funcs_str)
- self.checkShapeAnalysis(list(cu.func().size()), cu.func.graph, assert_propagation=True, constant_prop=False)
+ self.checkShapeAnalysis(
+ list(cu.func().size()),
+ cu.func.graph,
+ assert_propagation=True,
+ constant_prop=False,
+ )
def test_shape_embedding_bag(self):
# TODO: merge into opinfos, having difficulties there
with torch.no_grad():
+
def make_arg(shape, low=None, high=None):
- return make_tensor(shape, device='cpu', dtype=torch.int64,
- low=low, high=high, requires_grad=False)
+ return make_tensor(
+ shape,
+ device="cpu",
+ dtype=torch.int64,
+ low=low,
+ high=high,
+ requires_grad=False,
+ )
nn_inps = (
- (make_arg((40,), 0, 9), torch.nn.Embedding(20, embedding_dim=64, max_norm=1.0)),
+ (
+ make_arg((40,), 0, 9),
+ torch.nn.Embedding(20, embedding_dim=64, max_norm=1.0),
+ ),
(make_arg((2, 4), 0, 9), torch.nn.Embedding(10, 20, sparse=True)),
(make_arg((0,)), torch.nn.Embedding(0, 0, sparse=True)),
(make_arg((2, 4), 0, 9), torch.nn.Embedding(10, 0, sparse=True)),
(make_arg((4,), 0, 21), torch.nn.Embedding(22, 5, max_norm=1.0)),
- (make_arg((2,), 0, 1), torch.nn.Embedding.from_pretrained(torch.arange(6.).view(2, 3), max_norm=2.,
- norm_type=.5, scale_grad_by_freq=False, sparse=True)),
+ (
+ make_arg((2,), 0, 1),
+ torch.nn.Embedding.from_pretrained(
+ torch.arange(6.0).view(2, 3),
+ max_norm=2.0,
+ norm_type=0.5,
+ scale_grad_by_freq=False,
+ sparse=True,
+ ),
+ ),
)
for inp, module in nn_inps:
@@ -326,14 +440,16 @@ class TestSymbolicShapeAnalysis(JitTestCase):
fn = torch.jit.trace(foo, (inp.detach(),), check_trace=False)
- self.checkShapeAnalysis(out_size, fn.graph, assert_propagation=True, constant_prop=False)
+ self.checkShapeAnalysis(
+ out_size, fn.graph, assert_propagation=True, constant_prop=False
+ )
def test_shape_concat(self):
# TODO: unify with opinfo tests, traces of lists dont preserve sizes in IR
sample_inputs = sample_inputs_cat_concat(None, "cpu", torch.float, False)
class CatMod(nn.Module):
- __constants__ = ['dim']
+ __constants__ = ["dim"]
def __init__(self, dim=0):
super().__init__()
@@ -374,16 +490,23 @@ class TestSymbolicShapeAnalysis(JitTestCase):
# Also, as the return shapes are the input, weight, and bias shape, there is no point
# in a really complicated test
- input = torch.randn((16, 16, 8, 8), dtype=torch.float32, device="cpu", requires_grad=True)
- weight = torch.randn((8, 4, 3, 3), dtype=torch.float32, device="cpu", requires_grad=True)
+ input = torch.randn(
+ (16, 16, 8, 8), dtype=torch.float32, device="cpu", requires_grad=True
+ )
+ weight = torch.randn(
+ (8, 4, 3, 3), dtype=torch.float32, device="cpu", requires_grad=True
+ )
out_grad = torch.randn((16, 8, 8, 8), dtype=torch.float32, device="cpu")
-
@torch.jit.script
def conv_bwd(input, weight, grad):
- bias_sizes = [8, ]
+ bias_sizes = [
+ 8,
+ ]
args = ([1, 1], [1, 1], [1, 1], False, [0, 0], 4, [True, True, True])
- return torch.ops.aten.convolution_backward(grad, input, weight, bias_sizes, *args)
+ return torch.ops.aten.convolution_backward(
+ grad, input, weight, bias_sizes, *args
+ )
self.assert_shape_equal_scripted(conv_bwd, (input, weight, out_grad))
@@ -391,15 +514,19 @@ class TestSymbolicShapeAnalysis(JitTestCase):
def conv_bwd_2(input, weight, grad):
bias_sizes = None
args = ([1, 1], [1, 1], [1, 1], False, [0, 0], 4, [True, True, True])
- return torch.ops.aten.convolution_backward(grad, input, weight, bias_sizes, *args)
- self.assert_shape_equal_scripted(conv_bwd_2, (input, weight, out_grad))
+ return torch.ops.aten.convolution_backward(
+ grad, input, weight, bias_sizes, *args
+ )
+ self.assert_shape_equal_scripted(conv_bwd_2, (input, weight, out_grad))
def test_returning_input_symbolic_shapes(self):
mm = torch.jit.freeze(torch.jit.script(nn.Conv2d(16, 33, 3, stride=2).eval()))
inps = list(mm.graph.inputs())
inps[1].setType(inps[1].type().with_sizes([None, None, None, None]))
- shape_compute_graph = torch._C._jit_pass_propagate_shapes_on_graph_and_build_compute(mm.graph)
+ shape_compute_graph = (
+ torch._C._jit_pass_propagate_shapes_on_graph_and_build_compute(mm.graph)
+ )
g = shape_compute_graph.partial_eval_shape_graph()
# to make into a jit function cant have multiple outputs
g.makeMultiOutputIntoTuple()
@@ -412,8 +539,12 @@ class TestSymbolicShapeAnalysis(JitTestCase):
def test_partial_eval_graph_conv(self):
mm = torch.jit.freeze(torch.jit.script(nn.Conv2d(16, 33, 3, stride=2).eval()))
- shape_compute_graph = torch._C._jit_pass_propagate_shapes_on_graph_and_build_compute(mm.graph)
- output_sizes = mm.graph.findNode("aten::conv2d").output().type().symbolic_sizes()
+ shape_compute_graph = (
+ torch._C._jit_pass_propagate_shapes_on_graph_and_build_compute(mm.graph)
+ )
+ output_sizes = (
+ mm.graph.findNode("aten::conv2d").output().type().symbolic_sizes()
+ )
# calculating 0, 2 and 3 index
for i in [0, 2, 3]:
self.assertTrue(output_sizes[i] < 0)
@@ -428,7 +559,9 @@ class TestSymbolicShapeAnalysis(JitTestCase):
for o, oe in zip(output, output_eager[0:1] + output_eager[2:]):
self.assertEqual(o, oe)
- def checkSymShapeCompute(self, shape_compute_graph, nodes, node_output_sizes, shape_inputs):
+ def checkSymShapeCompute(
+ self, shape_compute_graph, nodes, node_output_sizes, shape_inputs
+ ):
g = shape_compute_graph.partial_eval_shape_graph()
self.assertTrue(len(list(g.inputs())) == len(shape_inputs))
output_sym_map = shape_compute_graph.graph_output_to_symbolic_shape_dim()
@@ -451,27 +584,49 @@ class TestSymbolicShapeAnalysis(JitTestCase):
self.assertEqual(sym_outputs[sym_shape_index], output_shape[i])
def test_partial_eval_stitching(self):
- conv1 = torch.nn.Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
- max_pool = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
- conv2 = nn.Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
-
- mod = torch.jit.freeze(torch.jit.script(nn.Sequential(conv1, max_pool, conv2).eval()))
+ conv1 = torch.nn.Conv2d(
+ 3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False
+ )
+ max_pool = torch.nn.MaxPool2d(
+ kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False
+ )
+ conv2 = nn.Conv2d(
+ 64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False
+ )
+
+ mod = torch.jit.freeze(
+ torch.jit.script(nn.Sequential(conv1, max_pool, conv2).eval())
+ )
conv1_output = conv1(torch.rand(1, 3, 224, 224))
max_pool_output = max_pool(conv1_output)
conv2_output = conv2(max_pool_output)
- shape_compute_graph = torch._C._jit_pass_propagate_shapes_on_graph_and_build_compute(mod.graph)
- nodes = [mod.graph.findNode("aten::max_pool2d")] + list(mod.graph.findAllNodes("aten::conv2d"))
- output_shapes = [max_pool_output.size(), conv1_output.size(), conv2_output.size()]
- self.checkSymShapeCompute(shape_compute_graph, nodes, output_shapes, ([1, 3, 224, 224],))
+ shape_compute_graph = (
+ torch._C._jit_pass_propagate_shapes_on_graph_and_build_compute(mod.graph)
+ )
+ nodes = [mod.graph.findNode("aten::max_pool2d")] + list(
+ mod.graph.findAllNodes("aten::conv2d")
+ )
+ output_shapes = [
+ max_pool_output.size(),
+ conv1_output.size(),
+ conv2_output.size(),
+ ]
+ self.checkSymShapeCompute(
+ shape_compute_graph, nodes, output_shapes, ([1, 3, 224, 224],)
+ )
def test_refinement_through_graph_stitching(self):
class TwoConvs(torch.nn.Module):
def __init__(self):
super().__init__()
- self.conv1 = torch.nn.Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
- self.conv2 = torch.nn.Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
+ self.conv1 = torch.nn.Conv2d(
+ 3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False
+ )
+ self.conv2 = torch.nn.Conv2d(
+ 3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False
+ )
def forward(self, x):
a = self.conv1(x)
@@ -495,18 +650,29 @@ class TestSymbolicShapeAnalysis(JitTestCase):
self.assertEqual(out1, out2)
def test_stitching_multi_output(self):
- max_pool = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False, return_indices=True)
+ max_pool = torch.nn.MaxPool2d(
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ dilation=1,
+ ceil_mode=False,
+ return_indices=True,
+ )
tensor = torch.rand(1, 3, 224, 224)
mod = torch.jit.trace(max_pool, (tensor,))
mod = torch.jit.freeze(mod.eval())
inp = list(mod.graph.inputs())[1]
inp.setType(inp.type().with_sizes([None, None, None, None]))
output_tensor = list(mod(tensor)[0].size())
- self.run_pass('lower_all_tuples', mod.graph)
- shape_compute_graph = torch._C._jit_pass_propagate_shapes_on_graph_and_build_compute(mod.graph)
+ self.run_pass("lower_all_tuples", mod.graph)
+ shape_compute_graph = (
+ torch._C._jit_pass_propagate_shapes_on_graph_and_build_compute(mod.graph)
+ )
max_pool_node = mod.graph.findNode("aten::max_pool2d_with_indices")
outs = list(max_pool_node.outputs())
- self.assertEqual(outs[0].type().symbolic_sizes(), outs[1].type().symbolic_sizes())
+ self.assertEqual(
+ outs[0].type().symbolic_sizes(), outs[1].type().symbolic_sizes()
+ )
g = shape_compute_graph.partial_eval_shape_graph()
# to make into a jit function cant have multiple outputs
g.makeMultiOutputIntoTuple()
@@ -528,7 +694,6 @@ class TestSymbolicShapeAnalysis(JitTestCase):
self.assertEqual(out, [-2, -3])
def test_stitching_concat(self):
-
@torch.jit.script
def foo1(a, b, x, y):
return (a / b) + torch.cat([x, y])
@@ -542,15 +707,25 @@ class TestSymbolicShapeAnalysis(JitTestCase):
for inp in foo.graph.inputs():
inp.setType(inp.type().with_sizes([None, None]))
- shape_compute_graph = torch._C._jit_pass_propagate_shapes_on_graph_and_build_compute(foo.graph)
- nodes = [g.findNode("aten::div")] + [g.findNode("aten::add")] + [g.findNode("aten::cat")]
+ shape_compute_graph = (
+ torch._C._jit_pass_propagate_shapes_on_graph_and_build_compute(
+ foo.graph
+ )
+ )
+ nodes = (
+ [g.findNode("aten::div")]
+ + [g.findNode("aten::add")]
+ + [g.findNode("aten::cat")]
+ )
inps = [1, 10], [20, 10], [15, 1], [5, 1]
output_shapes = [[20, 10], [20, 10], [20, 1]]
self.checkSymShapeCompute(shape_compute_graph, nodes, output_shapes, inps)
- @unittest.skipIf(not hasattr(torch.jit, "_shapes"), "shape functions not loaded in python")
+ @unittest.skipIf(
+ not hasattr(torch.jit, "_shapes"), "shape functions not loaded in python"
+ )
def test_shape_function_includes(self):
inp_shape = [1, 16, 5, 10]
weight_shape = [33, 16, 3, 3]
@@ -559,7 +734,9 @@ class TestSymbolicShapeAnalysis(JitTestCase):
padding = [0, 0]
dilation = [1, 1]
groups = 1
- res = torch.jit._shapes.conv2d(inp_shape, weight_shape, bias, stride, padding, dilation, groups)
+ res = torch.jit._shapes.conv2d(
+ inp_shape, weight_shape, bias, stride, padding, dilation, groups
+ )
self.assertEqual(res, [1, 33, 2, 4])
m1_shape = [10, 20]
@@ -580,8 +757,11 @@ class TestSymbolicShapeAnalysis(JitTestCase):
def wrong_input_types(x, y):
x: List[int] = []
return x
+
with self.assertRaisesRegex(RuntimeError, "Expected supertype of int"):
- torch._C._jit_register_shape_compute_graph_for_node(node, wrong_input_types.graph)
+ torch._C._jit_register_shape_compute_graph_for_node(
+ node, wrong_input_types.graph
+ )
@torch.jit.script
def wrong_output_types(x: List[int], y: List[int]):
@@ -589,7 +769,9 @@ class TestSymbolicShapeAnalysis(JitTestCase):
return x
with self.assertRaisesRegex(RuntimeError, "but got graph_type"):
- torch._C._jit_register_shape_compute_graph_for_node(node, wrong_output_types.graph)
+ torch._C._jit_register_shape_compute_graph_for_node(
+ node, wrong_output_types.graph
+ )
@torch.jit.script
def too_many_inputs(x: List[int], y: List[int], z: Any, z2: Any):
@@ -597,7 +779,9 @@ class TestSymbolicShapeAnalysis(JitTestCase):
return x
with self.assertRaises(RuntimeError) as error:
- torch._C._jit_register_shape_compute_graph_for_node(node, too_many_inputs.graph)
+ torch._C._jit_register_shape_compute_graph_for_node(
+ node, too_many_inputs.graph
+ )
self.assertTrue("fewer arguments than schema" in str(error.exception))
@@ -608,9 +792,22 @@ class TestSymbolicShapeAnalysis(JitTestCase):
inputs = list(foo.graph.inputs())
inputs[0].setType(inputs[0].type().with_sizes([8, 2]))
- inputs[1].setType(inputs[1].type().with_sizes([8,]))
+ inputs[1].setType(
+ inputs[1]
+ .type()
+ .with_sizes(
+ [
+ 8,
+ ]
+ )
+ )
torch._C._jit_pass_propagate_shapes_on_graph(foo.graph)
- self.assertEqual(next(foo.graph.outputs()).type().sizes(), [8,])
+ self.assertEqual(
+ next(foo.graph.outputs()).type().sizes(),
+ [
+ 8,
+ ],
+ )
def test_squeeze_dims(self):
@torch.jit.script
diff --git a/test/jit/test_tensor_creation_ops.py b/test/jit/test_tensor_creation_ops.py
index b3bab0eb20..3ca2919ade 100644
--- a/test/jit/test_tensor_creation_ops.py
+++ b/test/jit/test_tensor_creation_ops.py
@@ -10,10 +10,13 @@ pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
+
class TestTensorCreationOps(JitTestCase):
"""
@@ -27,7 +30,7 @@ class TestTensorCreationOps(JitTestCase):
# as integers, which are not comparable against eager torch.dtype.
assert perm.dtype == torch.int64
- self.checkScript(randperm, (3, ))
+ self.checkScript(randperm, (3,))
def test_randperm_specifed_dtype(self):
def randperm(x: int):
@@ -36,7 +39,7 @@ class TestTensorCreationOps(JitTestCase):
# as integers, which are not comparable against eager torch.dtype.
assert perm.dtype == torch.float
- self.checkScript(randperm, (3, ))
+ self.checkScript(randperm, (3,))
def test_triu_indices_default_dtype(self):
def triu_indices(rows: int, cols: int):
diff --git a/test/jit/test_tensor_methods.py b/test/jit/test_tensor_methods.py
index c761a3884c..8e78c8684f 100644
--- a/test/jit/test_tensor_methods.py
+++ b/test/jit/test_tensor_methods.py
@@ -8,8 +8,8 @@ import torch
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
-from torch.testing._internal.jit_utils import JitTestCase
from torch.testing import FileCheck
+from torch.testing._internal.jit_utils import JitTestCase
if __name__ == "__main__":
raise RuntimeError(
@@ -18,6 +18,7 @@ if __name__ == "__main__":
"instead."
)
+
class TestTensorMethods(JitTestCase):
def test_getitem(self):
def tensor_getitem(inp: torch.Tensor):
@@ -25,7 +26,7 @@ class TestTensorMethods(JitTestCase):
return inp.__getitem__(indices)
inp = torch.rand(3, 4)
- self.checkScript(tensor_getitem, (inp, ))
+ self.checkScript(tensor_getitem, (inp,))
scripted = torch.jit.script(tensor_getitem)
FileCheck().check("aten::index").run(scripted.graph)
@@ -35,5 +36,6 @@ class TestTensorMethods(JitTestCase):
return inp.__getitem__()
with self.assertRaisesRegexWithHighlight(
- RuntimeError, "expected exactly 1 argument", "inp.__getitem__"):
+ RuntimeError, "expected exactly 1 argument", "inp.__getitem__"
+ ):
torch.jit.script(tensor_getitem_invalid)
diff --git a/test/jit/test_torchbind.py b/test/jit/test_torchbind.py
index e7ffb056c1..eaa090455b 100644
--- a/test/jit/test_torchbind.py
+++ b/test/jit/test_torchbind.py
@@ -1,27 +1,27 @@
# Owner(s): ["oncall: jit"]
+import copy
import io
import os
import sys
-import copy
import unittest
+from typing import Optional
import torch
-from typing import Optional
from torch.testing._internal.common_utils import skipIfTorchDynamo
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
-from torch.testing._internal.jit_utils import JitTestCase
+from torch.testing import FileCheck
from torch.testing._internal.common_utils import (
+ find_library_location,
IS_FBCODE,
IS_MACOS,
IS_SANDCASTLE,
IS_WINDOWS,
- find_library_location,
)
-from torch.testing import FileCheck
+from torch.testing._internal.jit_utils import JitTestCase
if __name__ == "__main__":
raise RuntimeError(
@@ -30,14 +30,15 @@ if __name__ == "__main__":
"instead."
)
+
@skipIfTorchDynamo("skipping as a precaution")
class TestTorchbind(JitTestCase):
def setUp(self):
if IS_SANDCASTLE or IS_MACOS or IS_FBCODE:
raise unittest.SkipTest("non-portable load_library call used in test")
- lib_file_path = find_library_location('libtorchbind_test.so')
+ lib_file_path = find_library_location("libtorchbind_test.so")
if IS_WINDOWS:
- lib_file_path = find_library_location('torchbind_test.dll')
+ lib_file_path = find_library_location("torchbind_test.dll")
torch.ops.load_library(str(lib_file_path))
def test_torchbind(self):
@@ -50,15 +51,17 @@ class TestTorchbind(JitTestCase):
val = torch.classes._TorchScriptTesting._Foo(5, 3)
val.increment(1)
return val
+
test_equality(f, lambda x: x)
with self.assertRaisesRegex(RuntimeError, "Expected a value of type 'int'"):
val = torch.classes._TorchScriptTesting._Foo(5, 3)
- val.increment('foo')
+ val.increment("foo")
def f():
ss = torch.classes._TorchScriptTesting._StackString(["asdf", "bruh"])
return ss.pop()
+
test_equality(f, lambda x: x)
def f():
@@ -66,6 +69,7 @@ class TestTorchbind(JitTestCase):
ss2 = torch.classes._TorchScriptTesting._StackString(["111", "222"])
ss1.push(ss2.pop())
return ss1.pop() + ss2.pop()
+
test_equality(f, lambda x: x)
# test nn module with prepare_scriptable function
@@ -116,8 +120,11 @@ class TestTorchbind(JitTestCase):
scripted = torch.jit.script(foo)
# Ensure we are creating the object and calling __init__
# rather than calling the __init__wrapper nonsense
- fc = FileCheck().check('prim::CreateObject()')\
- .check('prim::CallMethod[name="__init__"]')
+ fc = (
+ FileCheck()
+ .check("prim::CreateObject()")
+ .check('prim::CallMethod[name="__init__"]')
+ )
fc.run(str(scripted.graph))
out = scripted()
self.assertEqual(out.pop(), "mom")
@@ -167,7 +174,7 @@ class TestTorchbind(JitTestCase):
out, result = scripted()
self.assertEqual(result, 10)
- with self.assertRaisesRegex(RuntimeError, 'can\'t set attribute'):
+ with self.assertRaisesRegex(RuntimeError, "can't set attribute"):
out.y = 5
def foo_not_setter():
@@ -177,9 +184,11 @@ class TestTorchbind(JitTestCase):
# getY method intentionally adds 4 to x
return fooGetterSetter.y
- with self.assertRaisesRegexWithHighlight(RuntimeError,
- 'Tried to set read-only attribute: y',
- 'fooGetterSetter.y = old + 4'):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError,
+ "Tried to set read-only attribute: y",
+ "fooGetterSetter.y = old + 4",
+ ):
scripted = torch.jit.script(foo_not_setter)
def test_torchbind_def_property_readwrite(self):
@@ -196,9 +205,9 @@ class TestTorchbind(JitTestCase):
fooReadWrite.y = 5
return fooReadWrite
- with self.assertRaisesRegexWithHighlight(RuntimeError,
- 'Tried to set read-only attribute: y',
- 'fooReadWrite.y = 5'):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "Tried to set read-only attribute: y", "fooReadWrite.y = 5"
+ ):
scripted = torch.jit.script(foo_readwrite_error)
def test_torchbind_take_instance_as_method_arg(self):
@@ -250,7 +259,9 @@ class TestTorchbind(JitTestCase):
return self.foo_mod.info()
def to_ivalue(self):
- torchbind_model = torch.classes._TorchScriptTesting._Foo(self.foo_mod.info(), 1)
+ torchbind_model = torch.classes._TorchScriptTesting._Foo(
+ self.foo_mod.info(), 1
+ )
return FooBar(torchbind_model)
inst = FooBar(torch.classes._TorchScriptTesting._Foo(2, 3))
@@ -338,7 +349,7 @@ class TestTorchbind(JitTestCase):
self.assertEqual(torch.zeros(4, 4), traced())
def test_torchbind_pass_wrong_type(self):
- with self.assertRaisesRegex(RuntimeError, 'but instead found type \'Tensor\''):
+ with self.assertRaisesRegex(RuntimeError, "but instead found type 'Tensor'"):
torch.ops._TorchScriptTesting.take_an_instance(torch.rand(3, 4))
def test_torchbind_tracing_nested(self):
@@ -368,12 +379,15 @@ class TestTorchbind(JitTestCase):
self.assertEqual(nt_loaded.pop(), exp)
def test_torchbind_instantiate_missing_class(self):
- with self.assertRaisesRegex(RuntimeError, 'Tried to instantiate class \'foo.IDontExist\', but it does not exist!'):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Tried to instantiate class 'foo.IDontExist', but it does not exist!",
+ ):
torch.classes.foo.IDontExist(3, 4, 5)
def test_torchbind_optional_explicit_attr(self):
class TorchBindOptionalExplicitAttr(torch.nn.Module):
- foo : Optional[torch.classes._TorchScriptTesting._StackString]
+ foo: Optional[torch.classes._TorchScriptTesting._StackString]
def __init__(self):
super().__init__()
@@ -384,13 +398,13 @@ class TestTorchbind(JitTestCase):
if foo_obj is not None:
return foo_obj.pop()
else:
- return '<None>'
+ return "<None>"
mod = TorchBindOptionalExplicitAttr()
scripted = torch.jit.script(mod)
def test_torchbind_no_init(self):
- with self.assertRaisesRegex(RuntimeError, 'torch::init'):
+ with self.assertRaisesRegex(RuntimeError, "torch::init"):
x = torch.classes._TorchScriptTesting._NoInit()
def test_profiler_custom_op(self):
@@ -401,17 +415,17 @@ class TestTorchbind(JitTestCase):
found_event = False
for e in prof.function_events:
- if e.name == '_TorchScriptTesting::take_an_instance':
+ if e.name == "_TorchScriptTesting::take_an_instance":
found_event = True
self.assertTrue(found_event)
def test_torchbind_getattr(self):
foo = torch.classes._TorchScriptTesting._StackString(["test"])
- self.assertEqual(None, getattr(foo, 'bar', None))
+ self.assertEqual(None, getattr(foo, "bar", None))
def test_torchbind_attr_exception(self):
foo = torch.classes._TorchScriptTesting._StackString(["test"])
- with self.assertRaisesRegex(AttributeError, 'does not have a field'):
+ with self.assertRaisesRegex(AttributeError, "does not have a field"):
foo.bar
def test_lambda_as_constructor(self):
diff --git a/test/jit/test_tracer.py b/test/jit/test_tracer.py
index 6c381edf48..5da8ab61c5 100644
--- a/test/jit/test_tracer.py
+++ b/test/jit/test_tracer.py
@@ -1,39 +1,55 @@
# Owner(s): ["oncall: jit"]
-import unittest
+import copy
import io
import os
import sys
-import copy
+import unittest
import torch
import torch.nn as nn
import torch.nn.functional as F
-from torch.autograd import Variable, Function
+from torch.autograd import Function, Variable
from torch.testing import FileCheck
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
-from torch.testing._internal.common_utils import suppress_warnings, \
- skipIfCompiledWithoutNumpy, enable_profiling_mode_for_profiling_tests, \
- IS_SANDCASTLE, TemporaryFileName, skipIfCrossRef, skipIfTorchDynamo
-from torch.testing._internal.jit_utils import JitTestCase, enable_cpu_fuser, \
- _tmp_donotuse_dont_inline_everything, _trace, RUN_CUDA, \
- RUN_CUDA_MULTI_GPU, make_global
-from torch.testing._internal.common_cuda import with_tf32_off
-from torch import Tensor
+import warnings
# Standard library
from collections import namedtuple
from itertools import chain
from typing import Dict, List, Optional, Tuple
-import warnings
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+from torch import Tensor
+from torch.testing._internal.common_cuda import with_tf32_off
+from torch.testing._internal.common_utils import (
+ enable_profiling_mode_for_profiling_tests,
+ IS_SANDCASTLE,
+ skipIfCompiledWithoutNumpy,
+ skipIfCrossRef,
+ skipIfTorchDynamo,
+ suppress_warnings,
+ TemporaryFileName,
+)
+from torch.testing._internal.jit_utils import (
+ _tmp_donotuse_dont_inline_everything,
+ _trace,
+ enable_cpu_fuser,
+ JitTestCase,
+ make_global,
+ RUN_CUDA,
+ RUN_CUDA_MULTI_GPU,
+)
+
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
+
@skipIfTorchDynamo("Not a suitable test for TorchDynamo")
class TestTracer(JitTestCase):
@@ -123,18 +139,23 @@ class TestTracer(JitTestCase):
return x
else:
return torch.zeros_like(x)
+
x = torch.neg(x)
return make_decision(flag, x)
-
decision = TracedInlineDecision()
- torch.jit.trace(decision, (torch.rand(3, 4), torch.tensor([True], dtype=torch.bool)), check_trace=True)
+ torch.jit.trace(
+ decision,
+ (torch.rand(3, 4), torch.tensor([True], dtype=torch.bool)),
+ check_trace=True,
+ )
def test_trace_single_tuple(self):
- x = torch.tensor(2.)
+ x = torch.tensor(2.0)
def f2(x):
return (x,)
+
jit_f2 = torch.jit.trace(f2, x)
assert f2(x) == jit_f2(x) # fails
@@ -149,7 +170,7 @@ class TestTracer(JitTestCase):
trace_model = torch.jit.trace(run_cummax, (example_input, out_1, out_2))
def test_trace_namedtuple(self):
- Point = namedtuple('point', ['x', 'y'])
+ Point = namedtuple("point", ["x", "y"])
def f(p):
if type(p) is tuple:
@@ -172,16 +193,21 @@ class TestTracer(JitTestCase):
test_inputs = (torch.randint(0, 9, (9, 9)), torch.tensor(8))
eager_out = mod(*test_inputs)
traced_out = traced_func(*test_inputs)
- self.assertNotWarn(lambda: traced_func(*test_inputs), "Shouldn't throw slicing related warn here")
+ self.assertNotWarn(
+ lambda: traced_func(*test_inputs),
+ "Shouldn't throw slicing related warn here",
+ )
self.assertEqual(eager_out, traced_out)
test_inputs = (torch.randint(0, 50, (50, 50)), torch.tensor(12))
eager_out = mod(*test_inputs)
traced_out = traced_func(*test_inputs)
- self.assertNotWarn(lambda: traced_func(*test_inputs), "Shouldn't throw slicing related warn here")
+ self.assertNotWarn(
+ lambda: traced_func(*test_inputs),
+ "Shouldn't throw slicing related warn here",
+ )
self.assertEqual(eager_out, traced_out)
-
def test_typeas_trace_check(self):
a = torch.tensor([0.4], requires_grad=True)
b = torch.tensor([0.7], requires_grad=True)
@@ -198,7 +224,13 @@ class TestTracer(JitTestCase):
def fn(x, y):
return x[y]
- fn_traced = torch.jit.trace(fn, (x, y,))
+ fn_traced = torch.jit.trace(
+ fn,
+ (
+ x,
+ y,
+ ),
+ )
self.assertEqual(fn(x, y), fn_traced(x, y))
@@ -223,9 +255,9 @@ class TestTracer(JitTestCase):
def test_index_put(self):
ten = torch.zeros(3, 3)
- mask = torch.tensor([[True, True, True],
- [True, False, False],
- [True, True, False]])
+ mask = torch.tensor(
+ [[True, True, True], [True, False, False], [True, True, False]]
+ )
def test_fn(ten, mask):
ten[mask] = torch.ones(6)
@@ -251,14 +283,14 @@ class TestTracer(JitTestCase):
graph = traced.graph_for(x)
# There should be 4 int constants for the right sides of operators, plus one
# for the alpha argument for add and sub
- self.assertTrue(str(traced.graph_for(x)).count(': int = prim::Constant') == 5)
+ self.assertTrue(str(traced.graph_for(x)).count(": int = prim::Constant") == 5)
@suppress_warnings
def test_constant(self):
x = torch.randn(2, 2, requires_grad=True)
def f(x):
- return x.matmul(torch.diag(torch.tensor([2., 2.])))
+ return x.matmul(torch.diag(torch.tensor([2.0, 2.0])))
self.checkTrace(f, (x,), (torch.ones(2, 2, requires_grad=True),))
@@ -276,9 +308,8 @@ class TestTracer(JitTestCase):
scripted = torch.jit.trace(foobar, (), check_trace=True)
-
def test_inplace_transplant(self):
- x = torch.tensor([0.], requires_grad=True)
+ x = torch.tensor([0.0], requires_grad=True)
def fn(x):
y = x.clone()
@@ -287,10 +318,10 @@ class TestTracer(JitTestCase):
return y
g, _ = torch.jit._get_trace_graph(fn, (x,))
- self.run_pass('dce', g)
- FileCheck().check_count("aten::clone", 1, exactly=True) \
- .check_count("aten::add_", 2, exactly=True) \
- .check_next("return").run(str(g))
+ self.run_pass("dce", g)
+ FileCheck().check_count("aten::clone", 1, exactly=True).check_count(
+ "aten::add_", 2, exactly=True
+ ).check_next("return").run(str(g))
self.assertExportImport(g, (x,))
def test_inplace_flags(self):
@@ -313,7 +344,7 @@ class TestTracer(JitTestCase):
def backward(ctx, go):
return go
- x = torch.tensor([0.], requires_grad=True)
+ x = torch.tensor([0.0], requires_grad=True)
def fn(x):
y = RegularFn.apply(x)
@@ -323,13 +354,13 @@ class TestTracer(JitTestCase):
return y
trace_graph, _ = torch.jit._get_trace_graph(fn, (x,), _force_outplace=True)
- self.run_pass('dce', trace_graph)
+ self.run_pass("dce", trace_graph)
ops = list(trace_graph.nodes())
for op in ops:
- self.assertTrue(op.hasAttribute('inplace'))
+ self.assertTrue(op.hasAttribute("inplace"))
inplace_flags = [False, True, True, False]
for op, is_inplace in zip(ops, inplace_flags):
- self.assertEqual(op.i('inplace'), is_inplace)
+ self.assertEqual(op.i("inplace"), is_inplace)
def test_inplace_check(self):
class MyInplaceFn(Function):
@@ -348,12 +379,13 @@ class TestTracer(JitTestCase):
x = torch.randn(5, 5)
ge = torch.jit.trace(fn, (x,), _force_outplace=True, check_trace=False)
- with self.assertRaisesRegex(RuntimeError, 'inplace MyInplaceFn'):
+ with self.assertRaisesRegex(RuntimeError, "inplace MyInplaceFn"):
ge(x)
def test_force_outplace_check_fill(self):
def f(x):
return torch.empty(x.shape).fill_(7)
+
x = torch.randn(10, 15)
ft = torch.jit.trace(f, x, _force_outplace=True)
self.assertEqual(f(x), ft(x))
@@ -361,6 +393,7 @@ class TestTracer(JitTestCase):
def test_force_outplace_check_zero(self):
def f(x):
return torch.empty(x.shape).zero_()
+
x = torch.randn(10, 15)
ft = torch.jit.trace(f, x, _force_outplace=True)
self.assertEqual(f(x), ft(x))
@@ -433,7 +466,7 @@ class TestTracer(JitTestCase):
# Test that a trace of torch.full(x.shape) doesn't store the shape as a constant
def test_trace_full_dynamic_shape(self):
def full_with_shape_like(x):
- return torch.full(x.shape, 2.)
+ return torch.full(x.shape, 2.0)
x = torch.randn(3, 4)
ge = torch.jit.trace(full_with_shape_like, example_inputs=x)
@@ -460,7 +493,7 @@ class TestTracer(JitTestCase):
def slice(x):
results = []
for i in range(4):
- results.append(x[:x.size(0) - i, i:x.size(2), i:3])
+ results.append(x[: x.size(0) - i, i : x.size(2), i:3])
return tuple(results)
def slice_select(x):
@@ -489,20 +522,21 @@ class TestTracer(JitTestCase):
def test_trace_slice_with_grad(self):
self.do_trace_slice(True)
-
def test_trace_casts(self):
casts = [
lambda x: x.byte(),
lambda x: x.float(),
lambda x: x.cpu(),
- lambda x: x.to(device='cpu'),
+ lambda x: x.to(device="cpu"),
lambda x: x.to(dtype=torch.int64),
- lambda x: x.to(device='cpu', dtype=torch.float),
- lambda x: x.to(x)
+ lambda x: x.to(device="cpu", dtype=torch.float),
+ lambda x: x.to(x),
]
def assertContainsCast(trace):
- self.assertEqual(sum(n.kind() == 'aten::to' for n in trace.graph.nodes()), 1)
+ self.assertEqual(
+ sum(n.kind() == "aten::to" for n in trace.graph.nodes()), 1
+ )
for cast in casts:
trace = torch.jit.trace(cast, torch.randn(2, 2))
@@ -513,7 +547,9 @@ class TestTracer(JitTestCase):
def to_tensor(x, y):
return x.to(y)
- to_tensor_trace = torch.jit.trace(to_tensor, (torch.randn(2, 2), torch.randn(1, 8)))
+ to_tensor_trace = torch.jit.trace(
+ to_tensor, (torch.randn(2, 2), torch.randn(1, 8))
+ )
assertContainsCast(to_tensor_trace)
x, y = torch.randn(2, 2), torch.randn(1, 10)
self.assertEqual(to_tensor_trace(x, y), to_tensor(x, y))
@@ -524,7 +560,7 @@ class TestTracer(JitTestCase):
def fn(x):
int(x) # Warning 1.
y = x * 1
- if y: # Warning 2.
+ if y: # Warning 2.
pass
q = [x, x * 4]
z = q[y]
@@ -540,12 +576,12 @@ class TestTracer(JitTestCase):
for warn in warns:
self.assertIs(warn.category, torch.jit.TracerWarning)
warns = [str(w.message) for w in warns]
- self.assertIn('a Python integer', warns[0])
- self.assertIn('a Python boolean', warns[1])
- self.assertIn('a Python float', warns[2])
- self.assertIn('a Python list', warns[3])
- self.assertIn('a NumPy array', warns[4])
- self.assertIn('Iterating over', warns[5])
+ self.assertIn("a Python integer", warns[0])
+ self.assertIn("a Python boolean", warns[1])
+ self.assertIn("a Python float", warns[2])
+ self.assertIn("a Python list", warns[3])
+ self.assertIn("a NumPy array", warns[4])
+ self.assertIn("Iterating over", warns[5])
def test_trace_tuple(self):
def fn(x, y):
@@ -555,15 +591,18 @@ class TestTracer(JitTestCase):
traced_fn = torch.jit.trace(fn, (x, y))
self.assertEqual(traced_fn(x, y), fn(x, y))
# should be a tuple nested within another tuple
- FileCheck().check_count("prim::TupleConstruct", 2, exactly=True).check_next("return") \
- .run(str(traced_fn.graph))
+ FileCheck().check_count("prim::TupleConstruct", 2, exactly=True).check_next(
+ "return"
+ ).run(str(traced_fn.graph))
self.assertExportImport(traced_fn.graph, (x, y))
def test_trace_random(self):
def f(mean, std):
return torch.normal(mean, std)
- traced = torch.jit.trace(f, (torch.zeros(2, 3), torch.ones(2, 3)), check_trace=False)
+ traced = torch.jit.trace(
+ f, (torch.zeros(2, 3), torch.ones(2, 3)), check_trace=False
+ )
mean, std = torch.zeros(5, 5), torch.ones(5, 5)
with torch.random.fork_rng(devices=[]):
output = f(mean, std)
@@ -572,19 +611,20 @@ class TestTracer(JitTestCase):
def test_trace_tensor_factory(self):
def run(**kwargs):
- inputs_require_grads = kwargs.pop('inputs_require_grads', True)
+ inputs_require_grads = kwargs.pop("inputs_require_grads", True)
def fn(x):
return x + torch.ones(2, 3, **kwargs)
input_kwargs = kwargs.copy()
- if 'out' in input_kwargs:
- del input_kwargs['out']
+ if "out" in input_kwargs:
+ del input_kwargs["out"]
input = torch.ones(2, 3, **input_kwargs)
self.checkTrace(fn, (input,), inputs_require_grads=inputs_require_grads)
# check we recorded 'ones' and did not just record a constant
tfn = torch.jit.trace(fn, input)
self.assertTrue("ones" in str(tfn.graph))
+
run()
run(dtype=torch.int, inputs_require_grads=False)
run(out=torch.tensor([]))
@@ -598,6 +638,7 @@ class TestTracer(JitTestCase):
x = x.clone()
x[0] = y
return x
+
example = torch.rand(3, 4)
self.checkTrace(stuff, (example, example[0] + 1))
@@ -605,8 +646,17 @@ class TestTracer(JitTestCase):
@unittest.expectedFailure
def test_output_unflatten(self):
"""Check that outputs of traced functions retain the original structure and nesting"""
+
def fn(x):
- return (x * 2, (x ** 2, x + 4, (x + 2,), ), x * 4)
+ return (
+ x * 2,
+ (
+ x**2,
+ x + 4,
+ (x + 2,),
+ ),
+ x * 4,
+ )
self.checkTrace(fn, (torch.randn(2, 2),))
@@ -629,94 +679,103 @@ class TestTracer(JitTestCase):
def test_input_dict_remembers_keys(self):
"""Check that the trace remembers which keys were in a dict input"""
+
class TestModule(torch.nn.Module):
def forward(self, dict_input):
- return dict_input['x']
+ return dict_input["x"]
- input_1 = {'x': torch.tensor(1)}
+ input_1 = {"x": torch.tensor(1)}
m = TestModule()
- m_traced = torch.jit.trace(m, (input_1, ))
+ m_traced = torch.jit.trace(m, (input_1,))
self.assertEqual(m_traced(input_1), torch.tensor(1))
# should work to change the values and not the keys
- input_same_key_different_value = {'x': torch.tensor(2)}
+ input_same_key_different_value = {"x": torch.tensor(2)}
self.assertEqual(m_traced(input_same_key_different_value), torch.tensor(2))
# error to use something that doesn't have `x`
- input_different_key = {'y': torch.tensor(3)}
+ input_different_key = {"y": torch.tensor(3)}
with self.assertRaises(RuntimeError):
m_traced(input_different_key)
# it's okay to have additional elements in the dictionary, so long as 'x' is there
- input_additional_key = {'x': torch.tensor(4), 'y': torch.tensor(3)}
+ input_additional_key = {"x": torch.tensor(4), "y": torch.tensor(3)}
self.assertEqual(m_traced(input_additional_key), torch.tensor(4))
def test_input_dict_insertion_order(self):
"""Check that dictionary access doesn't care about insertion order"""
+
class TestModule(torch.nn.Module):
def forward(self, dict_input):
- return dict_input['x'], dict_input['y']
+ return dict_input["x"], dict_input["y"]
+
input_x_then_y = {}
- input_x_then_y['x'] = torch.tensor(1)
- input_x_then_y['y'] = torch.tensor(2)
+ input_x_then_y["x"] = torch.tensor(1)
+ input_x_then_y["y"] = torch.tensor(2)
m = TestModule()
- m_traced = torch.jit.trace(m, (input_x_then_y, ))
+ m_traced = torch.jit.trace(m, (input_x_then_y,))
self.assertEqual(m_traced(input_x_then_y), (torch.tensor(1), torch.tensor(2)))
input_y_then_x = {}
- input_y_then_x['y'] = torch.tensor(4)
- input_y_then_x['x'] = torch.tensor(3)
+ input_y_then_x["y"] = torch.tensor(4)
+ input_y_then_x["x"] = torch.tensor(3)
self.assertEqual(m_traced(input_y_then_x), (torch.tensor(3), torch.tensor(4)))
def test_input_dict_recursive(self):
class TestModule(torch.nn.Module):
def forward(self, dict_input):
- return dict_input['x'][1]
+ return dict_input["x"][1]
- input_1 = {'x': {1: torch.tensor(1)}}
+ input_1 = {"x": {1: torch.tensor(1)}}
m = TestModule()
- m_traced = torch.jit.trace(m, (input_1, ))
+ m_traced = torch.jit.trace(m, (input_1,))
- input_2 = {'x': {1: torch.tensor(2)}}
+ input_2 = {"x": {1: torch.tensor(2)}}
self.assertEqual(m_traced(input_2), torch.tensor(2))
def test_input_dict_checkTrace_mut(self):
def test(d):
- d['x'].tanh_()
- return d['x']
- inputs = {'x': torch.rand(3, 4), 'y': torch.rand(3, 4)}
+ d["x"].tanh_()
+ return d["x"]
+
+ inputs = {"x": torch.rand(3, 4), "y": torch.rand(3, 4)}
self.checkTrace(test, (inputs,), inputs_require_grads=False)
def test_input_dict_unify(self):
def test(d):
- return d['int'], d['float']
- inputs = {'int': torch.ones((2, 2), dtype=torch.int32),
- 'float': torch.ones((2, 2), dtype=torch.float32)}
+ return d["int"], d["float"]
+
+ inputs = {
+ "int": torch.ones((2, 2), dtype=torch.int32),
+ "float": torch.ones((2, 2), dtype=torch.float32),
+ }
self.checkTrace(test, (inputs,), inputs_require_grads=False)
def test_input_tuple_of_dicts(self):
def test(t):
d = t[0]
- return d['x']['y']
- inputs = {'x': {'y': torch.rand(2, 3)}}
+ return d["x"]["y"]
+
+ inputs = {"x": {"y": torch.rand(2, 3)}}
self.checkTrace(test, ((inputs, inputs),), allow_unused=True)
def test_input_dict_of_dicts(self):
def test(d):
- return d['x']['y']
- nested_input = {'y': torch.rand(2, 3)}
- unified_nested = {'y': torch.rand(3, 2)}
- inputs = {'x': nested_input, 'force_unify': unified_nested}
+ return d["x"]["y"]
+
+ nested_input = {"y": torch.rand(2, 3)}
+ unified_nested = {"y": torch.rand(3, 2)}
+ inputs = {"x": nested_input, "force_unify": unified_nested}
self.checkTrace(test, (inputs,), allow_unused=True)
def test_input_dict_of_lists(self):
def test(d):
- return d['x'][0]
+ return d["x"][0]
- inputs = {'x': [torch.rand(3, 2)]}
+ inputs = {"x": [torch.rand(3, 2)]}
self.checkTrace(test, (inputs,))
def test_input_list_toplevel_flatten(self):
@@ -730,32 +789,38 @@ class TestTracer(JitTestCase):
class Test(torch.nn.Module):
def forward(self, t1, t2):
return torch.add(t1, t2)
+
inputs = [torch.ones(2, 2), torch.rand(2, 2)]
torch.jit.trace(Test(), inputs)
def test_input_list_of_tuples(self):
def test(l):
return l[0][0]
+
inputs = [(torch.ones(2, 2),)]
self.checkTrace(test, (inputs,))
def test_input_dict_empty_list(self):
def test(d):
pass
+
inputs = {1: []}
- with self.assertRaisesRegex(RuntimeError, 'List trace'):
+ with self.assertRaisesRegex(RuntimeError, "List trace"):
self.checkTrace(test, (inputs,))
def test_input_list_mixed_type(self):
def test(d):
pass
+
inputs = [torch.rand(2, 3), (torch.ones(2), torch.ones(2))]
- with self.assertRaisesRegex(RuntimeError, 'consistent'):
+ with self.assertRaisesRegex(RuntimeError, "consistent"):
self.checkTrace(test, (inputs,))
def test_conv(self):
x = torch.ones(20, 16, 50, 40)
- g, outputs, inputs = torch.jit._get_trace_graph(nn.Conv2d(16, 13, 3, bias=False), x, return_inputs=True)
+ g, outputs, inputs = torch.jit._get_trace_graph(
+ nn.Conv2d(16, 13, 3, bias=False), x, return_inputs=True
+ )
m = self.createFunctionFromGraph(g)
self.assertEqual(outputs, m(*inputs))
@@ -773,7 +838,8 @@ class TestTracer(JitTestCase):
def test_nested_inplace(self):
x = torch.randn(2, 2)
g, outputs, inputs = torch.jit._get_trace_graph(
- lambda x: F.threshold(x, 0, 0, inplace=True), (x, ), return_inputs=True)
+ lambda x: F.threshold(x, 0, 0, inplace=True), (x,), return_inputs=True
+ )
m = self.createFunctionFromGraph(g)
self.assertEqual(outputs, m(*inputs))
FileCheck().check("threshold_").run(str(g))
@@ -807,8 +873,8 @@ class TestTracer(JitTestCase):
out.copy_(x)
return out
- g, outputs, inputs = torch.jit._get_trace_graph(f, (x, ), return_inputs=True)
- self.run_pass('dce', g)
+ g, outputs, inputs = torch.jit._get_trace_graph(f, (x,), return_inputs=True)
+ self.run_pass("dce", g)
m = self.createFunctionFromGraph(g)
self.assertEqual(outputs, m(*inputs))
self.assertExportImport(g, (x,))
@@ -822,8 +888,9 @@ class TestTracer(JitTestCase):
return out
g, outputs, inputs = torch.jit._get_trace_graph(
- f, (x, ), return_inputs=True, _force_outplace=True)
- self.run_pass('dce', g)
+ f, (x,), return_inputs=True, _force_outplace=True
+ )
+ self.run_pass("dce", g)
m = self.createFunctionFromGraph(g)
self.assertEqual(outputs, m(*inputs))
self.assertExportImport(g, (x,))
@@ -840,7 +907,7 @@ class TestTracer(JitTestCase):
m = MyModule()
g, _ = torch.jit._get_trace_graph(m, (torch.randn(2, 2),))
- self.run_pass('dce', g)
+ self.run_pass("dce", g)
self.assertEqual(len(list(g.inputs())), 2)
FileCheck().check("mul").check("add").run(str(g))
@@ -853,52 +920,73 @@ class TestTracer(JitTestCase):
class MyModel(torch.nn.Module):
def forward(self, scores, bbox_deltas, im_info, anchors):
a, b = torch.ops._caffe2.GenerateProposals(
- (scores), (bbox_deltas), (im_info), (anchors),
- 2.0, 6000, 300, 0.7, 16, True, -90, 90, 1.0, True,
+ (scores),
+ (bbox_deltas),
+ (im_info),
+ (anchors),
+ 2.0,
+ 6000,
+ 300,
+ 0.7,
+ 16,
+ True,
+ -90,
+ 90,
+ 1.0,
+ True,
)
return a, b
+
model = MyModel()
A = 4
H = 10
W = 8
img_count = 3
scores = torch.ones(img_count, A, H, W, dtype=torch.float32)
- bbox_deltas = torch.linspace(0, 10, steps=img_count * 4 * A * H * W,
- dtype=torch.float32)
+ bbox_deltas = torch.linspace(
+ 0, 10, steps=img_count * 4 * A * H * W, dtype=torch.float32
+ )
bbox_deltas = bbox_deltas.view(img_count, 4 * A, H, W)
im_info = torch.ones(img_count, 3, dtype=torch.float32)
anchors = torch.ones(A, 4, dtype=torch.float32)
inputs = (scores, bbox_deltas, im_info, anchors)
traced_model = torch.jit.trace(model, inputs)
self.assertEqual(traced_model(*inputs), model(*inputs))
- self.assertExportImportModule(traced_model, (scores, bbox_deltas, im_info, anchors))
+ self.assertExportImportModule(
+ traced_model, (scores, bbox_deltas, im_info, anchors)
+ )
def run_ge_tests(self, optimize, use_cuda):
-
with enable_profiling_mode_for_profiling_tests():
with torch.jit.optimized_execution(optimize):
+
def rand(*args):
t = torch.rand(*args).float()
if use_cuda:
t = t.cuda()
return t
- self.checkTrace(lambda a, b: a * b + b,
- [rand(1), rand(1)], [rand(2, 3), rand(2, 3)])
+
+ self.checkTrace(
+ lambda a, b: a * b + b, [rand(1), rand(1)], [rand(2, 3), rand(2, 3)]
+ )
# trivial identity
self.checkTrace(lambda a, b: (b, a), [rand(1), rand(1)])
def foo(a):
t = a * a
return t * t, 4 * t
+
self.checkTrace(foo, [rand(1)])
# unused input
self.checkTrace(
- lambda a, b: a * a, [rand(1), rand(1)], allow_unused=True)
+ lambda a, b: a * a, [rand(1), rand(1)], allow_unused=True
+ )
# test outputs that do not get used in grad
self.checkTrace(foo, [rand(1)], drop=1)
# test autograd fallback
- self.checkTrace(lambda a, b: a * b /
- (a - 2 * b) + b, [rand(1), rand(1)])
+ self.checkTrace(
+ lambda a, b: a * b / (a - 2 * b) + b, [rand(1), rand(1)]
+ )
def test_ge_unoptimized(self):
self.run_ge_tests(False, False)
@@ -917,22 +1005,24 @@ class TestTracer(JitTestCase):
def test_ge(self):
def foo(a, b):
return a * b / (a - b) + b
+
V = Variable
a, b = V(torch.rand(1)), V(torch.rand(1))
ge = torch.jit.trace(foo, (a, b))
a, b = V(torch.rand(1), requires_grad=True), V(
- torch.rand(1), requires_grad=True)
- r, = ge(a, b)
+ torch.rand(1), requires_grad=True
+ )
+ (r,) = ge(a, b)
da, db = torch.autograd.grad(r + 3, [a, b], create_graph=True)
- l2 = (da * db + db * db)
+ l2 = da * db + db * db
g2result = torch.autograd.grad(l2, [da, db])
r = foo(a, b)
da2, db2 = torch.autograd.grad(r + 3, [a, b], create_graph=True)
self.assertEqual(da, da2)
self.assertEqual(db, db2)
- l3 = (da2 * db2 + db2 * db2)
+ l3 = da2 * db2 + db2 * db2
g2result2 = torch.autograd.grad(l3, [da2, db2])
self.assertEqual(g2result, g2result2)
@@ -953,8 +1043,10 @@ class TestTracer(JitTestCase):
def __init__(self, num_features, num_layers):
super().__init__()
self.num_layers = num_layers
- layers = [[nn.Linear(num_features, num_features), nn.Sigmoid()]
- for _ in range(num_layers)]
+ layers = [
+ [nn.Linear(num_features, num_features), nn.Sigmoid()]
+ for _ in range(num_layers)
+ ]
self.submodule = nn.Sequential(*chain(*layers))
def forward(self, x):
@@ -977,7 +1069,9 @@ class TestTracer(JitTestCase):
with self.assertRaises(AttributeError):
linear_submodule.in_features
linear_submodule.weight
- linear_submodule.weight = nn.Parameter(torch.randn(linear_submodule.weight.shape))
+ linear_submodule.weight = nn.Parameter(
+ torch.randn(linear_submodule.weight.shape)
+ )
with self.assertRaises(RuntimeError):
del linear_submodule.weight
@@ -992,9 +1086,9 @@ class TestTracer(JitTestCase):
traced_model.cpu()
cpu_out = traced_model(x.float())
self.assertEqual(cpu_out, cuda_out)
- traced_model.to('cuda')
+ traced_model.to("cuda")
cuda_out = traced_model(x.float().cuda())
- traced_model.to('cpu')
+ traced_model.to("cpu")
cpu_out = traced_model(x.float())
self.assertEqual(cpu_out, cuda_out)
traced_model.to(torch.get_default_dtype())
@@ -1022,15 +1116,23 @@ class TestTracer(JitTestCase):
return h
a = Model()
- b = torch.jit.trace(a, example_inputs=(torch.ones([1], device=torch.device("cuda")),))
+ b = torch.jit.trace(
+ a, example_inputs=(torch.ones([1], device=torch.device("cuda")),)
+ )
FileCheck().check_not("device").run(b.code)
def test_export_no_reorder(self):
def func(a, b):
return a * b / (a - 2 * b) + b
- recording_inputs = [torch.tensor([0.55619788169860839844], dtype=torch.float32, requires_grad=True),
- torch.tensor([0.25947844982147216797], dtype=torch.float32, requires_grad=True)]
+ recording_inputs = [
+ torch.tensor(
+ [0.55619788169860839844], dtype=torch.float32, requires_grad=True
+ ),
+ torch.tensor(
+ [0.25947844982147216797], dtype=torch.float32, requires_grad=True
+ ),
+ ]
ge1 = torch.jit.trace(func, recording_inputs)
ge2 = self.getExportImportCopy(ge1)
@@ -1057,7 +1159,7 @@ class TestTracer(JitTestCase):
def fn(x):
return MyFn.apply(x + 2) + 3
- x = torch.tensor([1., 2., 3.])
+ x = torch.tensor([1.0, 2.0, 3.0])
y = torch.randn(2, 2, requires_grad=True)
fn(x)
fn(y)
@@ -1076,7 +1178,8 @@ class TestTracer(JitTestCase):
def fn(x):
a, b = MyFn.apply(x + 2)
return a + b + 3
- x = torch.tensor([1., 2., 3.])
+
+ x = torch.tensor([1.0, 2.0, 3.0])
y = torch.randn(2, 2, requires_grad=True)
fn(x)
fn(y)
@@ -1150,21 +1253,20 @@ class TestTracer(JitTestCase):
self.foo = Foo()
def forward(self, a, b):
- return self.foo({'a': a, 'b': b})['a']
+ return self.foo({"a": a, "b": b})["a"]
class Foo(torch.nn.Module):
def forward(self, x):
- return {'a': x['a'] * x['b']}
+ return {"a": x["a"] * x["b"]}
x = (torch.rand(3), torch.rand(3))
model = Bar()
self.checkTrace(model, x)
def test_trace_dict_output(self):
-
class TraceDictStrTensor(torch.nn.Module):
def forward(self, a, b):
- return {'a': a, 'b': b}
+ return {"a": a, "b": b}
class TraceDictTensorTensor(torch.nn.Module):
def forward(self, a, b):
@@ -1175,15 +1277,20 @@ class TestTracer(JitTestCase):
torch.jit.trace(TraceDictStrTensor(), x)
traced_dict_str_mod = torch.jit.trace(TraceDictStrTensor(), x, strict=False)
- self.assertEqual(traced_dict_str_mod(*x), {'a': x[0], 'b': x[1]})
+ self.assertEqual(traced_dict_str_mod(*x), {"a": x[0], "b": x[1]})
- traced_dict_tensor_mod = torch.jit.trace(TraceDictTensorTensor(), x, strict=False)
+ traced_dict_tensor_mod = torch.jit.trace(
+ TraceDictTensorTensor(), x, strict=False
+ )
self.assertEqual(traced_dict_tensor_mod(*x), {x[0]: x[1], x[1]: x[0]})
def test_trace_with_tensor_list_output(self):
def f():
return [torch.zeros(1), torch.zeros(5)]
- with self.assertWarnsRegex(torch.jit.TracerWarning, "cause the trace to be incorrect"):
+
+ with self.assertWarnsRegex(
+ torch.jit.TracerWarning, "cause the trace to be incorrect"
+ ):
torch.jit.trace(f, [])
traced_non_strict_f = torch.jit.trace(f, [], strict=False)
self.assertEqual(traced_non_strict_f(), f())
@@ -1191,13 +1298,19 @@ class TestTracer(JitTestCase):
def test_trace_with_number_list_output(self):
def f():
return [1, 5]
- with self.assertRaisesRegex(RuntimeError, r"Only tensors.+can be output from traced functions"):
+
+ with self.assertRaisesRegex(
+ RuntimeError, r"Only tensors.+can be output from traced functions"
+ ):
traced_f = torch.jit.trace(f, [])
def test_trace_with_nested_tensor_list_output(self):
def f():
return [[torch.zeros(1)], [torch.zeros(5)]]
- with self.assertRaisesRegex(RuntimeError, r"Only tensors.+can be output from traced functions"):
+
+ with self.assertRaisesRegex(
+ RuntimeError, r"Only tensors.+can be output from traced functions"
+ ):
traced_f = torch.jit.trace(f, [])
def test_trace_with_nested_strided_tensor_output(self):
@@ -1253,15 +1366,28 @@ class TestTracer(JitTestCase):
return torch.tensor([x.size()[0]])
self.assertEqual(
- tensor_size(torch.rand(15,)),
- torch.tensor([15])
+ tensor_size(
+ torch.rand(
+ 15,
+ )
+ ),
+ torch.tensor([15]),
)
- traced_tensor_size = torch.jit.trace(tensor_size, torch.rand(7,))
+ traced_tensor_size = torch.jit.trace(
+ tensor_size,
+ torch.rand(
+ 7,
+ ),
+ )
self.assertEqual(
- traced_tensor_size(torch.rand(15,)),
- torch.tensor([15])
+ traced_tensor_size(
+ torch.rand(
+ 15,
+ )
+ ),
+ torch.tensor([15]),
)
@torch.jit.script
@@ -1271,8 +1397,13 @@ class TestTracer(JitTestCase):
def foo(x):
return use_device(x)
- traced_tensor_size = torch.jit.trace(foo, torch.rand(7,))
- self.run_pass('inline', traced_tensor_size.graph)
+ traced_tensor_size = torch.jit.trace(
+ foo,
+ torch.rand(
+ 7,
+ ),
+ )
+ self.run_pass("inline", traced_tensor_size.graph)
FileCheck().check("prim::device").run(traced_tensor_size.graph)
def test_trace_save(self):
@@ -1336,10 +1467,12 @@ class TestTracer(JitTestCase):
f = Foo()
traced = torch.jit.trace(f, (torch.rand(3, 4),))
- expected_names = ['__getstate__', '__setstate__']
+ expected_names = ["__getstate__", "__setstate__"]
def check(mod):
- self.assertTrue(all(name in mod._c._method_names() for name in expected_names))
+ self.assertTrue(
+ all(name in mod._c._method_names() for name in expected_names)
+ )
check(traced)
@@ -1375,10 +1508,12 @@ class TestTracer(JitTestCase):
f = Wrapper()
traced = torch.jit.trace(f, (torch.rand(3, 4),))
- expected_names = ['__getstate__', '__setstate__']
+ expected_names = ["__getstate__", "__setstate__"]
def check(mod):
- self.assertTrue(all(name in mod._c._method_names() for name in expected_names))
+ self.assertTrue(
+ all(name in mod._c._method_names() for name in expected_names)
+ )
check(traced.foo)
@@ -1412,7 +1547,7 @@ class TestTracer(JitTestCase):
f = WrapperExports()
traced = torch.jit.trace(f, (torch.rand(3, 4),))
- expected_names = ['addOne']
+ expected_names = ["addOne"]
check(traced)
def test_trace_autograd_function(self):
@@ -1425,12 +1560,10 @@ class TestTracer(JitTestCase):
def backward(ctx, grad_output):
return torch.neg(grad_output)
-
class TracedModule(torch.nn.Module):
def forward(self, x):
return torch.relu(TestFunc.apply(x))
-
class Wrapper(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -1469,9 +1602,11 @@ class TestTracer(JitTestCase):
print(traced.graph)
# Expected output schema of the custom autograd.Function.
- schema = '(Double(1, 2, strides=[2, 1], requires_grad=0, device=cpu), '\
- 'Double(3, 2, strides=[2, 1], requires_grad=0, device=cpu)) '\
- '= ^Foo'
+ schema = (
+ "(Double(1, 2, strides=[2, 1], requires_grad=0, device=cpu), "
+ "Double(3, 2, strides=[2, 1], requires_grad=0, device=cpu)) "
+ "= ^Foo"
+ )
# See if expected schema exists.
FileCheck().check(schema).run(traced.graph)
@@ -1490,7 +1625,9 @@ class TestTracer(JitTestCase):
def forward(self, x):
y = self.conv(x)
- w = nn.functional.interpolate(y, mode='bilinear', align_corners=False, scale_factor=3)
+ w = nn.functional.interpolate(
+ y, mode="bilinear", align_corners=False, scale_factor=3
+ )
return w
f = test()
@@ -1554,10 +1691,9 @@ class TestTracer(JitTestCase):
tm = torch.jit.trace(TracedModule(), torch.rand(3, 4))
# Note: neg op from the traced function should be properly inlined
- FileCheck().check("aten::mm") \
- .check('name="traced_fn"') \
- .check_next("prim::CallFunction") \
- .run(str(tm.graph))
+ FileCheck().check("aten::mm").check('name="traced_fn"').check_next(
+ "prim::CallFunction"
+ ).run(str(tm.graph))
@_tmp_donotuse_dont_inline_everything
def test_call_traced_module_from_traced_module(self):
@@ -1580,7 +1716,9 @@ class TestTracer(JitTestCase):
tm = torch.jit.trace(TracedModule(), torch.rand(3, 4))
- FileCheck().check("aten::mm").check("prim::CallMethod").check_same("forward").check("aten::add").run(str(tm.graph))
+ FileCheck().check("aten::mm").check("prim::CallMethod").check_same(
+ "forward"
+ ).check("aten::add").run(str(tm.graph))
def test_index_put_trace_with_view(self):
@_trace(torch.rand(100), torch.tensor([1, 2, 3, 4]), torch.rand(1, 1, 1, 4))
@@ -1588,7 +1726,9 @@ class TestTracer(JitTestCase):
target[indices] = rhs
return target
- FileCheck().check("aten::view").check("index_put_").run(str(test_index_put.graph))
+ FileCheck().check("aten::view").check("index_put_").run(
+ str(test_index_put.graph)
+ )
def test_index_put_trace_without_view(self):
@_trace(torch.rand(100), torch.tensor([1, 2, 3, 4]), torch.rand(4))
@@ -1596,12 +1736,17 @@ class TestTracer(JitTestCase):
target[indices] = rhs
return target
- FileCheck().check_not("aten::view").check("index_put_").run(str(test_index_put.graph))
+ FileCheck().check_not("aten::view").check("index_put_").run(
+ str(test_index_put.graph)
+ )
@suppress_warnings
def test_trace_checker_dot_data(self):
- with self.assertRaisesRegex(torch.jit.TracingCheckError, r'Tensor-valued Constant nodes differed in value '
- r'across invocations'):
+ with self.assertRaisesRegex(
+ torch.jit.TracingCheckError,
+ r"Tensor-valued Constant nodes differed in value " r"across invocations",
+ ):
+
@_trace(torch.rand(3, 4), check_inputs=[(torch.rand(3, 4),)])
def foo(x):
y = x.data
@@ -1614,18 +1759,25 @@ class TestTracer(JitTestCase):
x = torch.neg(x)
return x
- with self.assertRaisesRegex(torch.jit.TracingCheckError, r'Graphs differed across invocations!'):
+ with self.assertRaisesRegex(
+ torch.jit.TracingCheckError, r"Graphs differed across invocations!"
+ ):
torch.jit.trace(foo, torch.randn(3, 4), check_inputs=[torch.randn(4, 4)])
@suppress_warnings
def test_trace_checker_memoization(self):
- with self.assertRaisesRegex(torch.jit.TracingCheckError, r'Graphs differed across invocations!'):
+ with self.assertRaisesRegex(
+ torch.jit.TracingCheckError, r"Graphs differed across invocations!"
+ ):
+
def foo(x):
- if not hasattr(foo, 'cache'):
+ if not hasattr(foo, "cache"):
foo.cache = torch.neg(x)
return x + foo.cache
- traced = torch.jit.trace(foo, torch.rand(3, 4), check_inputs=[(torch.rand(3, 4),)])
+ traced = torch.jit.trace(
+ foo, torch.rand(3, 4), check_inputs=[(torch.rand(3, 4),)]
+ )
def test_trace_checker_slice_lhs(self):
def foo(x):
@@ -1640,34 +1792,45 @@ class TestTracer(JitTestCase):
x.view(-1).add_(-x.view(-1))
return x
- with self.assertWarnsRegex(torch.jit.TracerWarning,
- 'Output nr 1. of the traced function does not match the '
- 'corresponding output of the Python function'):
- torch.jit.trace(foo,
- torch.rand(3, 4),
- check_inputs=[torch.rand(5, 6)],
- _force_outplace=True)
+ with self.assertWarnsRegex(
+ torch.jit.TracerWarning,
+ "Output nr 1. of the traced function does not match the "
+ "corresponding output of the Python function",
+ ):
+ torch.jit.trace(
+ foo,
+ torch.rand(3, 4),
+ check_inputs=[torch.rand(5, 6)],
+ _force_outplace=True,
+ )
def test_lhs_index_fails(self):
def foo(x):
x[0, 1] = 4
return x
- with self.assertWarnsRegex(torch.jit.TracerWarning, "cause the trace to be incorrect"):
+ with self.assertWarnsRegex(
+ torch.jit.TracerWarning, "cause the trace to be incorrect"
+ ):
torch.jit.trace(foo, torch.rand(3, 4), _force_outplace=True)
def test_lhs_index_trivial(self):
def foo(y, x):
y[...] = x
return y
- self.checkTrace(foo, (torch.rand(3, 4), torch.rand(4)), inputs_require_grads=False)
+
+ self.checkTrace(
+ foo, (torch.rand(3, 4), torch.rand(4)), inputs_require_grads=False
+ )
def test_inplace_warn(self):
def foo(x):
x.view(-1).add_(-x.view(-1))
return x
- with self.assertWarnsRegex(torch.jit.TracerWarning, "cause the trace to be incorrect"):
+ with self.assertWarnsRegex(
+ torch.jit.TracerWarning, "cause the trace to be incorrect"
+ ):
torch.jit.trace(foo, torch.rand(3, 4), _force_outplace=True)
@suppress_warnings
@@ -1675,13 +1838,16 @@ class TestTracer(JitTestCase):
def foo(x):
return torch.dropout(x, p=0.5, train=True)
- with self.assertWarnsRegex(torch.jit.TracerWarning,
- 'Output nr 1. of the traced function does not match the '
- 'corresponding output of the Python function'):
+ with self.assertWarnsRegex(
+ torch.jit.TracerWarning,
+ "Output nr 1. of the traced function does not match the "
+ "corresponding output of the Python function",
+ ):
torch.jit.trace(foo, torch.rand(3, 4), check_inputs=[torch.rand(5, 6)])
- with self.assertWarnsRegex(torch.jit.TracerWarning,
- 'Trace had nondeterministic nodes'):
+ with self.assertWarnsRegex(
+ torch.jit.TracerWarning, "Trace had nondeterministic nodes"
+ ):
torch.jit.trace(foo, torch.rand(3, 4), check_inputs=[torch.rand(5, 6)])
def test_trace_checker_dropout_notrain(self):
@@ -1736,10 +1902,7 @@ class TestTracer(JitTestCase):
class MyMod(torch.nn.Module):
def __init__(self):
super().__init__()
- self.ml = torch.nn.ModuleList([
- MySubmod(),
- MySubmod()
- ])
+ self.ml = torch.nn.ModuleList([MySubmod(), MySubmod()])
def forward(self, x):
for mod in self.ml:
@@ -1760,9 +1923,7 @@ class TestTracer(JitTestCase):
class Mod(torch.nn.Module):
def __init__(self):
super().__init__()
- self.ml = torch.nn.ModuleList([
- MySubmod() for i in range(2)
- ])
+ self.ml = torch.nn.ModuleList([MySubmod() for i in range(2)])
def forward(self, x):
futs = []
@@ -1813,9 +1974,9 @@ class TestTracer(JitTestCase):
traced = torch.jit.trace(foo, (torch.rand(3, 3), torch.rand(3, 3)))
graph_str = str(traced.graph)
- assert 'bar' in graph_str
- assert 'baz' in graph_str
- assert 'quick_brown_fox' in graph_str
+ assert "bar" in graph_str
+ assert "baz" in graph_str
+ assert "quick_brown_fox" in graph_str
@skipIfTorchDynamo("Not a suitable test for TorchDynamo")
def test_tracing_hooks(self):
@@ -1904,7 +2065,10 @@ class TestTracer(JitTestCase):
example_weight = torch.rand(1, 1, 3, 3)
example_forward_input = torch.rand(1, 1, 3, 3)
- inputs = {'forward' : example_forward_input, 'weighted_kernel_sum' : example_weight}
+ inputs = {
+ "forward": example_forward_input,
+ "weighted_kernel_sum": example_weight,
+ }
n = Net()
module = torch.jit.trace_module(n, inputs)
@@ -1912,14 +2076,26 @@ class TestTracer(JitTestCase):
for i in range(2):
check_weight = torch.rand(1, 1, 3, 3)
check_forward_input = torch.rand(1, 1, 3, 3)
- check_inputs.append({'forward' : check_forward_input, 'weighted_kernel_sum' : check_weight})
- module = torch.jit.trace_module(n, inputs, check_trace=True, check_inputs=check_inputs)
+ check_inputs.append(
+ {"forward": check_forward_input, "weighted_kernel_sum": check_weight}
+ )
+ module = torch.jit.trace_module(
+ n, inputs, check_trace=True, check_inputs=check_inputs
+ )
self.assertTrue(module._c._has_method("forward"))
self.assertTrue(module._c._has_method("weighted_kernel_sum"))
module = torch.jit.trace(n.forward, example_forward_input)
- module = torch.jit.trace(n.forward, example_forward_input, check_trace=True, check_inputs=[example_forward_input])
- with self.assertRaisesRegex(AttributeError, "trace doesn't support compiling individual module's functions"):
+ module = torch.jit.trace(
+ n.forward,
+ example_forward_input,
+ check_trace=True,
+ check_inputs=[example_forward_input],
+ )
+ with self.assertRaisesRegex(
+ AttributeError,
+ "trace doesn't support compiling individual module's functions",
+ ):
module = torch.jit.trace(n.weighted_kernel_sum, inputs)
def test_tensor_with_grad_as_constant(self):
@@ -1928,13 +2104,19 @@ class TestTracer(JitTestCase):
def f(x):
return x + param
- with self.assertRaisesRegex(RuntimeError, "Cannot insert a Tensor that requires grad as a constant"):
+
+ with self.assertRaisesRegex(
+ RuntimeError, "Cannot insert a Tensor that requires grad as a constant"
+ ):
torch.jit.trace(f, x)
def test_non_tensor_tracing(self):
def f(x):
return x + param # noqa: F821
- with self.assertRaisesRegex(RuntimeError, r"Type 'Tuple\[int\]' cannot be traced"):
+
+ with self.assertRaisesRegex(
+ RuntimeError, r"Type 'Tuple\[int\]' cannot be traced"
+ ):
torch.jit.trace(f, (1,))
def test_trace_skip_none_submodule(self):
@@ -1948,7 +2130,7 @@ class TestTracer(JitTestCase):
return inputs
m = TestModule()
- tm = torch.jit.trace(m, torch.tensor(1.))
+ tm = torch.jit.trace(m, torch.tensor(1.0))
self.assertFalse(hasattr(tm, "submod"))
def test_trace_with_conditional_property(self):
@@ -1957,7 +2139,7 @@ class TestTracer(JitTestCase):
super().__init__()
if attr is not None:
self._attr = attr
- self.attr_name = '_attr'
+ self.attr_name = "_attr"
@property
def attr(self):
@@ -1974,16 +2156,16 @@ class TestTracer(JitTestCase):
return first_arg + second_arg
traced_fn = torch.jit.trace(fn, (torch.ones(1), torch.ones(1)))
- FileCheck().check("first_arg").check_next("second_arg") \
- .run(str(traced_fn.graph))
+ FileCheck().check("first_arg").check_next("second_arg").run(
+ str(traced_fn.graph)
+ )
def test_trace_partial_func_argument_names_captured(self):
def fn(first_arg: torch.Tensor, second_arg=1) -> torch.Tensor:
return first_arg + second_arg
traced_fn = torch.jit.trace(fn, (torch.ones(1),))
- FileCheck().check("first_arg").check_not("second_arg") \
- .run(str(traced_fn.graph))
+ FileCheck().check("first_arg").check_not("second_arg").run(str(traced_fn.graph))
def test_trace_module_argument_names_captured(self):
class TestModule(nn.Module):
@@ -1999,13 +2181,15 @@ class TestTracer(JitTestCase):
# Explicitly tracing module's forward method
traced_module_forward = torch.jit.trace(m.forward, example_input)
- FileCheck().check("first_arg").check_next("second_arg") \
- .run(str(traced_module_forward.graph))
+ FileCheck().check("first_arg").check_next("second_arg").run(
+ str(traced_module_forward.graph)
+ )
# Tracing module's directly
traced_module = torch.jit.trace(m, example_input)
- FileCheck().check("first_arg").check_next("second_arg") \
- .run(str(traced_module.graph))
+ FileCheck().check("first_arg").check_next("second_arg").run(
+ str(traced_module.graph)
+ )
def test_trace_checking_with_deprecated_name(self):
class MyClass(torch.nn.Module):
@@ -2014,12 +2198,18 @@ class TestTracer(JitTestCase):
def forward(self, x, y, **deprecated_arguments):
if len(deprecated_arguments) > 0:
- raise RuntimeError(f"Got unexpected arguments: {deprecated_arguments}")
+ raise RuntimeError(
+ f"Got unexpected arguments: {deprecated_arguments}"
+ )
return x + y
model = MyClass()
m2 = torch.jit.trace(model, (torch.ones(1), torch.ones(1)))
- m3 = torch.jit.trace(model, example_kwarg_inputs={'x': torch.ones(1), "y": torch.ones(1)}, strict=False)
+ m3 = torch.jit.trace(
+ model,
+ example_kwarg_inputs={"x": torch.ones(1), "y": torch.ones(1)},
+ strict=False,
+ )
def test_trace_with_tuple_tensor(self):
class MyClass(torch.nn.Module):
@@ -2030,11 +2220,21 @@ class TestTracer(JitTestCase):
return x + y[0] + y[1]
model = MyClass()
- traced_model = torch.jit.trace(model, (torch.ones(1), (torch.ones(1), torch.ones(1))))
- input_dict = {"x": torch.tensor([2, 3]), "y": (torch.tensor([5, 6]), torch.tensor([7, 8]))}
+ traced_model = torch.jit.trace(
+ model, (torch.ones(1), (torch.ones(1), torch.ones(1)))
+ )
+ input_dict = {
+ "x": torch.tensor([2, 3]),
+ "y": (torch.tensor([5, 6]), torch.tensor([7, 8])),
+ }
self.assertEqual(model(**input_dict), traced_model(**input_dict))
- traced_model = torch.jit.trace(model, example_kwarg_inputs={
- 'x': torch.ones(1), "y": (torch.ones(1), torch.ones(1))})
+ traced_model = torch.jit.trace(
+ model,
+ example_kwarg_inputs={
+ "x": torch.ones(1),
+ "y": (torch.ones(1), torch.ones(1)),
+ },
+ )
self.assertEqual(model(**input_dict), traced_model(**input_dict))
def test_trace_no_duplicated_lifted_input_output(self):
@@ -2098,7 +2298,9 @@ class TestMixTracingScripting(JitTestCase):
self.checkTrace(func2, ((a, b),))
@torch.jit.script
- def func3(x: Tensor, method: str = 'bilinear', align_corners: bool = True) -> Tensor:
+ def func3(
+ x: Tensor, method: str = "bilinear", align_corners: bool = True
+ ) -> Tensor:
hw = x.shape[2:4]
return F.interpolate(x, hw, mode=method, align_corners=align_corners)
@@ -2115,7 +2317,7 @@ class TestMixTracingScripting(JitTestCase):
def test_trace_mixed_by_script_with_dict_output(self):
@torch.jit.script
def return_dict(input: torch.Tensor) -> Dict[str, torch.Tensor]:
- return {"foo" : input + 1}
+ return {"foo": input + 1}
class TraceModule(torch.nn.Module):
def forward(self, input):
@@ -2235,18 +2437,18 @@ class TestMixTracingScripting(JitTestCase):
# for each of these checks, check that *BOTH* the underlying
# _C.ScriptModule object has the expected method/param, as well as the
# Python object that wraps it.
- self.assertTrue(traced.ssm._c._has_method('foo'))
- self.assertTrue(hasattr(traced.ssm, 'foo'))
+ self.assertTrue(traced.ssm._c._has_method("foo"))
+ self.assertTrue(hasattr(traced.ssm, "foo"))
imported = self.getExportImportCopy(traced)
- self.assertTrue(imported.ssm._c._has_method('foo'))
- self.assertTrue(hasattr(imported.ssm, 'foo'))
+ self.assertTrue(imported.ssm._c._has_method("foo"))
+ self.assertTrue(hasattr(imported.ssm, "foo"))
- self.assertTrue(imported.ssm.asm._c._has_method('bar'))
- self.assertTrue(hasattr(imported.ssm.asm, 'bar'))
+ self.assertTrue(imported.ssm.asm._c._has_method("bar"))
+ self.assertTrue(hasattr(imported.ssm.asm, "bar"))
- self.assertTrue(hasattr(imported.ssm.asm, 'param'))
+ self.assertTrue(hasattr(imported.ssm.asm, "param"))
def test_trace_parameter(self):
class Param(nn.Module):
@@ -2303,7 +2505,9 @@ class TestMixTracingScripting(JitTestCase):
return scripted_fn(torch.mm(x, self.param))
tm = torch.jit.trace(TracedModule(), torch.rand(3, 4))
- FileCheck().check("aten::mm").check("name=\"scripted_fn\"").check("prim::CallFunction").run(str(tm.graph))
+ FileCheck().check("aten::mm").check('name="scripted_fn"').check(
+ "prim::CallFunction"
+ ).run(str(tm.graph))
@_tmp_donotuse_dont_inline_everything
def test_call_script_module_from_traced_module(self):
@@ -2327,7 +2531,9 @@ class TestMixTracingScripting(JitTestCase):
tm = torch.jit.trace(TracedModule(), torch.rand(3, 4))
- FileCheck().check("aten::mm").check("prim::CallMethod").check_same("forward").check("aten::add").run(str(tm.graph))
+ FileCheck().check("aten::mm").check("prim::CallMethod").check_same(
+ "forward"
+ ).check("aten::add").run(str(tm.graph))
@_tmp_donotuse_dont_inline_everything
def test_call_traced_fn_from_script_fn(self):
@@ -2339,10 +2545,16 @@ class TestMixTracingScripting(JitTestCase):
def script_fn(x):
return traced_fn(x) + 1
- FileCheck().check("prim::CallFunction").check("aten::add").run(str(script_fn.graph))
+ FileCheck().check("prim::CallFunction").check("aten::add").run(
+ str(script_fn.graph)
+ )
def test_call_traced_mod_from_script_fn(self):
- with self.assertRaisesRegex(RuntimeError, "Cannot call a ScriptModule that is not a submodule of the caller"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Cannot call a ScriptModule that is not a submodule of the caller",
+ ):
+
class TracedModule(torch.nn.Module):
def forward(self, x):
return torch.mm(x, torch.zeros(4, 3))
@@ -2369,7 +2581,9 @@ class TestMixTracingScripting(JitTestCase):
return traced_fn(torch.mm(x, self.param))
sm = ScriptMod()
- FileCheck().check("aten::mm").check("prim::CallFunction").run(str(sm.forward.graph))
+ FileCheck().check("aten::mm").check("prim::CallFunction").run(
+ str(sm.forward.graph)
+ )
@_tmp_donotuse_dont_inline_everything
def test_call_tracing_mod_from_script_module(self):
@@ -2437,31 +2651,41 @@ class TestMixTracingScripting(JitTestCase):
return self.b(feature_map)
- input_map = {"1" : [torch.rand(2, 2), torch.rand(2, 2)], "3" : [torch.rand(2, 2), torch.rand(2, 2)]}
+ input_map = {
+ "1": [torch.rand(2, 2), torch.rand(2, 2)],
+ "3": [torch.rand(2, 2), torch.rand(2, 2)],
+ }
model = testA()
traced_model = torch.jit.trace(model, input_map)
- new_input_map = {"1" : [torch.rand(2, 2), torch.randn(2, 2)], "3" : [torch.rand(2, 2), torch.rand(2, 2)]}
+ new_input_map = {
+ "1": [torch.rand(2, 2), torch.randn(2, 2)],
+ "3": [torch.rand(2, 2), torch.rand(2, 2)],
+ }
self.assertEqual(model(new_input_map), traced_model(new_input_map))
def test_trace_script_returning_complex_dict(self):
"""Tracing over a script function returning a dictionary should work.
        The dictionary should be able to contain other containers (like a tuple) recursively.
"""
+
class ReturnsDict(torch.nn.Module):
def forward(
- self, id_score_list: Dict[str, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]
+ self,
+ id_score_list: Dict[
+ str, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
+ ],
) -> Dict[str, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
# do some random operations and then return a dict of the same structure
v = id_score_list["1000"]
idx_keys = v[1] - 1500000
weights = v[2]
- result = {
- "1000": (v[0], idx_keys, weights)
- }
+ result = {"1000": (v[0], idx_keys, weights)}
return result
class ChecksDict(torch.nn.Module):
- def forward(self, input: Dict[str, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]):
+ def forward(
+ self, input: Dict[str, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]
+ ):
v = input["1000"]
return v[1] + 1
@@ -2471,7 +2695,9 @@ class TestMixTracingScripting(JitTestCase):
self.checks_dict = checks_dict
self.returns_dict = returns_dict
- def forward(self, input: Dict[str, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]):
+ def forward(
+ self, input: Dict[str, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]
+ ):
foo = self.returns_dict(input)
return self.checks_dict(foo)
@@ -2479,7 +2705,7 @@ class TestMixTracingScripting(JitTestCase):
"1000": (
torch.tensor([0]),
torch.tensor([], dtype=torch.int64),
- torch.tensor([])
+ torch.tensor([]),
)
}
@@ -2487,7 +2713,7 @@ class TestMixTracingScripting(JitTestCase):
"1000": (
torch.tensor([0]),
torch.tensor([1500000, 1500004], dtype=torch.int64),
- torch.tensor([2.0, 3.0])
+ torch.tensor([2.0, 3.0]),
)
}
@@ -2502,15 +2728,14 @@ class TestMixTracingScripting(JitTestCase):
"""Tracing over a module returning a dictionary whose values are tuples of tensors
should work.
"""
+
class ReturnsDict(torch.nn.Module):
def forward(
self, k: torch.Tensor, v: torch.Tensor
) -> Dict[str, Tuple[torch.Tensor, torch.Tensor]]:
x = 2 * k
y = 3 * v
- result = {
- "imakey": (x, y)
- }
+ result = {"imakey": (x, y)}
return result
class ReturnsBadDict(torch.nn.Module):
@@ -2518,22 +2743,24 @@ class TestMixTracingScripting(JitTestCase):
self, k: torch.Tensor, v: torch.Tensor
) -> Dict[str, Tuple[torch.Tensor, float]]:
x = 2 * k
- result = {
- "imakey": (x, 1)
- }
+ result = {"imakey": (x, 1)}
return result
mod = ReturnsDict()
- traced_module = torch.jit.trace(mod, [torch.ones(1), torch.ones(1)], strict=False)
+ traced_module = torch.jit.trace(
+ mod, [torch.ones(1), torch.ones(1)], strict=False
+ )
out = traced_module(torch.ones(1), torch.ones(1))
- expected = {
- "imakey": (torch.tensor([2.]), torch.tensor([3.]))
- }
+ expected = {"imakey": (torch.tensor([2.0]), torch.tensor([3.0]))}
self.assertEqual(out, expected)
- with self.assertRaisesRegex(RuntimeError, "cannot be understood by the tracer, only outputs matching"):
+ with self.assertRaisesRegex(
+ RuntimeError, "cannot be understood by the tracer, only outputs matching"
+ ):
mod = ReturnsBadDict()
- traced_module = torch.jit.trace(mod, [torch.ones(1), torch.ones(1)], strict=False)
+ traced_module = torch.jit.trace(
+ mod, [torch.ones(1), torch.ones(1)], strict=False
+ )
def test_trace_linear(self):
m = torch.nn.Linear(20, 20)
@@ -2545,7 +2772,9 @@ class TestMixTracingScripting(JitTestCase):
def test_traced_module_implements_interface(self):
@torch.jit.interface
class TestModuleInterface(nn.Module):
- def forward(self, first_arg: torch.Tensor, second_arg: torch.Tensor) -> torch.Tensor:
+ def forward(
+ self, first_arg: torch.Tensor, second_arg: torch.Tensor
+ ) -> torch.Tensor:
pass
make_global(TestModuleInterface)
@@ -2555,7 +2784,9 @@ class TestMixTracingScripting(JitTestCase):
super().__init__()
self.conv = nn.Conv2d(1, 1, 3)
- def forward(self, first_arg: torch.Tensor, second_arg: torch.Tensor) -> torch.Tensor:
+ def forward(
+ self, first_arg: torch.Tensor, second_arg: torch.Tensor
+ ) -> torch.Tensor:
return self.conv(first_arg) + second_arg
def fn_takes_interface(x: TestModuleInterface):
@@ -2566,7 +2797,6 @@ class TestMixTracingScripting(JitTestCase):
self.checkScript(fn_takes_interface, (scripted_test_module,))
def test_traced_module_contains_scripted_interface_types(self):
-
class LeafModule(torch.nn.Module):
def __init__(self):
super().__init__()
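
The hunks above are formatting-only; the tracing behavior they exercise is unchanged. For orientation, a minimal sketch of the torch.jit.trace options that recur in these tests (the module and tensor names here are illustrative, not taken from the patch):

    import torch

    class ReturnsDict(torch.nn.Module):
        def forward(self, x):
            return {"out": x + 1}

    example = torch.ones(2)
    # strict=False permits container outputs (dicts, lists) that strict tracing rejects
    traced = torch.jit.trace(ReturnsDict(), (example,), strict=False)
    assert traced(example)["out"].equal(example + 1)

    # example_kwarg_inputs supplies example inputs by keyword instead of position,
    # as in the tests above
    traced_kw = torch.jit.trace(
        ReturnsDict(), example_kwarg_inputs={"x": example}, strict=False
    )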
diff --git a/test/jit/test_type_sharing.py b/test/jit/test_type_sharing.py
index c2b84fc4e5..55f78258ec 100644
--- a/test/jit/test_type_sharing.py
+++ b/test/jit/test_type_sharing.py
@@ -1,21 +1,24 @@
# Owner(s): ["oncall: jit"]
+import io
import os
import sys
-import io
import torch
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
-from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import suppress_warnings
+from torch.testing._internal.jit_utils import JitTestCase
+
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
class TestTypeSharing(JitTestCase):
def assertSameType(self, m1, m2):
@@ -42,6 +45,7 @@ class TestTypeSharing(JitTestCase):
def forward(self, x):
return x
+
a = torch.rand(2, 3)
b = torch.rand(2, 3)
c = torch.rand(2, 3)
@@ -53,6 +57,7 @@ class TestTypeSharing(JitTestCase):
"""
Types should be shared even if attribute values differ
"""
+
class M(torch.nn.Module):
def __init__(self, a, b, c):
super().__init__()
@@ -62,6 +67,7 @@ class TestTypeSharing(JitTestCase):
def forward(self, x):
return x
+
a = torch.rand(2, 3)
b = torch.rand(2, 3)
c = torch.rand(2, 3)
@@ -73,6 +79,7 @@ class TestTypeSharing(JitTestCase):
"""
Types should be shared for identical constant values, and different for different constant values
"""
+
class M(torch.nn.Module):
__constants__ = ["const"]
@@ -111,6 +118,7 @@ class TestTypeSharing(JitTestCase):
"""
If submodules differ, the types should differ.
"""
+
class M(torch.nn.Module):
def __init__(self, in1, out1, in2, out2):
super().__init__()
@@ -137,6 +145,7 @@ class TestTypeSharing(JitTestCase):
        The same module with a `foo` as a parameter vs. attribute shouldn't
share types
"""
+
class M(torch.nn.Module):
def __init__(self, foo):
super().__init__()
@@ -156,6 +165,7 @@ class TestTypeSharing(JitTestCase):
Even if everything about the module is the same, different originating
classes should prevent type sharing.
"""
+
class A(torch.nn.Module):
__constants__ = ["const"]
@@ -192,6 +202,7 @@ class TestTypeSharing(JitTestCase):
"""
Mutating the value of an attribute should not change type sharing
"""
+
class M(torch.nn.Module):
def __init__(self, in1, out1, in2, out2):
super().__init__()
@@ -214,6 +225,7 @@ class TestTypeSharing(JitTestCase):
"""
Assigning a new (python-only) attribute should not change type sharing
"""
+
class M(torch.nn.Module):
def __init__(self, in1, out1, in2, out2):
super().__init__()
@@ -244,6 +256,7 @@ class TestTypeSharing(JitTestCase):
"""
Attributes whose type cannot be inferred should fail cleanly with nice hints
"""
+
class M(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -255,15 +268,16 @@ class TestTypeSharing(JitTestCase):
return self.foo
m = M()
- with self.assertRaisesRegexWithHighlight(RuntimeError,
- "failed to convert Python type",
- "self.foo"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, "failed to convert Python type", "self.foo"
+ ):
torch.jit.script(m)
def test_script_function_attribute_different(self):
"""
Different functions passed in should lead to different types
"""
+
@torch.jit.script
def fn1(x):
return x + x
@@ -317,6 +331,7 @@ class TestTypeSharing(JitTestCase):
"""
Same functions passed in should lead to same types
"""
+
@torch.jit.script
def fn(x):
return x + x
@@ -338,6 +353,7 @@ class TestTypeSharing(JitTestCase):
"""
Different functions passed in should lead to different types
"""
+
def fn1(x):
return x + x
@@ -361,6 +377,7 @@ class TestTypeSharing(JitTestCase):
"""
Same functions passed in should lead to same types
"""
+
def fn(x):
return x + x
@@ -383,6 +400,7 @@ class TestTypeSharing(JitTestCase):
Since we can't guarantee that methods are the same between different
trace runs, tracing must always generate a unique type.
"""
+
class M(torch.nn.Module):
def forward(self, x, y):
if x.sum() > y.sum():
@@ -429,8 +447,8 @@ class TestTypeSharing(JitTestCase):
def forward(self, x):
return self.traced(x)
- a = M((torch.ones(1), ))
- b = M((torch.zeros(1), ))
+ a = M((torch.ones(1),))
+ b = M((torch.zeros(1),))
self.assertDifferentType(a, b)
def test_loaded_modules_work(self):
@@ -465,7 +483,6 @@ class TestTypeSharing(JitTestCase):
buffer.seek(0)
return torch.jit.script(Wrapper(torch.jit.load(buffer)))
-
a = package(AB())
a()
b = package(A())
@@ -476,6 +493,7 @@ class TestTypeSharing(JitTestCase):
We should be able to differentiate between two ModuleDict instances
that have different keys but the same value types.
"""
+
class A(torch.nn.Module):
def forward(self, x):
return x
@@ -488,9 +506,9 @@ class TestTypeSharing(JitTestCase):
def forward(self, x):
return x
- a = Foo({'foo': A()})
- b = Foo({'bar': A()})
- c = Foo({'bar': A()})
+ a = Foo({"foo": A()})
+ b = Foo({"bar": A()})
+ c = Foo({"bar": A()})
self.assertDifferentType(a, b)
self.assertSameType(b, c)
@@ -500,13 +518,16 @@ class TestTypeSharing(JitTestCase):
subclass that defines methods in its __init__ are not
shared.
"""
+
class A(torch.jit.ScriptModule):
def __init__(self, val):
super().__init__()
- self.define(f"""
+ self.define(
+ f"""
def forward(self) -> int:
return {val}
- """)
+ """
+ )
one = A(1)
two = A(2)
@@ -518,6 +539,7 @@ class TestTypeSharing(JitTestCase):
"""
Test that type sharing can be disabled.
"""
+
class A(torch.nn.Module):
def __init__(self, sub):
super().__init__()
@@ -555,6 +577,7 @@ class TestTypeSharing(JitTestCase):
Test that types are shared if the exclusion of their
ignored attributes makes them equal.
"""
+
class A(torch.nn.Module):
__jit_ignored_attributes__ = ["a"]
@@ -579,6 +602,7 @@ class TestTypeSharing(JitTestCase):
Test that types are not shared if the exclusion of their
ignored attributes makes them not equal.
"""
+
class A(torch.nn.Module):
__jit_ignored_attributes__ = ["a"]
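
The type-sharing tests above encode one rule of thumb: ScriptModule instances built from the same Python class share a JIT type when their attribute and constant types line up, and get distinct types otherwise. A minimal sketch of that scenario, assuming an illustrative class M with a float attribute (the comparison helpers themselves are internal to the test class and not reproduced here):

    import torch

    class M(torch.nn.Module):
        def __init__(self, val: float):
            super().__init__()
            self.val = val

        def forward(self, x):
            return x + self.val

    # Same attribute types, different values: both instances script fine and,
    # per the docstrings above, are expected to share a single JIT type.
    a = torch.jit.script(M(1.0))
    b = torch.jit.script(M(2.0))
    assert a(torch.zeros(1)).item() == 1.0
    assert b(torch.zeros(1)).item() == 2.0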
diff --git a/test/jit/test_types.py b/test/jit/test_types.py
index 8374afc542..e331aad12e 100644
--- a/test/jit/test_types.py
+++ b/test/jit/test_types.py
@@ -1,26 +1,31 @@
# Owner(s): ["oncall: jit"]
-from collections import namedtuple
-from typing import Dict, Iterator, List, Optional, Tuple
-
-from torch.testing._internal.jit_utils import JitTestCase
-from torch.testing import FileCheck
-from textwrap import dedent
-from jit.test_module_interface import TestModuleInterface # noqa: F401
import inspect
import os
import sys
+from collections import namedtuple
+from textwrap import dedent
+from typing import Dict, Iterator, List, Optional, Tuple
+
import torch
import torch.testing._internal.jit_utils
+from torch.testing import FileCheck
+
+from torch.testing._internal.jit_utils import JitTestCase
+
+from jit.test_module_interface import TestModuleInterface # noqa: F401
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
+
class TestTypesAndAnnotation(JitTestCase):
def test_pep585_type(self):
@@ -30,7 +35,7 @@ class TestTypesAndAnnotation(JitTestCase):
xl: list[tuple[torch.Tensor]] = []
xd: dict[str, int] = {}
xl.append((x,))
- xd['foo'] = 1
+ xd["foo"] = 1
return xl.pop(), xd
self.checkScript(fn, [torch.randn(2, 2)])
@@ -47,7 +52,7 @@ class TestTypesAndAnnotation(JitTestCase):
self.checkScript(fn, [torch.randn(2, 2)])
- GG = namedtuple('GG', ['f', 'g'])
+ GG = namedtuple("GG", ["f", "g"])
class Foo(torch.nn.Module):
@torch.jit.ignore
@@ -77,13 +82,17 @@ class TestTypesAndAnnotation(JitTestCase):
return x + 10
class M(torch.nn.Module):
- def forward(self, in_batch: Dict[str, Optional[torch.Tensor]]) -> torch.Tensor:
+ def forward(
+ self, in_batch: Dict[str, Optional[torch.Tensor]]
+ ) -> torch.Tensor:
self.dropout_modality(in_batch)
fn(in_batch)
return torch.tensor(1)
@torch.jit.ignore
- def dropout_modality(self, in_batch: Dict[str, Optional[torch.Tensor]]) -> Dict[str, Optional[torch.Tensor]]:
+ def dropout_modality(
+ self, in_batch: Dict[str, Optional[torch.Tensor]]
+ ) -> Dict[str, Optional[torch.Tensor]]:
return in_batch
sm = torch.jit.script(M())
@@ -111,16 +120,17 @@ class TestTypesAndAnnotation(JitTestCase):
return my_arg + 10
with self.assertRaisesRegex(RuntimeError, "argument 'my_arg'"):
+
@torch.jit.script
def other_fn(x):
- return fn('2')
+ return fn("2")
def test_type_annotate_py3(self):
def fn():
- a : List[int] = []
- b : torch.Tensor = torch.ones(2, 2)
- c : Optional[torch.Tensor] = None
- d : Optional[torch.Tensor] = torch.ones(3, 4)
+ a: List[int] = []
+ b: torch.Tensor = torch.ones(2, 2)
+ c: Optional[torch.Tensor] = None
+ d: Optional[torch.Tensor] = torch.ones(3, 4)
for _ in range(10):
a.append(4)
c = torch.ones(2, 2)
@@ -130,66 +140,88 @@ class TestTypesAndAnnotation(JitTestCase):
self.checkScript(fn, ())
def wrong_type():
- wrong : List[int] = [0.5]
+ wrong: List[int] = [0.5]
return wrong
- with self.assertRaisesRegex(RuntimeError, "List type annotation"
- r" `List\[int\]` did not match the "
- "types of the given list elements"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "List type annotation"
+ r" `List\[int\]` did not match the "
+ "types of the given list elements",
+ ):
torch.jit.script(wrong_type)
def test_optional_no_element_type_annotation(self):
"""
Test that using an optional with no contained types produces an error.
"""
+
def fn_with_comment(x: torch.Tensor) -> Optional:
return (x, x)
def annotated_fn(x: torch.Tensor) -> Optional:
return (x, x)
- with self.assertRaisesRegex(RuntimeError, r"Attempted to use Optional without a contained type"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Attempted to use Optional without a contained type"
+ ):
cu = torch.jit.CompilationUnit()
cu.define(dedent(inspect.getsource(fn_with_comment)))
- with self.assertRaisesRegex(RuntimeError, r"Attempted to use Optional without a contained type"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Attempted to use Optional without a contained type"
+ ):
cu = torch.jit.CompilationUnit()
cu.define(dedent(inspect.getsource(annotated_fn)))
- with self.assertRaisesRegex(RuntimeError, r"Attempted to use Optional without a contained type"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Attempted to use Optional without a contained type"
+ ):
torch.jit.script(fn_with_comment)
- with self.assertRaisesRegex(RuntimeError, r"Attempted to use Optional without a contained type"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Attempted to use Optional without a contained type"
+ ):
torch.jit.script(annotated_fn)
def test_tuple_no_element_type_annotation(self):
"""
Test that using a tuple with no contained types produces an error.
"""
+
def fn_with_comment(x: torch.Tensor) -> Tuple:
return (x, x)
def annotated_fn(x: torch.Tensor) -> Tuple:
return (x, x)
- with self.assertRaisesRegex(RuntimeError, r"Attempted to use Tuple without a contained type"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Attempted to use Tuple without a contained type"
+ ):
cu = torch.jit.CompilationUnit()
cu.define(dedent(inspect.getsource(fn_with_comment)))
- with self.assertRaisesRegex(RuntimeError, r"Attempted to use Tuple without a contained type"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Attempted to use Tuple without a contained type"
+ ):
cu = torch.jit.CompilationUnit()
cu.define(dedent(inspect.getsource(annotated_fn)))
- with self.assertRaisesRegex(RuntimeError, r"Attempted to use Tuple without a contained type"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Attempted to use Tuple without a contained type"
+ ):
torch.jit.script(fn_with_comment)
- with self.assertRaisesRegex(RuntimeError, r"Attempted to use Tuple without a contained type"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Attempted to use Tuple without a contained type"
+ ):
torch.jit.script(annotated_fn)
def test_ignoring_module_attributes(self):
"""
Test that module attributes can be ignored.
"""
+
class Sub(torch.nn.Module):
def forward(self, a: int) -> int:
return sum([a])
@@ -229,10 +261,11 @@ class TestTypesAndAnnotation(JitTestCase):
mod = ModuleUsesIgnoredAttr(1)
- with self.assertRaisesRegexWithHighlight(RuntimeError, r"attribute was ignored during compilation", "self.sub"):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, r"attribute was ignored during compilation", "self.sub"
+ ):
scripted_mod = torch.jit.script(mod)
-
def test_ignoring_fn_with_nonscriptable_types(self):
class CFX:
def __init__(self, a: List[torch.Tensor]) -> None:
@@ -246,7 +279,9 @@ class TestTypesAndAnnotation(JitTestCase):
return iter(self.a)
@torch.jit._drop
- def __fx_create_arg__(self, tracer: torch.fx.Tracer) -> torch.fx.node.Argument:
+ def __fx_create_arg__(
+ self, tracer: torch.fx.Tracer
+ ) -> torch.fx.node.Argument:
# torch.fx classes are not scriptable
return tracer.create_node(
"call_function",
@@ -257,35 +292,36 @@ class TestTypesAndAnnotation(JitTestCase):
torch.jit.script(CFX)
-
def test_unimported_type_resolution(self):
# verify fallback from the python resolver to the c++ resolver
- @ torch.jit.script
+ @torch.jit.script
def fn(x):
# type: (number) -> number
return x + 1
- FileCheck().check('Scalar').run(fn.graph)
+ FileCheck().check("Scalar").run(fn.graph)
def test_parser_bug(self):
def parser_bug(o: Optional[torch.Tensor]):
pass
def test_mismatched_annotation(self):
- with self.assertRaisesRegex(RuntimeError, 'annotated with type'):
+ with self.assertRaisesRegex(RuntimeError, "annotated with type"):
+
@torch.jit.script
def foo():
- x : str = 4
+ x: str = 4
return x
def test_reannotate(self):
- with self.assertRaisesRegex(RuntimeError, 'declare and annotate'):
+ with self.assertRaisesRegex(RuntimeError, "declare and annotate"):
+
@torch.jit.script
def foo():
x = 5
if 1 == 1:
- x : Optional[int] = 7
+ x: Optional[int] = 7
def test_annotate_outside_init(self):
msg = "annotations on instance attributes must be declared in __init__"
@@ -293,6 +329,7 @@ class TestTypesAndAnnotation(JitTestCase):
# Simple case
with self.assertRaisesRegexWithHighlight(ValueError, msg, highlight):
+
@torch.jit.script
class BadModule:
def __init__(self, x: int):
@@ -303,6 +340,7 @@ class TestTypesAndAnnotation(JitTestCase):
# Type annotation in a loop
with self.assertRaisesRegexWithHighlight(ValueError, msg, highlight):
+
@torch.jit.script
class BadModuleLoop:
def __init__(self, x: int):
@@ -324,8 +362,10 @@ class TestTypesAndAnnotation(JitTestCase):
def test_inferred_type_error_message(self):
inferred_type = torch._C.InferredType("ErrorReason")
- with self.assertRaisesRegex(RuntimeError,
- "Tried to get the type from an InferredType but the type is null."):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Tried to get the type from an InferredType but the type is null.",
+ ):
t = inferred_type.type()
with self.assertRaisesRegex(RuntimeError, "ErrorReason"):
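
The annotation tests above revolve around the same constraints: TorchScript wants fully parameterized annotations (a bare Optional or Tuple is rejected), while typing generics and, per test_pep585_type, PEP 585 builtins are accepted. A small sketch of the accepted style, purely for orientation (the function name is illustrative):

    from typing import Dict, List, Optional

    import torch

    @torch.jit.script
    def annotated(x: torch.Tensor) -> Optional[torch.Tensor]:
        xs: List[torch.Tensor] = [x]
        counts: Dict[str, int] = {"n": len(xs)}
        if counts["n"] > 0:
            return xs[0]
        return None

    out = annotated(torch.ones(2))
    assert out is not None and out.equal(torch.ones(2))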
diff --git a/test/jit/test_typing.py b/test/jit/test_typing.py
index 560af1b75d..ffb4ea98e0 100644
--- a/test/jit/test_typing.py
+++ b/test/jit/test_typing.py
@@ -2,12 +2,12 @@
import os
import sys
+from collections import namedtuple
+from typing import Dict, List, NamedTuple, Tuple
import torch
-from torch.testing._internal.jit_utils import JitTestCase, make_global
from torch.testing._internal.common_utils import IS_WINDOWS
-from collections import namedtuple
-from typing import List, Tuple, Dict, NamedTuple
+from torch.testing._internal.jit_utils import JitTestCase, make_global
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
@@ -20,14 +20,15 @@ if __name__ == "__main__":
"instead."
)
+
class TestTyping(JitTestCase):
def test_dict_in_not_in(self):
def test_in_dict(x):
# type: (Dict[str, int]) -> bool
- return 'hi' in x
+ return "hi" in x
- self.checkScript(test_in_dict, ({'hi': 2, 'bye': 3},))
- self.checkScript(test_in_dict, ({'bye': 3},))
+ self.checkScript(test_in_dict, ({"hi": 2, "bye": 3},))
+ self.checkScript(test_in_dict, ({"bye": 3},))
# Check evaluation order
@torch.jit.script
@@ -57,8 +58,8 @@ class TestTyping(JitTestCase):
else:
return True
- self.checkScript(test_not_in_dict, ({"hello": 1, "world": 2}, ))
- self.checkScript(test_not_in_dict, ({"world": 2}, ))
+ self.checkScript(test_not_in_dict, ({"hello": 1, "world": 2},))
+ self.checkScript(test_not_in_dict, ({"world": 2},))
def test_dict_tensor_key(a, t):
# type: (Dict[Tensor, int], Tensor) -> bool
@@ -80,9 +81,12 @@ class TestTyping(JitTestCase):
l: List[int] = [1, 2, "foo", 3]
return l
- with self.assertRaisesRegex(RuntimeError, "List type annotation"
- r" `List\[int\]` did not match the "
- "types of the given list elements"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "List type annotation"
+ r" `List\[int\]` did not match the "
+ "types of the given list elements",
+ ):
torch.jit.script(fn)
def test_dict_type_refinement_annotation_key_mismatch(self):
@@ -92,10 +96,13 @@ class TestTyping(JitTestCase):
d: Dict[int, str] = dict(zip(l1, l2))
return d
- with self.assertRaisesRegex(RuntimeError, "Dicts may only "
- "contain homogeneous keys, but the "
- "type of the first generated key "
- r"was Union\[int, str\]"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Dicts may only "
+ "contain homogeneous keys, but the "
+ "type of the first generated key "
+ r"was Union\[int, str\]",
+ ):
torch.jit.script(fn)
def test_dict_type_refinement_annotation_value_mismatch(self):
@@ -105,28 +112,36 @@ class TestTyping(JitTestCase):
d: Dict[str, int] = dict(zip(l1, l2))
return d
- with self.assertRaisesRegex(RuntimeError, "Dict type annotation"
- r" `Dict\[str, int\]` did not match"
- " the type of an actual value type"
- r" `Union\[int, str\]`"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Dict type annotation"
+ r" `Dict\[str, int\]` did not match"
+ " the type of an actual value type"
+ r" `Union\[int, str\]`",
+ ):
torch.jit.script(fn)
def test_dict_invalid_annotations(self):
# Check for invalid value type annotation
def wrong_value_type(dictionary: Dict[str, torch.jit.ScriptModule]):
return
+
with self.assertRaisesRegex(ValueError, "Unknown type annotation"):
torch.jit.script(wrong_value_type)
# Check for invalid key type annotation
def wrong_key_type(dictionary: Dict[torch.jit.ScriptModule, str]):
return
+
with self.assertRaisesRegex(ValueError, "Unknown type annotation"):
torch.jit.script(wrong_key_type)
# Check for invalid key and value type annotation
- def wrong_key_value_type(dictionary: Dict[torch.jit.ScriptModule, torch.jit.ScriptModule]):
+ def wrong_key_value_type(
+ dictionary: Dict[torch.jit.ScriptModule, torch.jit.ScriptModule]
+ ):
return
+
with self.assertRaisesRegex(ValueError, "Unknown type annotation"):
torch.jit.script(wrong_key_value_type)
@@ -138,13 +153,16 @@ class TestTyping(JitTestCase):
_, y = t2
return x + y
- t = torch.randn(2, 2), (1, torch.randn(2, 2)),
+ t = (
+ torch.randn(2, 2),
+ (1, torch.randn(2, 2)),
+ )
f(t, "hi")
graph = f.graph_for(t, "hi")
input_types = list(next(graph.inputs()).type().elements())
w = input_types[0]
- self.assertEqual(input_types[0].kind(), 'TensorType')
- self.assertEqual(input_types[1].elements()[1].kind(), 'TensorType')
+ self.assertEqual(input_types[0].kind(), "TensorType")
+ self.assertEqual(input_types[1].elements()[1].kind(), "TensorType")
def test_tuple_io(self):
def stuff(x):
@@ -165,8 +183,7 @@ class TestTyping(JitTestCase):
def foo():
return tuple(1, 2)
- self.checkScriptRaisesRegex(foo, (), Exception,
- "1 argument")
+ self.checkScriptRaisesRegex(foo, (), Exception, "1 argument")
def cant_infer_size():
return tuple([1, 2, 3]) # noqa: C409
@@ -179,12 +196,14 @@ class TestTyping(JitTestCase):
# type: (int) -> Tuple[Tensor, Tensor]
a = (torch.ones(x), torch.zeros(x))
return a
+
self.checkScript(stuff2, (3,))
def test_list_io(self):
def stuff3(x):
# type: (List[int]) -> Tuple[Tensor, List[int]]
return torch.ones(x), x
+
self.checkScript(stuff3, ([3, 2],))
def test_bool_list_io(self):
@@ -203,6 +222,7 @@ class TestTyping(JitTestCase):
# type: (Tuple[int, List[List[int]]]) -> int
x, y = z
return y[0][1]
+
self.checkScript(foo, ((1, [[1, 2], [3, 4]]),))
def test_list_sum(self):
@@ -215,12 +235,12 @@ class TestTyping(JitTestCase):
def fn2(x: List[bool]):
return sum(x)
- self.checkScript(fn, ([1, 2, 3], ))
- self.checkScript(fn1, ([1.0, 2.0, 3.0], ))
- self.checkScript(fn1, ([1, 2.8, 3], ))
- self.checkScript(fn2, ([True, False, False], ))
- self.checkScript(fn2, ([False, False, False], ))
- self.checkScript(fn2, ([0, 1, 1, 0], ))
+ self.checkScript(fn, ([1, 2, 3],))
+ self.checkScript(fn1, ([1.0, 2.0, 3.0],))
+ self.checkScript(fn1, ([1, 2.8, 3],))
+ self.checkScript(fn2, ([True, False, False],))
+ self.checkScript(fn2, ([False, False, False],))
+ self.checkScript(fn2, ([0, 1, 1, 0],))
def test_list_unification(self):
def fn():
@@ -254,7 +274,6 @@ class TestTyping(JitTestCase):
self.checkScript(self.get_sum_list_fn(), ([1],))
def test_sum_list_literal(self):
-
def sum_list():
# type: () -> int
sum = 0
@@ -266,8 +285,8 @@ class TestTyping(JitTestCase):
self.checkScript(sum_list, ())
def test_sum_list_wrong_type(self):
-
with self.assertRaisesRegex(RuntimeError, "'int' object is not iterable"):
+
@torch.jit.script
def sum_list(a):
# type: (int) -> int
@@ -280,14 +299,18 @@ class TestTyping(JitTestCase):
sum_list(1)
def test_list_iterables(self):
- with self.assertRaisesRegex(RuntimeError, 'List of iterables is not supported currently'):
- cu = torch.jit.CompilationUnit('''
+ with self.assertRaisesRegex(
+ RuntimeError, "List of iterables is not supported currently"
+ ):
+ cu = torch.jit.CompilationUnit(
+ """
def list_iterables(x):
for i, j in [2, 3, 4], [5, 6, 7]:
x += i
x += j
return x
- ''')
+ """
+ )
def test_for_in_string(self):
def test_strings(x):
@@ -352,36 +375,43 @@ class TestTyping(JitTestCase):
def test_dict_comprehension(self):
def fn():
- return {i : chr(i + 65) for i in range(4)}
+ return {i: chr(i + 65) for i in range(4)}
+
self.checkScript(fn, ())
def test_dict_comprehension_with_type_annotation(self):
def fn():
- d: Dict[int, str] = {i : chr(i + 65) for i in range(4)}
+ d: Dict[int, str] = {i: chr(i + 65) for i in range(4)}
return d
+
self.checkScript(fn, ())
with self.assertRaisesRegex(RuntimeError, ""):
- with self.assertRaisesRegex(AssertionError, "Expected Dict "
- "type annotation for dict "
- "comprehension, found "
- "Tuple[int, str]"):
+ with self.assertRaisesRegex(
+ AssertionError,
+ "Expected Dict "
+ "type annotation for dict "
+ "comprehension, found "
+ "Tuple[int, str]",
+ ):
+
@torch.jit.script
def fn():
- d: Tuple[int, str] = {i : chr(i + 65) for i in range(4)}
+ d: Tuple[int, str] = {i: chr(i + 65) for i in range(4)}
return d
def test_dict_comprehension_scope(self):
def comprehension_can_access_outer_scope_variables():
lst = ["foo", "bar", "baz"]
- return {l : len(l) for l in lst}
+ return {l: len(l) for l in lst}
self.checkScript(comprehension_can_access_outer_scope_variables, ())
with self.assertRaisesRegex(RuntimeError, "undefined value i"):
+
@torch.jit.script
def outer_scope_cannot_access_comprehension_variables():
- d = {i : chr(i + 65) for i in range(4)}
+ d = {i: chr(i + 65) for i in range(4)}
i = i + 1 # noqa: F821
def test_for_tuple_assign(self):
@@ -402,22 +432,28 @@ class TestTyping(JitTestCase):
sum += a[1]
return sum
- self.checkScript(test_tuple_assign, (((1, 2), (4, 7)), ))
+ self.checkScript(test_tuple_assign, (((1, 2), (4, 7)),))
def test_single_starred_lhs(self):
- with self.assertRaisesRegex(RuntimeError, 'A Starred expression may only appear on the lhs within the presence'
- ' of another non-starred expression'):
- cu = torch.jit.CompilationUnit('''
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "A Starred expression may only appear on the lhs within the presence"
+ " of another non-starred expression",
+ ):
+ cu = torch.jit.CompilationUnit(
+ """
def single_starred_lhs(x):
a = (x, x, x)
*b, = a
return b
- ''')
+ """
+ )
def test_singleton_tuple_unpack(self):
def foo(a):
- b, = (a,)
+ (b,) = (a,)
return b + 1
+
self.checkScript(foo, (torch.rand(3),))
def test_tuple_assignments(self):
@@ -441,7 +477,9 @@ class TestTyping(JitTestCase):
a[i], (x[i], b) = 1, (2, 3)
return a[i] + 1, x + 5, b
- self.checkScript(subscript_tuple_assign, ([12, 7, 9, 11], torch.tensor((3, 13, 17)), 0))
+ self.checkScript(
+ subscript_tuple_assign, ([12, 7, 9, 11], torch.tensor((3, 13, 17)), 0)
+ )
def star_tuple_assign():
# type: () -> Tuple[int, int, Tuple[int, int], Tuple[int, int]]
@@ -455,7 +493,7 @@ class TestTyping(JitTestCase):
a[0] += 1
return a
- with self.assertRaisesRegex(RuntimeError, 'does not support augmented assign'):
+ with self.assertRaisesRegex(RuntimeError, "does not support augmented assign"):
scripted_aug_assign = torch.jit.script(subscript_tuple_augmented_assign)
def test_multiple_assign(self):
@@ -505,7 +543,6 @@ class TestTyping(JitTestCase):
# type: (Optional[int]) -> int
return torch.jit._unwrap_optional(x)
-
@torch.jit.script
def fn(x):
# type: (int) -> int
@@ -540,7 +577,7 @@ class TestTyping(JitTestCase):
# type: (Tuple[float, float]) -> int
return opt_list(x) + broadcast_opt_list(x)
- self.assertEqual(opt_list_tuple_caller((2., 3.)), 4)
+ self.assertEqual(opt_list_tuple_caller((2.0, 3.0)), 4)
def test_optional_tuple(self):
def fn(x=None):
@@ -556,10 +593,11 @@ class TestTyping(JitTestCase):
def test_namedtuple_redefine(self):
global _1, _2
- _1 = namedtuple('GoogLeNetOutputs', ['logits', 'aux_logits2', 'aux_logits1'])
- _2 = namedtuple('GoogLeNetOutputs', ['different'])
+ _1 = namedtuple("GoogLeNetOutputs", ["logits", "aux_logits2", "aux_logits1"])
+ _2 = namedtuple("GoogLeNetOutputs", ["different"])
+
+ with self.assertRaisesRegex(RuntimeError, r"redefine"):
- with self.assertRaisesRegex(RuntimeError, r'redefine'):
@torch.jit.script
def foo(x, y):
# type: (_1, _2) -> _1
@@ -567,7 +605,9 @@ class TestTyping(JitTestCase):
def test_namedtuple_py2(self):
global _GoogLeNetOutputs # see [local resolution in python]
- _GoogLeNetOutputs = namedtuple('GoogLeNetOutputs', ['logits', 'aux_logits2', 'aux_logits1'])
+ _GoogLeNetOutputs = namedtuple(
+ "GoogLeNetOutputs", ["logits", "aux_logits2", "aux_logits1"]
+ )
@torch.jit.script
def foo(x):
@@ -575,22 +615,27 @@ class TestTyping(JitTestCase):
return x
vals = torch.rand(3), torch.rand(4), torch.rand(5)
- out = foo(_GoogLeNetOutputs(logits=vals[0], aux_logits2=vals[1], aux_logits1=vals[2]))
+ out = foo(
+ _GoogLeNetOutputs(logits=vals[0], aux_logits2=vals[1], aux_logits1=vals[2])
+ )
self.assertEqual(out.logits, vals[0])
self.assertEqual(out.aux_logits2, vals[1])
self.assertEqual(out.aux_logits1, vals[2])
def test_namedtuple_good_error(self):
global _GoogLeNetOutputs # see [local resolution in python]
- _GoogLeNetOutputs = namedtuple('GoogLeNetOutputs', ['logits', 'aux_logits2', 'aux_logits1'])
+ _GoogLeNetOutputs = namedtuple(
+ "GoogLeNetOutputs", ["logits", "aux_logits2", "aux_logits1"]
+ )
@torch.jit.script
def foo(x):
# type: (_GoogLeNetOutputs) -> _GoogLeNetOutputs
return x
- with self.assertRaisesRegex(RuntimeError,
- r'aka NamedTuple\(logits, aux_logits2, aux_logits1\)'):
+ with self.assertRaisesRegex(
+ RuntimeError, r"aka NamedTuple\(logits, aux_logits2, aux_logits1\)"
+ ):
out = foo(_GoogLeNetOutputs(logits="3", aux_logits2="4", aux_logits1="5"))
def test_namedtuple_error_source_attribution(self):
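
The container-typing tests above exercise dict, list, and tuple parameters and returns in scripted functions. A compact sketch of that pattern, using container constructs of the kind exercised above (the names here are illustrative):

    from typing import Dict, List, Tuple

    import torch

    @torch.jit.script
    def rollup(counts: Dict[str, int], extra: List[int]) -> Tuple[int, int]:
        total = 0
        for key in counts:  # dict iteration yields keys in TorchScript
            total += counts[key]
        return total, sum(extra)

    assert rollup({"a": 1, "b": 2}, [3, 4]) == (3, 7)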
diff --git a/test/jit/test_union.py b/test/jit/test_union.py
index bee1efc031..b38ac28ddf 100644
--- a/test/jit/test_union.py
+++ b/test/jit/test_union.py
@@ -3,22 +3,25 @@
import io
import os
import sys
-
-import torch
-from torch.testing import FileCheck
from enum import Enum
from textwrap import dedent
from typing import Dict, List, Optional, Tuple, Union
+import torch
+from torch.testing import FileCheck
+
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, make_global
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
+
class TestUnion(JitTestCase):
"""
@@ -57,9 +60,12 @@ class TestUnion(JitTestCase):
scripted = torch.jit.script(fn)
- with self.assertRaisesRegex(RuntimeError, "Expected a member of"
- r" Union\[float, int\] but "
- "instead found type str"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Expected a member of"
+ r" Union\[float, int\] but "
+ "instead found type str",
+ ):
scripted("1")
def test_union_with_collections(self):
@@ -71,22 +77,31 @@ class TestUnion(JitTestCase):
scripted = torch.jit.script(fn)
- with self.assertRaisesRegex(RuntimeError, "Expected a member of"
- r" Union\[List\[int\], Dict\[str, "
- r"int\]\] but instead found type "
- r"Dict\[str, str\]"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Expected a member of"
+ r" Union\[List\[int\], Dict\[str, "
+ r"int\]\] but instead found type "
+ r"Dict\[str, str\]",
+ ):
scripted({"foo": "bar", "baz": "qux"})
- with self.assertRaisesRegex(RuntimeError, "Expected a member of"
- r" Union\[List\[int\], Dict\[str, "
- r"int\]\] but instead found type "
- r"List\[str\]"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Expected a member of"
+ r" Union\[List\[int\], Dict\[str, "
+ r"int\]\] but instead found type "
+ r"List\[str\]",
+ ):
scripted(["foo", "bar", "baz"])
- with self.assertRaisesRegex(RuntimeError, "Expected a member of"
- r" Union\[List\[int\], Dict\[str, "
- r"int\]\] but instead found type "
- "str"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Expected a member of"
+ r" Union\[List\[int\], Dict\[str, "
+ r"int\]\] but instead found type "
+ "str",
+ ):
scripted("1")
def test_union_with_enum(self):
@@ -104,16 +119,18 @@ class TestUnion(JitTestCase):
scripted = torch.jit.script(fn)
- with self.assertRaisesRegex(RuntimeError, "Expected a member of"
- r" Union\[__torch__.jit.test_union."
- r"Color, str\] but instead found "
- "type int"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Expected a member of"
+ r" Union\[__torch__.jit.test_union."
+ r"Color, str\] but instead found "
+ "type int",
+ ):
scripted(1)
def test_union_in_class_constructor(self):
-
@torch.jit.script # noqa: B903
- class A: # noqa: B903
+ class A: # noqa: B903
def __init__(self, x: Union[int, str]) -> None:
self.x = x
@@ -125,9 +142,12 @@ class TestUnion(JitTestCase):
scripted = torch.jit.script(fn)
- with self.assertRaisesRegex(RuntimeError, "Expected a member of"
- r" Union\[int, str\] but instead "
- r"found type List\[str\]"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Expected a member of"
+ r" Union\[int, str\] but instead "
+ r"found type List\[str\]",
+ ):
scripted(["foo", "bar", "baz"])
def test_union_return_type(self):
@@ -171,7 +191,7 @@ class TestUnion(JitTestCase):
def test_union_variable_can_be_reassigned(self):
@torch.jit.script
def aux1(i: int):
- return int(i ** 2)
+ return int(i**2)
@torch.jit.script
def aux2(s: str):
@@ -225,8 +245,7 @@ class TestUnion(JitTestCase):
s = fn.graph
- FileCheck().check("x : Union(float, int, str)") \
- .run(s)
+ FileCheck().check("x : Union(float, int, str)").run(s)
def test_unions_of_a_single_argument_vanish(self):
@torch.jit.script
@@ -235,8 +254,7 @@ class TestUnion(JitTestCase):
s = fn.graph
- FileCheck().check("x : int") \
- .run(s)
+ FileCheck().check("x : int").run(s)
def test_union_redundant_arguments_are_skipped(self):
@torch.jit.script
@@ -245,8 +263,7 @@ class TestUnion(JitTestCase):
s = fn.graph
- FileCheck().check("x : Union(int, str)") \
- .run(s)
+ FileCheck().check("x : Union(int, str)").run(s)
def test_union_redundant_arguments_are_skipped_optional(self):
@torch.jit.script
@@ -255,8 +272,7 @@ class TestUnion(JitTestCase):
s = fn.graph
- FileCheck().check("x : Union(float, int, NoneType)") \
- .run(s)
+ FileCheck().check("x : Union(float, int, NoneType)").run(s)
def test_union_redundant_arguments_are_skipped_subtyping(self):
@torch.jit.script
@@ -265,8 +281,7 @@ class TestUnion(JitTestCase):
s = fn.graph
- FileCheck().check("x : Union((int?, int), str)") \
- .run(s)
+ FileCheck().check("x : Union((int?, int), str)").run(s)
def test_union_redundant_arguments_are_skipped_container(self):
@torch.jit.script
@@ -275,8 +290,7 @@ class TestUnion(JitTestCase):
s = fn.graph
- FileCheck().check("x : Union(float[], str[])") \
- .run(s)
+ FileCheck().check("x : Union(float[], str[])").run(s)
def test_union_argument_order_is_ignored(self):
@torch.jit.script
@@ -288,8 +302,7 @@ class TestUnion(JitTestCase):
return "foo"
for s in (fn1.graph, fn2.graph):
- FileCheck().check("x : Union(int, str)") \
- .run(s)
+ FileCheck().check("x : Union(int, str)").run(s)
def test_union_argument_order_is_ignored_container(self):
@torch.jit.script
@@ -301,8 +314,7 @@ class TestUnion(JitTestCase):
return "foo"
for s in (fn1.graph, fn2.graph):
- FileCheck().check("x : Union(int[], str[])") \
- .run(s)
+ FileCheck().check("x : Union(int[], str[])").run(s)
def test_union_T_None_is_equivalent_to_optional_T(self):
@torch.jit.script
@@ -366,9 +378,9 @@ class TestUnion(JitTestCase):
s = l.code
- FileCheck().check("Union[int, NoneType, str]") \
- .check("Union[int, NoneType, str]") \
- .run(s)
+ FileCheck().check("Union[int, NoneType, str]").check(
+ "Union[int, NoneType, str]"
+ ).run(s)
def test_union_subclasses_larger_union(self):
def fn() -> Union[int, str, torch.Tensor]:
@@ -386,9 +398,12 @@ class TestUnion(JitTestCase):
x[1] = 2
return x[1]
- with self.assertRaisesRegex(RuntimeError, "only int, float, "
- "complex, Tensor, device and string keys "
- "are supported"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "only int, float, "
+ "complex, Tensor, device and string keys "
+ "are supported",
+ ):
torch.jit.script(fn)
def test_union_as_dict_value(self):
@@ -402,7 +417,6 @@ class TestUnion(JitTestCase):
def test_union_module_with_union_instance_variable(self):
class M(torch.nn.Module):
-
x: Union[int, str]
def __init__(self, x: Union[int, str]):
@@ -413,7 +427,12 @@ class TestUnion(JitTestCase):
self.x = y
return self.x
- self.checkModule(M(2,), (1,))
+ self.checkModule(
+ M(
+ 2,
+ ),
+ (1,),
+ )
self.checkModule(M("bar"), ("foo",))
def test_union_module_with_union_class_variable(self):
@@ -508,9 +527,7 @@ class TestUnion(JitTestCase):
s = fn.graph
# Check that we don't have any branching statements
- FileCheck().check_not("block0()") \
- .check_not("block1()") \
- .run(s)
+ FileCheck().check_not("block0()").check_not("block1()").run(s)
def test_union_type_refinement_statically_true(self):
@torch.jit.script
@@ -525,9 +542,7 @@ class TestUnion(JitTestCase):
s = fn.graph
# Check that we don't have any branching statements
- FileCheck().check_not("block0()") \
- .check_not("block1()") \
- .run(s)
+ FileCheck().check_not("block0()").check_not("block1()").run(s)
def test_union_type_refinement_partial_static_refinement_tuple_rhs(self):
def fn(x: Union[List[int], int]) -> int:
@@ -556,7 +571,7 @@ class TestUnion(JitTestCase):
def test_union_type_refinement_internal_declaration(self):
def fn(flag: bool) -> str:
x: Union[int, str, None] = None
- if (flag):
+ if flag:
y = "foo"
else:
y = 1
@@ -589,9 +604,12 @@ class TestUnion(JitTestCase):
else:
return "bar"
- with self.assertRaisesRegex(RuntimeError, "y is set to type str"
- " in the true branch and type int "
- "in the false branch"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "y is set to type str"
+ " in the true branch and type int "
+ "in the false branch",
+ ):
torch.jit.script(fn)
def test_union_branching_does_not_widen_existing_inferred_type(self):
@@ -606,9 +624,12 @@ class TestUnion(JitTestCase):
else:
return "baz"
- with self.assertRaisesRegex(RuntimeError, "previously had type "
- "str but is now being assigned to a"
- " value of type int"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "previously had type "
+ "str but is now being assigned to a"
+ " value of type int",
+ ):
torch.jit.script(fn)
def test_union_schema_matching_on_internal_type(self):
@@ -645,8 +666,8 @@ class TestUnion(JitTestCase):
def test_union_memory_aliasing(self):
def fn():
- x : List[torch.Tensor] = []
- z : List[Optional[List[torch.Tensor]]] = []
+ x: List[torch.Tensor] = []
+ z: List[Optional[List[torch.Tensor]]] = []
z.append(x)
x_alias = z[0]
if torch.jit.isinstance(x_alias, List[torch.Tensor]):
@@ -682,203 +703,212 @@ class TestUnion(JitTestCase):
code = template.format(ann=ann, lhs=lhs)
with self.assertRaisesRegex(RuntimeError, msg):
cu = torch.jit.CompilationUnit(code, _frames_up=1)
- string_frontend = getattr(cu, "fn") # noqa: B009
+ string_frontend = getattr(cu, "fn") # noqa: B009
def test_union_with_list_assignment(self):
- template = dedent('''
+ template = dedent(
+ """
def fn():
x: {ann} = {lhs}
if torch.jit.isinstance(x, List[torch.Tensor]):
x.append(torch.tensor(3))
return x
- ''')
-
- lhs = {"list_literal_empty" : "[]",
-
- "list_literal_of_tensor" : "[torch.arange(3), torch.arange(5)]",
-
- "list_literal_of_str" : "[\"foo\", \"bar\", \"baz\"]",
-
- "list_literal_of_mixed" : "[torch.arange(5), 1]",
-
- "list_comprehension_of_tensor" :
- "[torch.add(x, 1) for x in [torch.arange(3), torch.arange(5)]]",
-
- "list_comprehension_of_str" :
- "[x + \"!\" for x in [\"foo\", \"bar\", \"baz\"]]",
-
- "list_comprehension_of_mixed" :
- "[torch.add(1, x) for x in [torch.arange(5), 1]]"}
+ """
+ )
+
+ lhs = {
+ "list_literal_empty": "[]",
+ "list_literal_of_tensor": "[torch.arange(3), torch.arange(5)]",
+ "list_literal_of_str": '["foo", "bar", "baz"]',
+ "list_literal_of_mixed": "[torch.arange(5), 1]",
+ "list_comprehension_of_tensor": "[torch.add(x, 1) for x in [torch.arange(3), torch.arange(5)]]",
+ "list_comprehension_of_str": '[x + "!" for x in ["foo", "bar", "baz"]]',
+ "list_comprehension_of_mixed": "[torch.add(1, x) for x in [torch.arange(5), 1]]",
+ }
"""
Union[List[str], List[torch.Tensor]]
"""
- self._assert_raises(template,
- "Union[List[str], List[torch.Tensor]]",
- lhs["list_literal_empty"],
- "there are multiple possible List type "
- "candidates in the Union annotation")
-
- self._assert_passes(template,
- "Union[List[str], List[torch.Tensor]]",
- lhs["list_literal_of_tensor"])
-
- self._assert_passes(template,
- "Union[List[str], List[torch.Tensor]]",
- lhs["list_literal_of_str"])
-
- self._assert_raises(template,
- "Union[List[str], List[torch.Tensor]]",
- lhs["list_literal_of_mixed"],
- "none of those types match the types of the"
- " given list elements")
-
- self._assert_passes(template,
- "Union[List[str], List[torch.Tensor]]",
- lhs["list_comprehension_of_tensor"])
-
- self._assert_passes(template,
- "Union[List[str], List[torch.Tensor]]",
- lhs["list_comprehension_of_str"])
+ self._assert_raises(
+ template,
+ "Union[List[str], List[torch.Tensor]]",
+ lhs["list_literal_empty"],
+ "there are multiple possible List type "
+ "candidates in the Union annotation",
+ )
+
+ self._assert_passes(
+ template,
+ "Union[List[str], List[torch.Tensor]]",
+ lhs["list_literal_of_tensor"],
+ )
+
+ self._assert_passes(
+ template, "Union[List[str], List[torch.Tensor]]", lhs["list_literal_of_str"]
+ )
+
+ self._assert_raises(
+ template,
+ "Union[List[str], List[torch.Tensor]]",
+ lhs["list_literal_of_mixed"],
+ "none of those types match the types of the" " given list elements",
+ )
+
+ self._assert_passes(
+ template,
+ "Union[List[str], List[torch.Tensor]]",
+ lhs["list_comprehension_of_tensor"],
+ )
+
+ self._assert_passes(
+ template,
+ "Union[List[str], List[torch.Tensor]]",
+ lhs["list_comprehension_of_str"],
+ )
# TODO: Support mixed list comprehensions
- self._assert_raises(template,
- "Union[List[str], List[torch.Tensor]]",
- lhs["list_comprehension_of_mixed"],
- "Arguments for call are not valid")
+ self._assert_raises(
+ template,
+ "Union[List[str], List[torch.Tensor]]",
+ lhs["list_comprehension_of_mixed"],
+ "Arguments for call are not valid",
+ )
"""
Union[int, torch.Tensor]
"""
- self._assert_raises(template,
- "Union[int, torch.Tensor]",
- lhs["list_literal_empty"],
- "Expected an Union type annotation with an "
- "inner List type")
-
- self._assert_raises(template, "Union[int, torch.Tensor]",
- lhs["list_literal_of_tensor"],
- "Expected an Union type annotation with an "
- "inner List type")
-
- self._assert_raises(template, "Union[int, torch.Tensor]",
- lhs["list_comprehension_of_tensor"],
- "Expected an Union type annotation with an "
- "inner List type")
+ self._assert_raises(
+ template,
+ "Union[int, torch.Tensor]",
+ lhs["list_literal_empty"],
+ "Expected an Union type annotation with an " "inner List type",
+ )
+
+ self._assert_raises(
+ template,
+ "Union[int, torch.Tensor]",
+ lhs["list_literal_of_tensor"],
+ "Expected an Union type annotation with an " "inner List type",
+ )
+
+ self._assert_raises(
+ template,
+ "Union[int, torch.Tensor]",
+ lhs["list_comprehension_of_tensor"],
+ "Expected an Union type annotation with an " "inner List type",
+ )
"""
Union[List[torch.Tensor], int]
"""
- self._assert_passes(template,
- "Union[List[torch.Tensor], int]",
- lhs["list_literal_empty"])
-
- self._assert_passes(template,
- "Union[List[torch.Tensor], int]",
- lhs["list_literal_of_tensor"])
-
- self._assert_raises(template, "Union[List[torch.Tensor], int]",
- lhs["list_literal_of_str"],
- r"List type annotation `List\[Tensor\]` did "
- "not match the types of the given list "
- "elements")
-
- self._assert_raises(template, "Union[List[torch.Tensor], int]",
- lhs["list_literal_of_mixed"],
- r"List type annotation `List\[Tensor\]` did "
- "not match the types of the given list "
- "elements")
-
- self._assert_passes(template,
- "Union[List[torch.Tensor], int]",
- lhs["list_comprehension_of_tensor"])
-
- self._assert_raises(template,
- "Union[List[torch.Tensor], int]",
- lhs["list_comprehension_of_str"],
- r"List type annotation `List\[Tensor\]` did "
- "not match the types of the given list "
- "elements")
+ self._assert_passes(
+ template, "Union[List[torch.Tensor], int]", lhs["list_literal_empty"]
+ )
+
+ self._assert_passes(
+ template, "Union[List[torch.Tensor], int]", lhs["list_literal_of_tensor"]
+ )
+
+ self._assert_raises(
+ template,
+ "Union[List[torch.Tensor], int]",
+ lhs["list_literal_of_str"],
+ r"List type annotation `List\[Tensor\]` did "
+ "not match the types of the given list "
+ "elements",
+ )
+
+ self._assert_raises(
+ template,
+ "Union[List[torch.Tensor], int]",
+ lhs["list_literal_of_mixed"],
+ r"List type annotation `List\[Tensor\]` did "
+ "not match the types of the given list "
+ "elements",
+ )
+
+ self._assert_passes(
+ template,
+ "Union[List[torch.Tensor], int]",
+ lhs["list_comprehension_of_tensor"],
+ )
+
+ self._assert_raises(
+ template,
+ "Union[List[torch.Tensor], int]",
+ lhs["list_comprehension_of_str"],
+ r"List type annotation `List\[Tensor\]` did "
+ "not match the types of the given list "
+ "elements",
+ )
# TODO(@ansley): Support mixed list comprehensions
- self._assert_raises(template,
- "Union[List[torch.Tensor], int]",
- lhs["list_comprehension_of_mixed"],
- "Arguments for call are not valid")
+ self._assert_raises(
+ template,
+ "Union[List[torch.Tensor], int]",
+ lhs["list_comprehension_of_mixed"],
+ "Arguments for call are not valid",
+ )
def test_union_with_dict_assignment(self):
- template = dedent('''
+ template = dedent(
+ """
def fn():
x: {ann} = {lhs}
if torch.jit.isinstance(x, Dict[str, torch.Tensor]):
x["foo"] = torch.tensor(3)
return x
- ''')
-
- lhs = {"dict_literal_empty" : "{}",
-
- "dict_literal_of_str_tensor" :
- "{\"foo\" : torch.arange(3), \"bar\" : torch.arange(5)}",
-
- "dict_literal_of_str_int" :
- "{\"foo\" : 1, \"bar\" : 2}",
-
- "dict_literal_of_mixed" :
- "{\"foo\" : torch.arange(3), \"bar\" : 2}",
-
- "dict_comprehension_of_str_tensor" :
- "{x : torch.add(y, 1) for x, y in \
- zip([\"foo\", \"bar\"], [torch.arange(3), torch.arange(5)])}",
-
- "dict_comprehension_of_str_int" :
- "{x : torch.add(y, 1) for x, y in \
- zip([\"foo\", \"bar\"], [1, 2]}",
-
- "dict_comprehension_of_mixed" :
- "{x : torch.add(y, 1) for x, y in \
- zip([\"foo\", \"bar\"], [torch.arange(3), 2])}",
-
- "dict_keyword" :
- "dict(foo=torch.arange(3), baz=torch.arange(5))",
-
- "dict_keyword_with_iterable" :
- "dict([(\"foo\", torch.arange(3)), (\"bar\", torch.arange(5))])",
-
- "dict_keyword_with_empty_iterable" :
- "dict([])",
-
- "dict_keyword_with_internal_aggregate_function" :
- "dict(zip([\"foo\", \"bar\"], [torch.arange(3), torch.arange(5)])",
-
- "dict_keyword_with_mapping" :
- "dict({\"foo\" : torch.arange(3), \"bar\" : torch.arange(5)})",
-
- "dict_keyword_with_mapping_and_kwargs" :
- "dict({\"foo\" : torch.arange(3), \"bar\" : torch.arange(5)}, baz=torch.arange(7))",
-
- }
+ """
+ )
+
+ lhs = {
+ "dict_literal_empty": "{}",
+ "dict_literal_of_str_tensor": '{"foo" : torch.arange(3), "bar" : torch.arange(5)}',
+ "dict_literal_of_str_int": '{"foo" : 1, "bar" : 2}',
+ "dict_literal_of_mixed": '{"foo" : torch.arange(3), "bar" : 2}',
+ "dict_comprehension_of_str_tensor": '{x : torch.add(y, 1) for x, y in \
+ zip(["foo", "bar"], [torch.arange(3), torch.arange(5)])}',
+ "dict_comprehension_of_str_int": '{x : torch.add(y, 1) for x, y in \
+ zip(["foo", "bar"], [1, 2]}',
+ "dict_comprehension_of_mixed": '{x : torch.add(y, 1) for x, y in \
+ zip(["foo", "bar"], [torch.arange(3), 2])}',
+ "dict_keyword": "dict(foo=torch.arange(3), baz=torch.arange(5))",
+ "dict_keyword_with_iterable": 'dict([("foo", torch.arange(3)), ("bar", torch.arange(5))])',
+ "dict_keyword_with_empty_iterable": "dict([])",
+ "dict_keyword_with_internal_aggregate_function": 'dict(zip(["foo", "bar"], [torch.arange(3), torch.arange(5)])',
+ "dict_keyword_with_mapping": 'dict({"foo" : torch.arange(3), "bar" : torch.arange(5)})',
+ "dict_keyword_with_mapping_and_kwargs": 'dict({"foo" : torch.arange(3), "bar" : torch.arange(5)}, baz=torch.arange(7))',
+ }
"""
Union[Dict[str, torch.Tensor], Dict[str, int]]
"""
- self._assert_raises(template,
- "Union[List[str], List[torch.Tensor]]",
- lhs["dict_literal_empty"],
- "Expected an Union type annotation with an "
- "inner Dict type")
-
- self._assert_passes(template,
- "Union[Dict[str, torch.Tensor], Dict[str, int]]",
- lhs["dict_literal_of_str_tensor"])
-
- self._assert_passes(template,
- "Union[Dict[str, torch.Tensor], Dict[str, int]]",
- lhs["dict_literal_of_str_int"])
-
- self._assert_raises(template, "Union[Dict[str, torch.Tensor], Dict[str, int]]",
- lhs["dict_literal_of_mixed"],
- "none of those dict types can hold the "
- "types of the given keys and values")
+ self._assert_raises(
+ template,
+ "Union[List[str], List[torch.Tensor]]",
+ lhs["dict_literal_empty"],
+ "Expected an Union type annotation with an " "inner Dict type",
+ )
+
+ self._assert_passes(
+ template,
+ "Union[Dict[str, torch.Tensor], Dict[str, int]]",
+ lhs["dict_literal_of_str_tensor"],
+ )
+
+ self._assert_passes(
+ template,
+ "Union[Dict[str, torch.Tensor], Dict[str, int]]",
+ lhs["dict_literal_of_str_int"],
+ )
+
+ self._assert_raises(
+ template,
+ "Union[Dict[str, torch.Tensor], Dict[str, int]]",
+ lhs["dict_literal_of_mixed"],
+ "none of those dict types can hold the "
+ "types of the given keys and values",
+ )
# TODO: String frontend does not support tuple unpacking
# https://github.com/pytorch/pytorch/issues/64096
@@ -899,45 +929,57 @@ class TestUnion(JitTestCase):
# TODO(@ansley): Follow-up project needed for full type
# inference with dict keyword (supported for dict comprehension
# and dict literal already; should not be a blocker for anyone)
- self._assert_raises(template,
- "Union[Dict[str, torch.Tensor], Dict[str, int]]",
- lhs["dict_keyword"],
- "full type inference is not yet supported")
-
- self._assert_raises(template,
- "Union[Dict[str, torch.Tensor], Dict[str, int]]",
- lhs["dict_keyword_with_iterable"],
- "full type inference is not yet supported")
-
- self._assert_raises(template,
- "Union[Dict[str, torch.Tensor], Dict[str, int]]",
- lhs["dict_keyword_with_empty_iterable"],
- "full type inference is not yet supported")
-
- self._assert_raises(template,
- "Union[Dict[str, torch.Tensor], Dict[str, int]]",
- lhs["dict_keyword_with_mapping"],
- "full type inference is not yet supported")
-
- self._assert_raises(template,
- "Union[Dict[str, torch.Tensor], Dict[str, int]]",
- lhs["dict_keyword_with_mapping_and_kwargs"],
- "full type inference is not yet supported")
+ self._assert_raises(
+ template,
+ "Union[Dict[str, torch.Tensor], Dict[str, int]]",
+ lhs["dict_keyword"],
+ "full type inference is not yet supported",
+ )
+
+ self._assert_raises(
+ template,
+ "Union[Dict[str, torch.Tensor], Dict[str, int]]",
+ lhs["dict_keyword_with_iterable"],
+ "full type inference is not yet supported",
+ )
+
+ self._assert_raises(
+ template,
+ "Union[Dict[str, torch.Tensor], Dict[str, int]]",
+ lhs["dict_keyword_with_empty_iterable"],
+ "full type inference is not yet supported",
+ )
+
+ self._assert_raises(
+ template,
+ "Union[Dict[str, torch.Tensor], Dict[str, int]]",
+ lhs["dict_keyword_with_mapping"],
+ "full type inference is not yet supported",
+ )
+
+ self._assert_raises(
+ template,
+ "Union[Dict[str, torch.Tensor], Dict[str, int]]",
+ lhs["dict_keyword_with_mapping_and_kwargs"],
+ "full type inference is not yet supported",
+ )
"""
Union[int, torch.Tensor]
"""
- self._assert_raises(template,
- "Union[int, torch.Tensor]",
- lhs["dict_literal_empty"],
- "Expected an Union type annotation with "
- "an inner Dict type")
-
- self._assert_raises(template,
- "Union[int, torch.Tensor]",
- lhs["dict_literal_of_str_tensor"],
- "Expected an Union type annotation with "
- "an inner Dict type")
+ self._assert_raises(
+ template,
+ "Union[int, torch.Tensor]",
+ lhs["dict_literal_empty"],
+ "Expected an Union type annotation with " "an inner Dict type",
+ )
+
+ self._assert_raises(
+ template,
+ "Union[int, torch.Tensor]",
+ lhs["dict_literal_of_str_tensor"],
+ "Expected an Union type annotation with " "an inner Dict type",
+ )
# See above--string frontend does not support tuple unpacking
# self._assert_raises(template, "Union[int, torch.Tensor]",
@@ -947,47 +989,61 @@ class TestUnion(JitTestCase):
"""
Union[Dict[str, torch.Tensor], int]
"""
- self._assert_passes(template,
- "Union[Dict[str, torch.Tensor], int]",
- lhs["dict_literal_empty"])
-
- self._assert_passes(template,
- "Union[Dict[str, torch.Tensor], int]",
- lhs["dict_literal_of_str_tensor"])
-
- self._assert_raises(template,
- "Union[Dict[str, torch.Tensor], int]",
- lhs["dict_literal_of_str_int"],
- "Type annotation was inferred to be "
- r"`Dict\[str, Tensor\]`, but the type of "
- "values given by the dict literal is")
-
- self._assert_raises(template,
- "Union[Dict[str, torch.Tensor], int]",
- lhs["dict_literal_of_mixed"],
- "Type annotation was inferred to be "
- r"`Dict\[str, Tensor\]`, but the type of "
- "values given by the dict literal is")
-
- self._assert_passes(template,
- "Union[Dict[str, torch.Tensor], int]",
- lhs["dict_keyword"])
-
- self._assert_passes(template,
- "Union[Dict[str, torch.Tensor], int]",
- lhs["dict_keyword_with_iterable"])
-
- self._assert_passes(template,
- "Union[Dict[str, torch.Tensor], int]",
- lhs["dict_keyword_with_empty_iterable"])
-
- self._assert_passes(template,
- "Union[Dict[str, torch.Tensor], int]",
- lhs["dict_keyword_with_mapping"])
-
- self._assert_passes(template,
- "Union[Dict[str, torch.Tensor], int]",
- lhs["dict_keyword_with_mapping_and_kwargs"])
+ self._assert_passes(
+ template, "Union[Dict[str, torch.Tensor], int]", lhs["dict_literal_empty"]
+ )
+
+ self._assert_passes(
+ template,
+ "Union[Dict[str, torch.Tensor], int]",
+ lhs["dict_literal_of_str_tensor"],
+ )
+
+ self._assert_raises(
+ template,
+ "Union[Dict[str, torch.Tensor], int]",
+ lhs["dict_literal_of_str_int"],
+ "Type annotation was inferred to be "
+ r"`Dict\[str, Tensor\]`, but the type of "
+ "values given by the dict literal is",
+ )
+
+ self._assert_raises(
+ template,
+ "Union[Dict[str, torch.Tensor], int]",
+ lhs["dict_literal_of_mixed"],
+ "Type annotation was inferred to be "
+ r"`Dict\[str, Tensor\]`, but the type of "
+ "values given by the dict literal is",
+ )
+
+ self._assert_passes(
+ template, "Union[Dict[str, torch.Tensor], int]", lhs["dict_keyword"]
+ )
+
+ self._assert_passes(
+ template,
+ "Union[Dict[str, torch.Tensor], int]",
+ lhs["dict_keyword_with_iterable"],
+ )
+
+ self._assert_passes(
+ template,
+ "Union[Dict[str, torch.Tensor], int]",
+ lhs["dict_keyword_with_empty_iterable"],
+ )
+
+ self._assert_passes(
+ template,
+ "Union[Dict[str, torch.Tensor], int]",
+ lhs["dict_keyword_with_mapping"],
+ )
+
+ self._assert_passes(
+ template,
+ "Union[Dict[str, torch.Tensor], int]",
+ lhs["dict_keyword_with_mapping_and_kwargs"],
+ )
# See above--string frontend does not support tuple unpacking
# self._assert_passes(template,
diff --git a/test/jit/test_unsupported_ops.py b/test/jit/test_unsupported_ops.py
index 9555dcac51..f93515a9e5 100644
--- a/test/jit/test_unsupported_ops.py
+++ b/test/jit/test_unsupported_ops.py
@@ -2,19 +2,21 @@
import os
import sys
+import unittest
import torch
-import unittest
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
# NOTE: FIXING FAILING TESTS
# If you are seeing a test failure from this file, congrats, you improved
@@ -22,6 +24,7 @@ if __name__ == '__main__':
# the corresponding section in documentation that states the unsupported behavior.
# see: `jit_unsupported.rst`
+
class TestUnsupportedOps(JitTestCase):
def test_factory_ops_requires_grad_fail(self):
# Keyword argument {name} unknown is a JIT-only error message,
@@ -32,31 +35,31 @@ class TestUnsupportedOps(JitTestCase):
def ones():
return torch.ones([2], requires_grad=True)
- with self.assertRaisesRegexWithHighlight(Exception,
- "Keyword argument requires_grad unknown",
- "torch.ones"):
+ with self.assertRaisesRegexWithHighlight(
+ Exception, "Keyword argument requires_grad unknown", "torch.ones"
+ ):
torch.jit.script(ones)
def randn():
return torch.randn([2], requires_grad=True)
- with self.assertRaisesRegexWithHighlight(Exception,
- "Keyword argument requires_grad unknown",
- "torch.randn"):
+ with self.assertRaisesRegexWithHighlight(
+ Exception, "Keyword argument requires_grad unknown", "torch.randn"
+ ):
torch.jit.script(randn)
def zeros():
return torch.zeros([2], requires_grad=True)
- with self.assertRaisesRegexWithHighlight(Exception,
- "Keyword argument requires_grad unknown",
- "torch.zeros"):
+ with self.assertRaisesRegexWithHighlight(
+ Exception, "Keyword argument requires_grad unknown", "torch.zeros"
+ ):
torch.jit.script(zeros)
@unittest.skipIf(not torch._C.has_lapack, "PyTorch compiled without Lapack")
def test_init_ops(self):
def calculate_gain():
- return torch.nn.init.calculate_gain('leaky_relu', 0.2)
+ return torch.nn.init.calculate_gain("leaky_relu", 0.2)
def eye_():
return torch.nn.init.eye_(torch.zeros([2, 2]))
@@ -71,9 +74,16 @@ class TestUnsupportedOps(JitTestCase):
return torch.nn.init.orthogonal_(torch.empty(3, 5))
def sparse():
- return torch.nn.init.sparse_(torch.empty(3, 5), sparsity=.1)
-
- for func in [calculate_gain, eye_, dirac_, kaiming_uniform_, orthogonal_, sparse]:
+ return torch.nn.init.sparse_(torch.empty(3, 5), sparsity=0.1)
+
+ for func in [
+ calculate_gain,
+ eye_,
+ dirac_,
+ kaiming_uniform_,
+ orthogonal_,
+ sparse,
+ ]:
# doesn't error in eager
func()
with self.assertRaisesRegex(Exception, ""):
diff --git a/test/jit/test_upgraders.py b/test/jit/test_upgraders.py
index a5b0d54b5e..fc325d95c3 100644
--- a/test/jit/test_upgraders.py
+++ b/test/jit/test_upgraders.py
@@ -3,20 +3,24 @@
import io
import os
import sys
-import torch
import zipfile
-from torch.testing import FileCheck
from typing import Union
+import torch
+from torch.testing import FileCheck
+
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
+
class TestUpgraders(JitTestCase):
def _load_model_version(self, loaded_model):
@@ -28,10 +32,10 @@ class TestUpgraders(JitTestCase):
# in a package between version 3 and 7.
# So we have to check for both.
try:
- version = int(zipped_model.read('archive/version').decode("utf-8"))
+ version = int(zipped_model.read("archive/version").decode("utf-8"))
return version
except KeyError:
- version = int(zipped_model.read('archive/.data/version').decode("utf-8"))
+ version = int(zipped_model.read("archive/.data/version").decode("utf-8"))
return version
# TODO (tugsuu) We should ideally be generating this test cases.
@@ -62,15 +66,23 @@ class TestUpgraders(JitTestCase):
upgrader_bumped_version = 3
upgrader_name = "_test_serialization_subcmul_0_2"
upgrader_schema = "aten::_test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=2) -> Tensor"
- dummy_entry = torch._C._UpgraderEntry(upgrader_bumped_version, upgrader_name, upgrader_schema)
+ dummy_entry = torch._C._UpgraderEntry(
+ upgrader_bumped_version, upgrader_name, upgrader_schema
+ )
- torch._C._test_only_add_entry_to_op_version_map("aten::_test_serialization_subcmul", dummy_entry)
+ torch._C._test_only_add_entry_to_op_version_map(
+ "aten::_test_serialization_subcmul", dummy_entry
+ )
map_after_test = torch._C._get_operator_version_map()
self.assertTrue("aten::_test_serialization_subcmul" in map_after_test)
self.assertTrue(len(map_after_test) - len(map_before_test) == 1)
- torch._C._test_only_remove_entry_to_op_version_map("aten::_test_serialization_subcmul")
+ torch._C._test_only_remove_entry_to_op_version_map(
+ "aten::_test_serialization_subcmul"
+ )
map_after_remove_test = torch._C._get_operator_version_map()
- self.assertTrue("aten::_test_serialization_subcmul" not in map_after_remove_test)
+ self.assertTrue(
+ "aten::_test_serialization_subcmul" not in map_after_remove_test
+ )
self.assertEqual(len(map_after_remove_test), len(map_before_test))
def test_populated_test_upgrader_graph(self):
@@ -151,7 +163,7 @@ class TestUpgraders(JitTestCase):
model_path = pytorch_test_dir + "/jit/fixtures/test_versioned_linspace_v7.ptl"
loaded_model = torch.jit.load(model_path)
sample_inputs = ((3, 10), (-10, 10), (4.0, 6.0), (3 + 4j, 4 + 5j))
- for (a, b) in sample_inputs:
+ for a, b in sample_inputs:
output_with_step, output_without_step = loaded_model(a, b)
# when no step is given, should have used 100
self.assertTrue(output_without_step.size(dim=0) == 100)
@@ -161,7 +173,9 @@ class TestUpgraders(JitTestCase):
self.assertTrue(version == 8)
def test_aten_linspace_out(self):
- model_path = pytorch_test_dir + "/jit/fixtures/test_versioned_linspace_out_v7.ptl"
+ model_path = (
+ pytorch_test_dir + "/jit/fixtures/test_versioned_linspace_out_v7.ptl"
+ )
loaded_model = torch.jit.load(model_path)
sample_inputs = (
(3, 10, torch.empty((100,), dtype=torch.int64)),
@@ -169,7 +183,7 @@ class TestUpgraders(JitTestCase):
(4.0, 6.0, torch.empty((100,), dtype=torch.float64)),
(3 + 4j, 4 + 5j, torch.empty((100,), dtype=torch.complex64)),
)
- for (a, b, c) in sample_inputs:
+ for a, b, c in sample_inputs:
output = loaded_model(a, b, c)
# when no step is given, should have used 100
self.assertTrue(output.size(dim=0) == 100)
@@ -181,7 +195,7 @@ class TestUpgraders(JitTestCase):
model_path = pytorch_test_dir + "/jit/fixtures/test_versioned_logspace_v8.ptl"
loaded_model = torch.jit.load(model_path)
sample_inputs = ((3, 10), (-10, 10), (4.0, 6.0), (3 + 4j, 4 + 5j))
- for (a, b) in sample_inputs:
+ for a, b in sample_inputs:
output_with_step, output_without_step = loaded_model(a, b)
# when no step is given, should have used 100
self.assertTrue(output_without_step.size(dim=0) == 100)
@@ -191,7 +205,9 @@ class TestUpgraders(JitTestCase):
self.assertTrue(version == 9)
def test_aten_logspace_out(self):
- model_path = pytorch_test_dir + "/jit/fixtures/test_versioned_logspace_out_v8.ptl"
+ model_path = (
+ pytorch_test_dir + "/jit/fixtures/test_versioned_logspace_out_v8.ptl"
+ )
loaded_model = torch.jit.load(model_path)
sample_inputs = (
(3, 10, torch.empty((100,), dtype=torch.int64)),
@@ -199,7 +215,7 @@ class TestUpgraders(JitTestCase):
(4.0, 6.0, torch.empty((100,), dtype=torch.float64)),
(3 + 4j, 4 + 5j, torch.empty((100,), dtype=torch.complex64)),
)
- for (a, b, c) in sample_inputs:
+ for a, b, c in sample_inputs:
output = loaded_model(a, b, c)
# when no step is given, should have used 100
self.assertTrue(output.size(dim=0) == 100)
@@ -208,21 +224,36 @@ class TestUpgraders(JitTestCase):
self.assertTrue(version == 9)
def test_aten_test_serialization(self):
- model_path = pytorch_test_dir + "/jit/fixtures/_test_serialization_subcmul_v2.pt"
+ model_path = (
+ pytorch_test_dir + "/jit/fixtures/_test_serialization_subcmul_v2.pt"
+ )
# add test version entry to the version map
upgrader_bumped_version = 3
upgrader_name = "_test_serialization_subcmul_0_2"
upgrader_schema = "aten::_test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=2) -> Tensor"
- dummy_entry = torch._C._UpgraderEntry(upgrader_bumped_version, upgrader_name, upgrader_schema)
+ dummy_entry = torch._C._UpgraderEntry(
+ upgrader_bumped_version, upgrader_name, upgrader_schema
+ )
- torch._C._test_only_add_entry_to_op_version_map("aten::_test_serialization_subcmul", dummy_entry)
+ torch._C._test_only_add_entry_to_op_version_map(
+ "aten::_test_serialization_subcmul", dummy_entry
+ )
# add test upgrader in the upgraders map
@torch.jit.script
- def _test_serialization_subcmul_0_2(self: torch.Tensor, other: torch.Tensor, alpha: Union[int, float] = 2) -> torch.Tensor:
+ def _test_serialization_subcmul_0_2(
+ self: torch.Tensor, other: torch.Tensor, alpha: Union[int, float] = 2
+ ) -> torch.Tensor:
return other - (self * alpha)
- torch._C._test_only_populate_upgraders({"_test_serialization_subcmul_0_2": str(_test_serialization_subcmul_0_2.graph)})
+
+ torch._C._test_only_populate_upgraders(
+ {
+ "_test_serialization_subcmul_0_2": str(
+ _test_serialization_subcmul_0_2.graph
+ )
+ }
+ )
# test if the server is able to find the test upgraders and apply to IR
loaded_model = torch.jit.load(model_path)
@@ -238,11 +269,21 @@ class TestUpgraders(JitTestCase):
# we check by its' code because graph variable names
# can be different every time
self.assertEqual(loaded_model.code, loaded_model_twice.code)
- torch._C._test_only_remove_entry_to_op_version_map("aten::_test_serialization_subcmul")
- torch._C._test_only_remove_upgraders({"_test_serialization_subcmul_0_2": str(_test_serialization_subcmul_0_2.graph)})
+ torch._C._test_only_remove_entry_to_op_version_map(
+ "aten::_test_serialization_subcmul"
+ )
+ torch._C._test_only_remove_upgraders(
+ {
+ "_test_serialization_subcmul_0_2": str(
+ _test_serialization_subcmul_0_2.graph
+ )
+ }
+ )
def test_aten_div_scalar_at_3(self):
- model_path = pytorch_test_dir + "/jit/fixtures/test_versioned_div_scalar_float_v3.pt"
+ model_path = (
+ pytorch_test_dir + "/jit/fixtures/test_versioned_div_scalar_float_v3.pt"
+ )
loaded_model = torch.jit.load(model_path)
FileCheck().check("prim::If").run(loaded_model.graph)
FileCheck().check_count("aten::div", 2).run(loaded_model.graph)
@@ -254,11 +295,15 @@ class TestUpgraders(JitTestCase):
self.assertEqual(version, 4)
loaded_model_twice = torch.jit.load(buffer)
- self.assertEqual(loaded_model(torch.Tensor([5.0, 3.0]), 2.0),
- loaded_model_twice(torch.Tensor([5.0, 3.0]), 2.0))
+ self.assertEqual(
+ loaded_model(torch.Tensor([5.0, 3.0]), 2.0),
+ loaded_model_twice(torch.Tensor([5.0, 3.0]), 2.0),
+ )
def test_aten_div_tensor_out_at_3(self):
- model_path = pytorch_test_dir + "/jit/fixtures/test_versioned_div_tensor_out_v3.pt"
+ model_path = (
+ pytorch_test_dir + "/jit/fixtures/test_versioned_div_tensor_out_v3.pt"
+ )
loaded_model = torch.jit.load(model_path)
FileCheck().check("prim::If").run(loaded_model.graph)
FileCheck().check_count("aten::div", 2).run(loaded_model.graph)
@@ -274,7 +319,9 @@ class TestUpgraders(JitTestCase):
self.assertEqual(loaded_model.code, loaded_model_twice.code)
def test_aten_full_at_4(self):
- model_path = pytorch_test_dir + "/jit/fixtures/test_versioned_full_integer_value_v4.pt"
+ model_path = (
+ pytorch_test_dir + "/jit/fixtures/test_versioned_full_integer_value_v4.pt"
+ )
loaded_model = torch.jit.load(model_path)
FileCheck().check_count("aten::Float", 1).run(loaded_model.graph)
FileCheck().check_count("aten::full", 2).run(loaded_model.graph)
@@ -290,7 +337,9 @@ class TestUpgraders(JitTestCase):
self.assertEqual(loaded_model.code, loaded_model_twice.code)
def test_aten_full_out_at_4(self):
- model_path = pytorch_test_dir + "/jit/fixtures/test_versioned_full_preserved_v4.pt"
+ model_path = (
+ pytorch_test_dir + "/jit/fixtures/test_versioned_full_preserved_v4.pt"
+ )
loaded_model = torch.jit.load(model_path)
FileCheck().check_count("aten::full", 5).run(loaded_model.graph)
version = self._load_model_version(loaded_model)
diff --git a/test/jit/test_warn.py b/test/jit/test_warn.py
index 32547badd1..abd3198911 100644
--- a/test/jit/test_warn.py
+++ b/test/jit/test_warn.py
@@ -1,12 +1,12 @@
# Owner(s): ["oncall: jit"]
+import io
import os
import sys
-import io
-
-import torch
import warnings
from contextlib import redirect_stderr
+
+import torch
from torch.testing import FileCheck
# Make the helper files in test/ importable
@@ -14,10 +14,12 @@ pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
-if __name__ == '__main__':
- raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
- "\tpython test/test_jit.py TESTNAME\n\n"
- "instead.")
+if __name__ == "__main__":
+ raise RuntimeError(
+ "This test file is not meant to be run directly, use:\n\n"
+ "\tpython test/test_jit.py TESTNAME\n\n"
+ "instead."
+ )
class TestWarn(JitTestCase):
@@ -30,12 +32,9 @@ class TestWarn(JitTestCase):
with redirect_stderr(f):
fn()
- FileCheck() \
- .check_count(
- str="UserWarning: I am warning you",
- count=1,
- exactly=True) \
- .run(f.getvalue())
+ FileCheck().check_count(
+ str="UserWarning: I am warning you", count=1, exactly=True
+ ).run(f.getvalue())
def test_warn_only_once(self):
@torch.jit.script
@@ -47,12 +46,9 @@ class TestWarn(JitTestCase):
with redirect_stderr(f):
fn()
- FileCheck() \
- .check_count(
- str="UserWarning: I am warning you",
- count=1,
- exactly=True) \
- .run(f.getvalue())
+ FileCheck().check_count(
+ str="UserWarning: I am warning you", count=1, exactly=True
+ ).run(f.getvalue())
def test_warn_only_once_in_loop_func(self):
def w():
@@ -67,12 +63,9 @@ class TestWarn(JitTestCase):
with redirect_stderr(f):
fn()
- FileCheck() \
- .check_count(
- str="UserWarning: I am warning you",
- count=1,
- exactly=True) \
- .run(f.getvalue())
+ FileCheck().check_count(
+ str="UserWarning: I am warning you", count=1, exactly=True
+ ).run(f.getvalue())
def test_warn_once_per_func(self):
def w1():
@@ -90,12 +83,9 @@ class TestWarn(JitTestCase):
with redirect_stderr(f):
fn()
- FileCheck() \
- .check_count(
- str="UserWarning: I am warning you",
- count=2,
- exactly=True) \
- .run(f.getvalue())
+ FileCheck().check_count(
+ str="UserWarning: I am warning you", count=2, exactly=True
+ ).run(f.getvalue())
def test_warn_once_per_func_in_loop(self):
def w1():
@@ -114,12 +104,9 @@ class TestWarn(JitTestCase):
with redirect_stderr(f):
fn()
- FileCheck() \
- .check_count(
- str="UserWarning: I am warning you",
- count=2,
- exactly=True) \
- .run(f.getvalue())
+ FileCheck().check_count(
+ str="UserWarning: I am warning you", count=2, exactly=True
+ ).run(f.getvalue())
def test_warn_multiple_calls_multiple_warnings(self):
@torch.jit.script
@@ -131,12 +118,9 @@ class TestWarn(JitTestCase):
fn()
fn()
- FileCheck() \
- .check_count(
- str="UserWarning: I am warning you",
- count=2,
- exactly=True) \
- .run(f.getvalue())
+ FileCheck().check_count(
+ str="UserWarning: I am warning you", count=2, exactly=True
+ ).run(f.getvalue())
def test_warn_multiple_calls_same_func_diff_stack(self):
def warn(caller: str):
@@ -155,13 +139,10 @@ class TestWarn(JitTestCase):
foo()
bar()
- FileCheck() \
- .check_count(
- str="UserWarning: I am warning you from foo",
- count=1,
- exactly=True) \
- .check_count(
- str="UserWarning: I am warning you from bar",
- count=1,
- exactly=True) \
- .run(f.getvalue())
+ FileCheck().check_count(
+ str="UserWarning: I am warning you from foo", count=1, exactly=True
+ ).check_count(
+ str="UserWarning: I am warning you from bar", count=1, exactly=True
+ ).run(
+ f.getvalue()
+ )
diff --git a/test/jit/test_with.py b/test/jit/test_with.py
index fdd11fbc68..4cbdfb2e99 100644
--- a/test/jit/test_with.py
+++ b/test/jit/test_with.py
@@ -32,6 +32,7 @@ class TestWith(JitTestCase):
Check that with statements that use the 'as' keyword to bind expressions
to targets work as expected.
"""
+
@torch.jit.script
class Context:
"""
@@ -189,6 +190,7 @@ class TestWith(JitTestCase):
Check that with statements that do not use the 'as' keyword to bind expressions
to targets work as expected.
"""
+
@torch.jit.script
class Context:
"""
@@ -345,6 +347,7 @@ class TestWith(JitTestCase):
Check that exceptions thrown in the bodies of with-statements are
handled correctly.
"""
+
@torch.jit.script
class Context:
"""
@@ -416,15 +419,21 @@ class TestWith(JitTestCase):
# checkScript and checkScriptRaisesRegex cannot be used because the string frontend will
# not compile class types (of which Context, the context manager being used for this test
# is one).
- with self.assertRaisesRegexWithHighlight(Exception, r"raised exception", "raise Exception(\"raised exception"):
+ with self.assertRaisesRegexWithHighlight(
+ Exception, r"raised exception", 'raise Exception("raised exception'
+ ):
test_exception(torch.randn(2), c)
self.assertEqual(c.count, 1)
- with self.assertRaisesRegexWithHighlight(Exception, r"raised exception", "raise Exception(\"raised exception"):
+ with self.assertRaisesRegexWithHighlight(
+ Exception, r"raised exception", 'raise Exception("raised exception'
+ ):
test_exception_nested(torch.randn(2), c)
self.assertEqual(c.count, 1)
- with self.assertRaisesRegexWithHighlight(Exception, r"raised exception", "raise Exception(\"raised exception"):
+ with self.assertRaisesRegexWithHighlight(
+ Exception, r"raised exception", 'raise Exception("raised exception'
+ ):
test_exception_fn_call(torch.randn(2), c)
self.assertEqual(c.count, 1)
@@ -505,7 +514,9 @@ class TestWith(JitTestCase):
return x
- def test_exit_incorrect_types(x: torch.Tensor, cm: ExitIncorrectTypes) -> torch.Tensor:
+ def test_exit_incorrect_types(
+ x: torch.Tensor, cm: ExitIncorrectTypes
+ ) -> torch.Tensor:
with cm as _:
pass
@@ -523,7 +534,9 @@ class TestWith(JitTestCase):
self.checkScript(test_no_enter_no_exit, (test_tensor, NoEnterNoExit()))
with self.assertRaisesRegexWithHighlight(
- RuntimeError, r"__enter__ must have only one argument and one return value", "cm"
+ RuntimeError,
+ r"__enter__ must have only one argument and one return value",
+ "cm",
):
self.checkScript(test_bad_enter, (test_tensor, BadEnter()))
@@ -539,7 +552,9 @@ class TestWith(JitTestCase):
test_exit_incorrect_types, (test_tensor, ExitIncorrectTypes())
)
- with self.assertRaisesRegexWithHighlight(RuntimeError, r"must return an object", "\"not_object\""):
+ with self.assertRaisesRegexWithHighlight(
+ RuntimeError, r"must return an object", '"not_object"'
+ ):
self.checkScript(test_enter_without_object, ())
def test_with_no_grad(self):
@@ -603,6 +618,7 @@ class TestWith(JitTestCase):
Check that torch.autograd.profiler.record_function context manager is
torchscriptable.
"""
+
def with_rf(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
with torch.autograd.profiler.record_function("foo"):
# Nested record_function.
diff --git a/test/jit/xnnpack/test_xnnpack_delegate.py b/test/jit/xnnpack/test_xnnpack_delegate.py
index 4c7bc4aa62..f6fc5357f2 100644
--- a/test/jit/xnnpack/test_xnnpack_delegate.py
+++ b/test/jit/xnnpack/test_xnnpack_delegate.py
@@ -7,6 +7,7 @@ import torch._C
torch.ops.load_library("//caffe2:xnnpack_backend")
+
class TestXNNPackBackend(unittest.TestCase):
def test_xnnpack_constant_data(self):
class Module(torch.nn.Module):
@@ -24,17 +25,19 @@ class TestXNNPackBackend(unittest.TestCase):
scripted_module,
{
"forward": {
- "inputs" : [torch.randn(4, 4, 4)],
- "outputs": [torch.randn(4, 4, 4)]
+ "inputs": [torch.randn(4, 4, 4)],
+ "outputs": [torch.randn(4, 4, 4)],
}
- }
+ },
)
for i in range(0, 20):
sample_input = torch.randn(4, 4, 4)
actual_output = scripted_module(sample_input)
expected_output = lowered_module(sample_input)
- self.assertTrue(torch.allclose(actual_output, expected_output, atol=1e-03, rtol=1e-03))
+ self.assertTrue(
+ torch.allclose(actual_output, expected_output, atol=1e-03, rtol=1e-03)
+ )
def test_xnnpack_lowering(self):
class Module(torch.nn.Module):
@@ -45,13 +48,11 @@ class TestXNNPackBackend(unittest.TestCase):
faulty_compile_spec = {
"backward": {
- "inputs" : [torch.zeros(1)],
+ "inputs": [torch.zeros(1)],
"outputs": [torch.zeros(1)],
}
}
- error_msg = (
- "method_compile_spec does not contain the \"forward\" key."
- )
+ error_msg = 'method_compile_spec does not contain the "forward" key.'
with self.assertRaisesRegex(
RuntimeError,
@@ -64,21 +65,21 @@ class TestXNNPackBackend(unittest.TestCase):
)
mismatch_compile_spec = {
- "forward" : {
- "inputs" : [torch.zeros(1), torch.zeros(1)],
- "outputs" : [torch.zeros(1)]
+ "forward": {
+ "inputs": [torch.zeros(1), torch.zeros(1)],
+ "outputs": [torch.zeros(1)],
}
}
- error_msg = ("method_compile_spec inputs do not match expected number of forward inputs")
+ error_msg = (
+ "method_compile_spec inputs do not match expected number of forward inputs"
+ )
with self.assertRaisesRegex(
RuntimeError,
error_msg,
):
_ = torch._C._jit_to_backend(
- "xnnpack",
- scripted_module,
- mismatch_compile_spec
+ "xnnpack", scripted_module, mismatch_compile_spec
)
lowered = torch._C._jit_to_backend(
@@ -86,10 +87,10 @@ class TestXNNPackBackend(unittest.TestCase):
scripted_module,
{
"forward": {
- "inputs" : [torch.zeros(1)],
+ "inputs": [torch.zeros(1)],
"outputs": [torch.zeros(1)],
}
- }
+ },
)
lowered(torch.zeros(1))
@@ -113,14 +114,16 @@ class TestXNNPackBackend(unittest.TestCase):
add_module,
{
"forward": {
- "inputs" : [sample_inputs[0].clone(), sample_inputs[1].clone()],
- "outputs": [sample_output]
+ "inputs": [sample_inputs[0].clone(), sample_inputs[1].clone()],
+ "outputs": [sample_output],
}
- }
+ },
)
actual_output = lowered_add_module.forward(sample_inputs[0], sample_inputs[1])
- self.assertTrue(torch.allclose(actual_output, expected_output, atol=1e-03, rtol=1e-03))
+ self.assertTrue(
+ torch.allclose(actual_output, expected_output, atol=1e-03, rtol=1e-03)
+ )
def test_xnnpack_broadcasting(self):
class AddModule(torch.nn.Module):
@@ -139,14 +142,16 @@ class TestXNNPackBackend(unittest.TestCase):
add_module,
{
"forward": {
- "inputs" : [sample_inputs[0], sample_inputs[1]],
- "outputs": [sample_output]
+ "inputs": [sample_inputs[0], sample_inputs[1]],
+ "outputs": [sample_output],
}
- }
+ },
)
actual_output = lowered_add_module.forward(sample_inputs[0], sample_inputs[1])
- self.assertTrue(torch.allclose(actual_output, expected_output, atol=1e-03, rtol=1e-03))
+ self.assertTrue(
+ torch.allclose(actual_output, expected_output, atol=1e-03, rtol=1e-03)
+ )
def test_xnnpack_unsupported(self):
class AddSpliceModule(torch.nn.Module):
@@ -173,8 +178,8 @@ class TestXNNPackBackend(unittest.TestCase):
add_module,
{
"forward": {
- "inputs" : [sample_inputs[0], sample_inputs[1]],
- "outputs": [sample_output]
+ "inputs": [sample_inputs[0], sample_inputs[1]],
+ "outputs": [sample_output],
}
- }
+ },
)
diff --git a/test/jit_hooks/model.py b/test/jit_hooks/model.py
index 4bc55c26cb..b2f6ba5ea0 100644
--- a/test/jit_hooks/model.py
+++ b/test/jit_hooks/model.py
@@ -1,23 +1,30 @@
import argparse
import os
import sys
+
import torch
# grab modules from test_jit_hooks.cpp
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from jit.test_hooks_modules import (
- create_forward_tuple_input, create_module_forward_multiple_inputs,
- create_module_forward_single_input, create_module_hook_return_nothing,
+ create_forward_tuple_input,
+ create_module_forward_multiple_inputs,
+ create_module_forward_single_input,
+ create_module_hook_return_nothing,
create_module_multiple_hooks_multiple_inputs,
- create_module_multiple_hooks_single_input, create_module_no_forward_input,
- create_module_same_hook_repeated, create_submodule_forward_multiple_inputs,
+ create_module_multiple_hooks_single_input,
+ create_module_no_forward_input,
+ create_module_same_hook_repeated,
+ create_submodule_forward_multiple_inputs,
create_submodule_forward_single_input,
create_submodule_hook_return_nothing,
create_submodule_multiple_hooks_multiple_inputs,
create_submodule_multiple_hooks_single_input,
create_submodule_same_hook_repeated,
- create_submodule_to_call_directly_with_hooks)
+ create_submodule_to_call_directly_with_hooks,
+)
+
# Create saved modules for JIT forward hooks and pre-hooks
def main():
@@ -30,23 +37,45 @@ def main():
save_name = options.export_script_module_to + "_"
tests = [
- ("test_submodule_forward_single_input", create_submodule_forward_single_input()),
- ("test_submodule_forward_multiple_inputs", create_submodule_forward_multiple_inputs()),
- ("test_submodule_multiple_hooks_single_input", create_submodule_multiple_hooks_single_input()),
- ("test_submodule_multiple_hooks_multiple_inputs", create_submodule_multiple_hooks_multiple_inputs()),
+ (
+ "test_submodule_forward_single_input",
+ create_submodule_forward_single_input(),
+ ),
+ (
+ "test_submodule_forward_multiple_inputs",
+ create_submodule_forward_multiple_inputs(),
+ ),
+ (
+ "test_submodule_multiple_hooks_single_input",
+ create_submodule_multiple_hooks_single_input(),
+ ),
+ (
+ "test_submodule_multiple_hooks_multiple_inputs",
+ create_submodule_multiple_hooks_multiple_inputs(),
+ ),
("test_submodule_hook_return_nothing", create_submodule_hook_return_nothing()),
("test_submodule_same_hook_repeated", create_submodule_same_hook_repeated()),
-
("test_module_forward_single_input", create_module_forward_single_input()),
- ("test_module_forward_multiple_inputs", create_module_forward_multiple_inputs()),
- ("test_module_multiple_hooks_single_input", create_module_multiple_hooks_single_input()),
- ("test_module_multiple_hooks_multiple_inputs", create_module_multiple_hooks_multiple_inputs()),
+ (
+ "test_module_forward_multiple_inputs",
+ create_module_forward_multiple_inputs(),
+ ),
+ (
+ "test_module_multiple_hooks_single_input",
+ create_module_multiple_hooks_single_input(),
+ ),
+ (
+ "test_module_multiple_hooks_multiple_inputs",
+ create_module_multiple_hooks_multiple_inputs(),
+ ),
("test_module_hook_return_nothing", create_module_hook_return_nothing()),
("test_module_same_hook_repeated", create_module_same_hook_repeated()),
-
("test_module_no_forward_input", create_module_no_forward_input()),
("test_forward_tuple_input", create_forward_tuple_input()),
- ("test_submodule_to_call_directly_with_hooks", create_submodule_to_call_directly_with_hooks())
+ (
+ "test_submodule_to_call_directly_with_hooks",
+ create_submodule_to_call_directly_with_hooks(),
+ ),
]
for name, model in tests: | 2.41.0 |
d9dc976aec8766d86c51548de2d4e5416fa9a39 | Thu, 11 Apr 2024 12:07:03 -0700 | [PATCH 0047/1000] Handle unqualified imports in custom Triton kernels (#123703) | Summary: If in a custom (user-written) Triton kernel an externally imported symbol is used directly, we need to codegen the corresponding import outside the kernel body in the Python wrapper. E.g., if the user code has this:

```
from triton.language.extra.cuda.libdevice import fast_dividef

@triton.jit
def my_kernel(...):
    ...
    x = fast_dividef(...)
    ...
```

The `from triton.language.extra.cuda.libdevice import fast_dividef` line needs to be carried over together with the `my_kernel` function. The PR adds this.

Test Plan:

```
$ python test/inductor/test_triton_kernels.py
...
----------------------------------------------------------------------
Ran 464 tests in 113.512s

OK
```

Differential Revision: [D55953241](https://our.internmc.facebook.com/intern/diff/D55953241)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/123703 Approved by: https://github.com/jansel, https://github.com/oulgen | diff --git a/test/inductor/test_triton_kernels.py b/test/inductor/test_triton_kernels.py
index 2b45920c42..162728b3a3 100644
--- a/test/inductor/test_triton_kernels.py
+++ b/test/inductor/test_triton_kernels.py
@@ -16,7 +16,7 @@ from torch._higher_order_ops.triton_kernel_wrap import (
from torch._inductor import metrics
from torch._inductor.utils import run_and_get_code
from torch.testing._internal import common_utils
-from torch.testing._internal.common_utils import skipIfRocm
+from torch.testing._internal.common_utils import skipIfRocm, TEST_WITH_ROCM
# Defines all the kernels for tests
from torch.testing._internal.triton_utils import * # noqa: F403
@@ -25,6 +25,12 @@ if HAS_CUDA:
import triton
from triton import language as tl
+ if not TEST_WITH_ROCM:
+ from triton.language.extra.cuda.libdevice import (
+ fast_dividef,
+ fast_dividef as my_fast_dividef,
+ )
+
# Define shared triton constants here.
CONSTANT_C = 4
@@ -1006,6 +1012,70 @@ def forward(self, x_1, output_1):
self.assertTrue("equal_to_1=(3,)" in sources[0])
self.assertEqual(compiled_out, eager_out)
+ @requires_cuda
+ @skipIfRocm
+ def test_triton_kernel_with_imported_symbol(self):
+ @triton.jit
+ def add_kernel_with_imported_symbol(
+ in_ptr,
+ out_ptr,
+ n_elements,
+ BLOCK_SIZE: "tl.constexpr",
+ ):
+ pid = tl.program_id(axis=0)
+ block_start = pid * BLOCK_SIZE
+ offsets = block_start + tl.arange(0, BLOCK_SIZE)
+ mask = offsets < n_elements
+ x = tl.load(in_ptr + offsets, mask=mask)
+ output = fast_dividef(x, 3.14)
+ tl.store(out_ptr + offsets, output, mask=mask)
+
+ def f(x):
+ out = torch.empty_like(x)
+ n_elements = x.numel()
+ add_kernel_with_imported_symbol[(n_elements,)](
+ x, out, n_elements, BLOCK_SIZE=16
+ )
+ return out
+
+ x = torch.randn(4, device="cuda")
+ eager_out = f(x)
+ compiled_out = torch.compile(f)(x)
+
+ self.assertEqual(compiled_out, eager_out)
+
+ @requires_cuda
+ @skipIfRocm
+ def test_triton_kernel_with_imported_symbol_with_custom_name(self):
+ @triton.jit
+ def add_kernel_with_imported_symbol(
+ in_ptr,
+ out_ptr,
+ n_elements,
+ BLOCK_SIZE: "tl.constexpr",
+ ):
+ pid = tl.program_id(axis=0)
+ block_start = pid * BLOCK_SIZE
+ offsets = block_start + tl.arange(0, BLOCK_SIZE)
+ mask = offsets < n_elements
+ x = tl.load(in_ptr + offsets, mask=mask)
+ output = my_fast_dividef(x, 3.14)
+ tl.store(out_ptr + offsets, output, mask=mask)
+
+ def f(x):
+ out = torch.empty_like(x)
+ n_elements = x.numel()
+ add_kernel_with_imported_symbol[(n_elements,)](
+ x, out, n_elements, BLOCK_SIZE=16
+ )
+ return out
+
+ x = torch.randn(4, device="cuda")
+ eager_out = f(x)
+ compiled_out = torch.compile(f)(x)
+
+ self.assertEqual(compiled_out, eager_out)
+
@requires_cuda
@skipIfRocm
@common_utils.parametrize("size", [4, 16])
diff --git a/torch/_inductor/codegen/wrapper.py b/torch/_inductor/codegen/wrapper.py
index 3ffab72a19..a754f3522e 100644
--- a/torch/_inductor/codegen/wrapper.py
+++ b/torch/_inductor/codegen/wrapper.py
@@ -1,6 +1,7 @@
import collections
import contextlib
import dataclasses
+import dis
import functools
import inspect
import operator
@@ -1155,6 +1156,15 @@ class WrapperCodeGen(CodeGen):
symbols_included = {original_name}
def traverse(cur_kernel):
+ # here we extract the unqualified names (i.e., not attributes and
+ # without prepended module name) loaded in the kernel code, which
+ # are matched with the co_names and __globals__ below to codegen
+ # the respective imports necessary for the kernel compilation
+ unqualified_loads = {
+ inst.argval
+ for inst in dis.Bytecode(cur_kernel.fn)
+ if inst.opname == "LOAD_GLOBAL"
+ }
for symbol_name in cur_kernel.fn.__code__.co_names:
if symbol_name in symbols_included:
continue
@@ -1170,6 +1180,22 @@ class WrapperCodeGen(CodeGen):
compile_wrapper.newline()
compile_wrapper.writeline(f"{symbol_name} = {symbol!r}")
symbols_included.add(symbol_name)
+ elif (
+ symbol_name in unqualified_loads
+ and symbol_name != "tl" # already imported
+ and hasattr(symbol, "__module__")
+ # only codegen imports from triton; JITFunctions
+ # imported from other modules will be codegened
+ # in the separate branch above
+ and symbol.__module__.startswith("triton")
+ ):
+ # a global symbol imported from triton is referenced
+ # without module qualification (i.e., `store` instead
+ # of `tl.store`): need to codegen an import
+ compile_wrapper.writeline(
+ f"from {symbol.__module__} import {symbol.__name__} as {symbol_name}"
+ )
+ symbols_included.add(symbol_name)
traverse(kernel)
| 2.41.0 |
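The import-detection added in the patch above boils down to disassembling the kernel function and matching its unqualified `LOAD_GLOBAL` names against the function's `__globals__`. Below is a minimal, Triton-free sketch of that idea; `toy_kernel` and the use of `math.sqrt` are hypothetical stand-ins (the real code walks a Triton `JITFunction.fn` and only emits imports for symbols whose `__module__` starts with `triton`):

```python
import dis
from math import sqrt  # stand-in for a symbol imported into the kernel's module


def toy_kernel(x):
    # uses an unqualified imported symbol, analogous to fast_dividef above
    return sqrt(x) + abs(x)


# Names the function loads without module qualification.
unqualified_loads = {
    inst.argval for inst in dis.Bytecode(toy_kernel) if inst.opname == "LOAD_GLOBAL"
}
print(unqualified_loads)  # e.g. {'sqrt', 'abs'} (set order varies)

# Cross-reference against __globals__ to decide which imports the generated
# wrapper must reproduce; builtins like abs resolve to None here and are skipped.
for name in sorted(unqualified_loads):
    symbol = toy_kernel.__globals__.get(name)
    if symbol is not None and getattr(symbol, "__module__", "").startswith("math"):
        print(f"from {symbol.__module__} import {symbol.__name__} as {name}")
```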
f6884f6209c44335b807079754fcea186b1b590 | Thu, 11 Apr 2024 12:09:50 -0700 | [PATCH 0048/1000] Added some extra repr to triton template buffers and added autotuned block configs to templated attention (#123813) | Pull Request resolved: https://github.com/pytorch/pytorch/pull/123813 Approved by: https://github.com/drisspg, https://github.com/shunting314 ghstack dependencies: #123768 | diff --git a/test/inductor/test_templated_attention.py b/test/inductor/test_templated_attention.py
index 64e8a97260..b374d6d77a 100644
--- a/test/inductor/test_templated_attention.py
+++ b/test/inductor/test_templated_attention.py
@@ -5,6 +5,7 @@ from collections import namedtuple
from typing import Callable
from unittest import expectedFailure, skipUnless
+from unittest.mock import patch
import torch
from torch._inductor.test_case import TestCase as InductorTestCase
@@ -168,6 +169,14 @@ class TestTemplatedSDPA(InductorTestCase):
with self.assertRaisesRegex(ValueError, "NYI: The target sequence length"):
_templated_attention(query, key, value, _identity_mod)
+ @supported_platform
+ @patch.object(torch._inductor.config, "max_autotune", True)
+ def test_max_autotune(self):
+ def score_mod(score, b, h, m, n):
+ return score * 2
+
+ self.run_test(score_mod)
+
common_utils.instantiate_parametrized_tests(TestTemplatedSDPA)
diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py
index e783009a45..860b71c545 100644
--- a/torch/_inductor/ir.py
+++ b/torch/_inductor/ir.py
@@ -3508,7 +3508,13 @@ class TemplateBuffer(Buffer):
class TritonTemplateBuffer(TemplateBuffer):
- pass
+ def __init__(self, layout, inputs, make_kernel_render, debug_extra=None):
+ super().__init__(layout, inputs, make_kernel_render)
+ self.debug_extra = debug_extra
+
+ def __str__(self):
+ out = f"TritonTemplateBuffer(layout={self.layout}, {self.debug_extra})"
+ return out
PrimitiveInfoType = Union[int, float, bool, str, List[Union[int, str, float, bool]]]
diff --git a/torch/_inductor/lowering.py b/torch/_inductor/lowering.py
index a137a97636..bdb7867acf 100644
--- a/torch/_inductor/lowering.py
+++ b/torch/_inductor/lowering.py
@@ -5737,17 +5737,23 @@ def templated_attention(*args, **kwargs):
choices: List[Any] = []
from .select_algorithm import autotune_select_algorithm
- sdpa_template.maybe_append_choice(
- choices=choices,
- input_nodes=(query, key, value),
- layout=layout,
- subgraphs=subgraph_buffer,
- num_stages=2,
- num_warps=4,
- BLOCK_M=64,
- BLOCK_N=128,
- BLOCK_DMODEL=query.get_size()[-1],
- )
+ for BLOCK_M, BLOCK_N, num_warps, num_stages in [
+ (128, 64, 4, 3),
+ (128, 128, 4, 3),
+ (128, 128, 8, 2),
+ (64, 128, 4, 3),
+ ]:
+ sdpa_template.maybe_append_choice(
+ choices=choices,
+ input_nodes=(query, key, value),
+ layout=layout,
+ subgraphs=subgraph_buffer,
+ num_stages=num_stages,
+ num_warps=num_warps,
+ BLOCK_M=BLOCK_M,
+ BLOCK_N=BLOCK_N,
+ BLOCK_DMODEL=query.get_size()[-1],
+ )
return autotune_select_algorithm(
"sdpa", choices, [query, key, value], layout
)
diff --git a/torch/_inductor/select_algorithm.py b/torch/_inductor/select_algorithm.py
index 03a8e63141..75deeaf5e3 100644
--- a/torch/_inductor/select_algorithm.py
+++ b/torch/_inductor/select_algorithm.py
@@ -760,6 +760,7 @@ class TritonTemplateCaller(ir.TritonTemplateCallerBase):
layout=self.layout,
inputs=self.input_nodes,
make_kernel_render=self.make_kernel_render,
+ debug_extra=self.debug_extra,
)
)
@@ -1094,6 +1095,8 @@ class AlgorithmSelectorCache(PersistentCache):
return result
def benchmark_in_current_process(choices):
+ from triton.runtime.autotuner import OutOfResources
+
timings = {}
for choice in choices:
try:
@@ -1113,6 +1116,9 @@ class AlgorithmSelectorCache(PersistentCache):
if "illegal memory access" in msg:
msg += "\n\nEither error in template or triton bug.\n"
raise ErrorFromChoice(msg, choice, debug_str()) # noqa: TRY200
+ except OutOfResources as e:
+ log.warning(e)
+ timing = float("inf")
except AssertionError as e:
raise AssertionError( # noqa: TRY200
f"Incorrect result from choice {choice}\n\n{e}" | 2.41.0 |
b8c81eb824c58820ad2b71b0c807280c6dd6c61 | Fri, 12 Apr 2024 00:05:45 +0000 | [PATCH 0049/1000] [PT] [FSDP] fix HSDP sharding placement (#123778) | Summary: https://github.com/pytorch/pytorch/pull/123230 formalized the contract for `ShardedTensor` sub group rank placement validation by making sure the placement rank is the global rank, to align with the general `torch.distributed` convention. The current HSDP allows for both `ShardedTensor` and `DTensor`. While `DTensor` will eventually replace `ShardedTensor`, its usage still exists and there's at least one test verifying the state dict with ST output. This got broken because the test is only run periodically, so it didn't block the other PR. Fixes [#123749](https://github.com/pytorch/pytorch/issues/123749) Test Plan: CI Differential Revision: D55991256 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123778 Approved by: https://github.com/Skylion007, https://github.com/wz337 | diff --git a/test/distributed/_shard/sharding_plan/test_sharding_plan.py b/test/distributed/_shard/sharding_plan/test_sharding_plan.py
index b5ea29b020..0536163a18 100644
--- a/test/distributed/_shard/sharding_plan/test_sharding_plan.py
+++ b/test/distributed/_shard/sharding_plan/test_sharding_plan.py
@@ -141,15 +141,15 @@ class TestShardingPlan(ShardedTensorTestBase):
colwise_sharding_spec = ChunkShardingSpec(
dim=0,
placements=[
- "rank:0/cuda:2",
- "rank:1/cuda:3",
+ "rank:2/cuda:2",
+ "rank:3/cuda:3",
],
)
rowwise_sharding_spec = ChunkShardingSpec(
dim=1,
placements=[
- "rank:0/cuda:2",
- "rank:1/cuda:3",
+ "rank:2/cuda:2",
+ "rank:3/cuda:3",
],
)
sharding_plan = ShardingPlan(
diff --git a/torch/distributed/_shard/api.py b/torch/distributed/_shard/api.py
index 5f17237ab5..9afa7d9e79 100644
--- a/torch/distributed/_shard/api.py
+++ b/torch/distributed/_shard/api.py
@@ -66,7 +66,7 @@ def _shard_tensor(
f'sharding_spec={sharding_spec} on rank: {current_rank} does not ' # type: ignore[index]
f'match with sharding_spec={entry[1]} on rank: {idx}')
- st = sharding_spec.shard(tensor, src_rank=src_rank, process_group=process_group)
+ st = sharding_spec.shard(tensor, src_rank=src_rank, process_group=pg)
return st
diff --git a/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py b/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py
index a96bc1c25f..2775dbd9dd 100644
--- a/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py
+++ b/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py
@@ -9,7 +9,7 @@ from torch.distributed._shard.sharded_tensor.utils import (
from torch.distributed._shard._utils import narrow_tensor
import torch.distributed as dist
import torch.distributed.distributed_c10d as distributed_c10d
-from typing import List, Union, TYPE_CHECKING
+from typing import cast, List, Optional, Union, TYPE_CHECKING
from ._internals import (
get_chunked_dim_size,
get_split_size,
@@ -129,11 +129,15 @@ class ChunkShardingSpec(ShardingSpec):
pin_memory=tensor.is_pinned()
)
current_rank = dist.get_rank(process_group)
+ current_global_rank = dist.get_rank()
tensor_meta = self.build_metadata(tensor.size(), tensor_properties)
local_shards = []
local_tensor = None
local_metadata = None
- tensors_to_scatter = [None] * dist.get_world_size(process_group)
+ tensors_to_scatter = cast(
+ List[Optional[torch.Tensor]],
+ [None] * dist.get_world_size(process_group),
+ )
sharding_dim_size = tensor.size()[self.dim] # type: ignore[index]
chunks = len(self.placements)
@@ -142,7 +146,7 @@ class ChunkShardingSpec(ShardingSpec):
scatter_shape[self.dim] = split_size # type: ignore[index]
for shard_meta in tensor_meta.shards_metadata:
- rank, device = _parse_and_validate_remote_device(process_group, shard_meta.placement)
+ remote_global_rank, device = _parse_and_validate_remote_device(process_group, shard_meta.placement)
if current_rank == src_rank:
# Reshape to get shard for this rank and we don't want autograd
# recording here for the narrow op and 'local_shard' should be a
@@ -157,9 +161,11 @@ class ChunkShardingSpec(ShardingSpec):
else:
tensor_to_scatter = narrowed_tensor.detach().clone().contiguous()
- tensors_to_scatter[rank] = tensor_to_scatter
+ tensors_to_scatter[
+ dist.get_group_rank(process_group, remote_global_rank)
+ ] = tensor_to_scatter
- if current_rank == rank:
+ if current_global_rank == remote_global_rank:
local_tensor = torch.empty(
scatter_shape, dtype=tensor.dtype, layout=tensor.layout, device=device)
local_metadata = shard_meta
diff --git a/torch/distributed/fsdp/_shard_utils.py b/torch/distributed/fsdp/_shard_utils.py
index 38db819866..8af94b7820 100644
--- a/torch/distributed/fsdp/_shard_utils.py
+++ b/torch/distributed/fsdp/_shard_utils.py
@@ -57,7 +57,11 @@ def _create_chunk_sharded_tensor(
else device.type
)
placements = [
- _get_remote_device_str(r, device_type, num_devices_per_node)
+ _get_remote_device_str(
+ dist.get_global_rank(pg, r),
+ device_type,
+ num_devices_per_node,
+ )
for r in range(len(chunk_sizes))
]
assert len(chunk_sizes) == len(chunk_offsets) == len(placements) | 2.41.0 |
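The essence of the fix above is keeping two rank spaces straight: placement strings such as `rank:2/cuda:2` now name global ranks, while the scatter list inside `ChunkShardingSpec.shard` is indexed by a rank's position within the subgroup. A small sketch of the two conversions involved, assuming an already-initialized job with a hypothetical 2-rank subgroup of global ranks (2, 3); the helpers `dist.get_group_rank` and `dist.get_global_rank` are real `torch.distributed` APIs.

```python
import torch.distributed as dist


def scatter_slot(pg: dist.ProcessGroup, global_rank: int) -> int:
    # Direction used in chunk_sharding_spec.py: a placement's global rank maps
    # to an index into the per-subgroup scatter list.
    # For a subgroup of global ranks (2, 3): scatter_slot(pg, 3) == 1.
    return dist.get_group_rank(pg, global_rank)


def placement_rank(pg: dist.ProcessGroup, group_rank: int) -> int:
    # Direction used in fsdp/_shard_utils.py: a local chunk index maps back to
    # the global rank that placement strings must now carry.
    # For the same subgroup: placement_rank(pg, 1) == 3.
    return dist.get_global_rank(pg, group_rank)
```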
8824fd212d236e3a467d81245f53b5ff532c934 | Fri, 12 Apr 2024 00:07:42 +0000 | [PATCH 0050/1000] [inductor] Fix recompiles bug for torch.full (#123811) | Fixes #123810 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123811 Approved by: https://github.com/peterbell10 | diff --git a/test/inductor/test_torchinductor_dynamic_shapes.py b/test/inductor/test_torchinductor_dynamic_shapes.py
index c3b0147fab..b7f80471cb 100644
--- a/test/inductor/test_torchinductor_dynamic_shapes.py
+++ b/test/inductor/test_torchinductor_dynamic_shapes.py
@@ -578,7 +578,7 @@ class TestInductorDynamic(TestCase):
actual = cfn(3)
self.assertEqual(expect, actual)
- def test_full(self, device):
+ def test_full_symbolic_value(self, device):
def fn(a):
return torch.full((3,), a), torch.full((3,), torch.sym_float(a))
@@ -587,6 +587,25 @@ class TestInductorDynamic(TestCase):
actual = cfn(5)
self.assertEqual(expect, actual)
+ def test_full_recompiles(self, device):
+ def fn(x):
+ _, L = x.shape
+ return torch.full((L, L), torch.finfo(torch.float16).min, device=device)
+
+ cfn = self.compile_fn(fn)
+
+ import functools
+
+ input_fn = functools.partial(torch.randint, 10, 1000, device=device)
+
+ cfn(input_fn((2, 3)))
+ cfn(input_fn((2, 4))) # expect don't recompile here
+
+ # check compiled times of frame 0
+ from torch._dynamo.convert_frame import FRAME_COMPILE_COUNTER
+
+ self.assertEqual(FRAME_COMPILE_COUNTER[0], 1)
+
@parametrize(
"op",
[
diff --git a/torch/_inductor/decomposition.py b/torch/_inductor/decomposition.py
index 27b1289cd3..00640f62fd 100644
--- a/torch/_inductor/decomposition.py
+++ b/torch/_inductor/decomposition.py
@@ -134,7 +134,7 @@ def full(size, fill_value, **kwargs):
dtype = kwargs.get("dtype")
if dtype is None:
kwargs["dtype"] = type_to_dtype(type(fill_value))
- return aten.full(size, fill_value, **kwargs)
+ return torch.full(size, fill_value, **kwargs)
return NotImplemented
| 2.41.0 |
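The new regression test above asserts on an internal frame counter; outside the test suite, one way (a sketch, not taken from the patch) to spot this class of problem is to enable the `recompiles` logging artifact and vary only the data-dependent dimension:

```python
import torch

torch._logging.set_logs(recompiles=True)  # print a reason whenever a frame recompiles


@torch.compile(dynamic=True)
def fn(x):
    _, L = x.shape
    # Before this fix, this pattern triggered a fresh compile for every new L
    # (https://github.com/pytorch/pytorch/issues/123810).
    return torch.full((L, L), torch.finfo(torch.float16).min, device=x.device)


fn(torch.randint(10, 1000, (2, 3)))
fn(torch.randint(10, 1000, (2, 4)))  # should reuse the dynamic graph, no recompile log
```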
2ba180e552aa05b21d07ccceece0db41846267b | Thu, 11 Apr 2024 14:06:21 -0700 | [PATCH 0051/1000] [c10d] add more fields for periodic logging (#123860) | Summary: Added the names of the last enqueued, started, and completed collectives, in addition to their seq IDs. Test Plan: CI Pull Request resolved: https://github.com/pytorch/pytorch/pull/123860 Approved by: https://github.com/XilunWu | diff --git a/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp b/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp
index def79cde2b..8f2d63656e 100644
--- a/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp
+++ b/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp
@@ -1551,11 +1551,17 @@ void ProcessGroupNCCL::watchdogHandler() {
lastStatusUpdateTime, std::chrono::steady_clock::now()) >=
kWorkStatusUpdatePeriodMs) {
::c10d::C10dLoggingData data;
+ // logging integers
data.integers["pg_id"] = uid_;
data.integers["rank"] = rank_;
data.integers["global_rank"] = globalRank();
data.integers["last_enqueued_work"] = lastEnqueuedSeq_;
+ data.integers["last_started_work"] = lastStartedSeq_;
data.integers["last_completed_work"] = lastCompletedSeq_;
+ // logging strings
+ data.strings["last_enqueued_work_name"] = lastEnqueuedWorkName_;
+ data.strings["last_started_work_name"] = lastStartedWorkName_;
+ data.strings["last_completed_work_name"] = lastCompletedWorkName_;
logger->log(data);
lastStatusUpdateTime = std::chrono::steady_clock::now();
}
@@ -1647,9 +1653,19 @@ void ProcessGroupNCCL::watchdogHandler() {
}
}
+ // a work could be started but not completed, so we should not update
+ // lastStartedSeq_ and lastStartedOpName_ if the work state is checked
+ // multiple times after the start
+ if (lastStartedSeq_ < static_cast<int64_t>(work.seq_) &&
+ work.isStarted()) {
+ lastStartedSeq_ = work.seq_;
+ lastStartedWorkName_ = opTypeToString(work.opType_);
+ }
+
// Clean up completed work
if (work.isCompleted()) {
lastCompletedSeq_ = work.seq_;
+ lastCompletedWorkName_ = opTypeToString(work.opType_);
NCCLTraceBuffer::get()->retire_id(work.trace_id_, true);
if (onCompletionHook_) {
// Move Work object to completedWorkList_ to be consumed by the hook
@@ -2259,6 +2275,7 @@ void ProcessGroupNCCL::workEnqueue(
// get deadlock. Here we enqueue work without outputs_.
workMetaList_.emplace_back(*work);
lastEnqueuedSeq_ = work->seq_;
+ lastEnqueuedWorkName_ = opTypeToString(work->opType_);
lastWorkListUpdateTime_ = std::chrono::steady_clock::now();
}
}
diff --git a/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp b/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp
index ea9007986c..18201db287 100644
--- a/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp
+++ b/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp
@@ -1072,11 +1072,23 @@ class TORCH_API ProcessGroupNCCL : public Backend {
// initialized to be -1 to indicate no collective has been enqueued
int64_t lastEnqueuedSeq_{-1};
+ // the name of the last collective enqueued into workMetaList_
+ std::string lastEnqueuedWorkName_;
+
+ // the sequential number of the last colletive started as the kernal
+ int64_t lastStartedSeq_{-1};
+
+ // the name of the last collective started as the kernal
+ std::string lastStartedWorkName_;
+
// the sequential number of the last colletive completed marked by
// the watchdog thread
// initialized to be -1 to indicate no collective has been completed
int64_t lastCompletedSeq_{-1};
+ // the name of the last collective completed
+ std::string lastCompletedWorkName_;
+
std::exception_ptr watchDogException_ = nullptr;
size_t uid_; | 2.41.0 |
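The added fields live in C++ watchdog state; as a rough Python-side picture of what the periodic status record now carries (illustrative bookkeeping only, not a PyTorch API), each seq counter is paired with the op name of the same work item:

```python
from dataclasses import dataclass
from typing import Dict, Union


@dataclass
class CollectiveProgress:
    """Illustrative mirror of the per-process-group state the NCCL watchdog logs."""

    last_enqueued_seq: int = -1
    last_enqueued_name: str = ""
    last_started_seq: int = -1
    last_started_name: str = ""
    last_completed_seq: int = -1
    last_completed_name: str = ""

    def on_start(self, seq: int, op_name: str) -> None:
        # A started-but-unfinished work item is polled repeatedly; only move forward,
        # matching the lastStartedSeq_ guard added in the watchdog loop.
        if seq > self.last_started_seq:
            self.last_started_seq, self.last_started_name = seq, op_name

    def log_record(self) -> Dict[str, Union[int, str]]:
        # Shape of the periodic status line: the existing integers plus the new strings.
        return {
            "last_enqueued_work": self.last_enqueued_seq,
            "last_started_work": self.last_started_seq,
            "last_completed_work": self.last_completed_seq,
            "last_enqueued_work_name": self.last_enqueued_name,
            "last_started_work_name": self.last_started_name,
            "last_completed_work_name": self.last_completed_name,
        }
```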
fe672b146e776d18fcb794ca3cbb4e52d32ba8e | Thu, 11 Apr 2024 08:19:27 -0700 | [PATCH 0052/1000] compile: ban mutations on non-compositional uses of as_strided (#122502) | Fixes https://github.com/pytorch/pytorch/issues/104505 I was originally going to ban all usages of as_strided + mutation in functionalization. But I'm pretty sure that as_strided + mutation is fine when we are calling as_strided on a base tensor. So in this PR I added a slightly more conservative check: if we see an as_strided + mutation, where the input to the as_strided was **another** view op, then I error loudly in functionalization and link to the github issue above (in case anyone runs into this in the real world). Pull Request resolved: https://github.com/pytorch/pytorch/pull/122502 Approved by: https://github.com/ezyang, https://github.com/albanD | diff --git a/aten/src/ATen/FunctionalStorageImpl.cpp b/aten/src/ATen/FunctionalStorageImpl.cpp
index 22bba985a4..78a5b6a9cf 100644
--- a/aten/src/ATen/FunctionalStorageImpl.cpp
+++ b/aten/src/ATen/FunctionalStorageImpl.cpp
@@ -10,7 +10,7 @@ namespace at::functionalization {
ViewMeta ViewMeta::to_out_idx(int64_t out_idx) {
if (out_idx == this->out_index) return *this;
- return ViewMeta(forward_fn, reverse_fn, is_multi_output, out_idx);
+ return ViewMeta(forward_fn, reverse_fn, is_multi_output, is_as_strided, out_idx);
}
// Note [Functionalization: Alias Removal Part 2]
@@ -103,6 +103,18 @@ FunctionalStorageImpl::FunctionalStorageImpl(const Tensor& base)
void FunctionalStorageImpl::add_update(const Tensor& updated_val, const std::vector<ViewMeta>& metas) {
TORCH_CHECK(!frozen_, "cannot mutate tensors with frozen storage");
+
+ if (metas.size() > 1) {
+ for (size_t i = 1; i < metas.size(); ++i) {
+ // Skipping this check for XLA. Would be good to add it back, but it is failing XLA CI
+ TORCH_CHECK(updated_val.device().type() == c10::DeviceType::XLA || !metas[i].is_as_strided,
+"During torch.compile, encountered a mutation on a view chain of length ", metas.size(), ", where view ", i,
+" was an as_strided() call. as_strided() is non-compositional, and therefore is not possible to functionalize properly today,"
+"so this behavior is banned in compile. As a workaround, you can either remove the mutation from the model code, or you "
+"can insert a graph break right before the mutation with torch._dynamo.graph_break(). If you would like this behavior to "
+"work properly, please comment on https://github.com/pytorch/pytorch/issues/104505.");
+ }
+ }
updates_.push_back({updated_val, metas});
generation_++;
}
diff --git a/aten/src/ATen/FunctionalStorageImpl.h b/aten/src/ATen/FunctionalStorageImpl.h
index 7ec74c2b97..8d899fe016 100644
--- a/aten/src/ATen/FunctionalStorageImpl.h
+++ b/aten/src/ATen/FunctionalStorageImpl.h
@@ -32,11 +32,13 @@ struct ViewMeta {
std::function<Tensor(const Tensor&, int64_t)> forward,
std::function<Tensor(const Tensor&, const Tensor&, int64_t)> reverse,
bool is_multi_output = false,
+ bool is_as_strided = false,
int64_t out_idx = 0)
: forward_fn(std::move(forward)),
reverse_fn(std::move(reverse)),
out_index(out_idx),
- is_multi_output(is_multi_output) {}
+ is_multi_output(is_multi_output),
+ is_as_strided(is_as_strided) {}
std::function<Tensor(const Tensor&, int64_t)> forward_fn;
std::function<Tensor(const Tensor&, const Tensor&, int64_t)> reverse_fn;
@@ -46,6 +48,8 @@ struct ViewMeta {
// Tells us if this is a multi-output view
bool is_multi_output;
+ bool is_as_strided;
+
// Returns a copy of the current ViewMeta, if out_idx matches the current
// out_index. Otherwise, returns a new ViewMeta with the same forward/reverse
// functions, but a new out index.
diff --git a/test/dynamo/test_repros.py b/test/dynamo/test_repros.py
index a27478dfed..e9693159f0 100644
--- a/test/dynamo/test_repros.py
+++ b/test/dynamo/test_repros.py
@@ -4414,6 +4414,39 @@ class ReproTests(torch._dynamo.test_case.TestCase):
T = IncByTwo
self.assertEqual(fn(x), opt_fn(x))
+ # https://github.com/pytorch/pytorch/issues/104505
+ def test_as_strided_on_base_with_mutation_works(self):
+ def foo(a):
+ f = a.as_strided((2,), (1,), 0)
+ f.add_(1.0)
+ return a
+
+ a = torch.randn(2, 4)
+ a_ref = a.clone()
+ out_ref = foo(a_ref)
+ f_compiled = torch.compile(foo, backend="aot_eager")
+ out = f_compiled(a)
+ self.assertEqual(out_ref, out)
+ self.assertEqual(a_ref, a)
+
+ # https://github.com/pytorch/pytorch/issues/104505
+ def test_as_strided_on_existing_view_banned(self):
+ def foo(a):
+ e = a.diagonal()
+ f = e.as_strided((2,), (1,), 0)
+ f.add_(1.0)
+ return a
+
+ a = torch.randn(2, 4)
+ a_ref = a.clone()
+ out_ref = foo(a_ref)
+ f_compiled = torch.compile(foo, backend="aot_eager")
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "encountered a mutation on a view chain of length 2, where view 1 was an as_strided",
+ ):
+ out = f_compiled(a)
+
def test_dont_aggressively_write_assert(self):
record_graph = torch._dynamo.testing.EagerAndRecordGraphs()
diff --git a/torch/csrc/autograd/FunctionsManual.cpp b/torch/csrc/autograd/FunctionsManual.cpp
index 9eaad59ab7..12342b4322 100644
--- a/torch/csrc/autograd/FunctionsManual.cpp
+++ b/torch/csrc/autograd/FunctionsManual.cpp
@@ -3237,12 +3237,11 @@ Tensor as_strided_scatter_backward(
// take the perf hit and contiguify grad for now.
auto grad_ = grad.contiguous();
auto grad_slice = grad_.as_strided_symint(sizes, strides, storage_offset);
- auto result =
- grad_.new_zeros_symint(input_geometry.sym_sizes())
- .as_strided_symint(
- input_geometry.sym_sizes(), input_geometry.sym_strides());
- auto result_slice =
- result.as_strided_symint(sizes, strides, std::move(storage_offset));
+ auto result_buffer = grad_.new_zeros_symint(input_geometry.sym_sizes());
+ auto result = result_buffer.as_strided_symint(
+ input_geometry.sym_sizes(), input_geometry.sym_strides());
+ auto result_slice = result_buffer.as_strided_symint(
+ sizes, strides, std::move(storage_offset));
result_slice.copy_(grad_slice);
return result;
}
diff --git a/torchgen/gen_functionalization_type.py b/torchgen/gen_functionalization_type.py
index 91ef97b821..191d939d2d 100644
--- a/torchgen/gen_functionalization_type.py
+++ b/torchgen/gen_functionalization_type.py
@@ -431,7 +431,8 @@ def emit_view_functionalization_body(
{reverse_lambda.decl()} {{
return {reverse_lambda.inner_call()}
}},
- /*is_multi_output=*/{str(is_multi_output_view).lower()}
+ /*is_multi_output=*/{str(is_multi_output_view).lower()},
+ /*is_as_strided=*/{str(str(f.func.name) == 'as_strided').lower()}
);
auto out = at::functionalization::impl::create_functional_tensor_with_view_meta(tmp_output, {view_tensor_name}, view_meta);
// See Note [Propagating strides in the functionalization pass] | 2.41.0 |
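The new error message above offers two ways out: drop the mutation, or break the graph right before it with `torch._dynamo.graph_break()`. A sketch of the second option on the same shapes as the banned test case; whether the break preserves the exact eager semantics for a larger model is worth verifying case by case.

```python
import torch


def foo(a):
    e = a.diagonal()                  # view
    f = e.as_strided((2,), (1,), 0)   # as_strided on top of another view
    torch._dynamo.graph_break()       # keep the mutation out of the traced graph
    f.add_(1.0)                       # would otherwise hit the new functionalization error
    return a


a = torch.randn(2, 4)
out = torch.compile(foo, backend="aot_eager")(a)
```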
efaf54dc46034189cb36b345764a5a9a5b693d4 | Thu, 11 Apr 2024 08:19:28 -0700 | [PATCH 0054/1000] Fakeifying views shouldn't create symbols when dynamic=False (#123348) | Fixes https://github.com/pytorch/pytorch/issues/123298 I was also seeing some crashes in torchtrain due to dynamic shapes, even when I set `compile(dynamic=False)` (cc @voznesenskym @penguinwu @EikanWang @jgong5 @Guobing-Chen @XiaobingSuper @zhuhaozhe @blzheng @wenzhe-nrv @jiayisunx @chenyang78 @kadeng @chauhang @wanchaol). This doesn't fix the underlying dynamic shape issues with compile + DTensor, but it does prevent dynamic shapes from leaking in. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123348 Approved by: https://github.com/ezyang ghstack dependencies: #122502, #122751 | diff --git a/test/dynamo/test_subclasses.py b/test/dynamo/test_subclasses.py
index 2bc7101c55..387b6bf59b 100644
--- a/test/dynamo/test_subclasses.py
+++ b/test/dynamo/test_subclasses.py
@@ -1456,6 +1456,24 @@ class TestNestedTensor(torch._dynamo.test_case.TestCase):
for nt_view in self._get_views():
self._input_view_test(nt_view)
+ def test_subclass_gives_static_shapes_when_dynamic_false(self):
+ def check_graph(gm, *args):
+ first_node_example_val = next(iter(gm.graph.nodes)).meta["example_value"]
+ # We compiled with dynamic=False, expect no SymInt sizes on our placeholders
+ self.assertTrue(
+ all(isinstance(x, int) for x in first_node_example_val.shape)
+ )
+ return gm
+
+ @torch.compile(backend=check_graph, dynamic=False)
+ def f(x):
+ return x + 1
+
+ x_inner = torch.ones(4)
+ x = TwoTensor(x_inner, x_inner)
+ x_view = x.view(2, 2)
+ out = f(x_view)
+
# NJT1 -> Dense -> NJT2 -> Dense view
# During view replay, the Dense -> NJT2 part will construct an intermediate,
# symbolically-sized NJT that is immediately deconstructed to return the final dense
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_input_mutation_aliases_and_none_require_gradients b/test/dynamo_expected_failures/TestAOTAutograd.test_input_mutation_aliases_and_none_require_gradients
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/torch/_subclasses/meta_utils.py b/torch/_subclasses/meta_utils.py
index 7cd3a33f9e..8c70103d87 100644
--- a/torch/_subclasses/meta_utils.py
+++ b/torch/_subclasses/meta_utils.py
@@ -764,10 +764,24 @@ class MetaConverter:
return base.as_strided(sizes, strides, storage_offset)
from torch._dynamo.source import EphemeralSource
- from torch.fx.experimental.symbolic_shapes import sym_eq
+ from torch.fx.experimental.symbolic_shapes import (
+ StatelessSymbolicContext,
+ sym_eq,
+ )
def symint_visitor_fn(s):
- if shape_env is None:
+ nonlocal symbolic_context
+ from torch.fx.experimental.symbolic_shapes import DimDynamic
+
+ all_static_sizes = (
+ symbolic_context is not None
+ and isinstance(symbolic_context, StatelessSymbolicContext)
+ and all(
+ x is DimDynamic.STATIC for x in symbolic_context.dynamic_sizes
+ )
+ )
+ # Can't just rely on shape env being None - dynamo always initializes it
+ if all_static_sizes or shape_env is None:
return s
# NB: The symbol here is expected to be simplified out because we a priori | 2.41.0 |
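The core of the check added above is whether every entry of `StatelessSymbolicContext.dynamic_sizes` is `DimDynamic.STATIC`; when it is, `symint_visitor_fn` returns the plain integer size instead of minting a symbol. A standalone sketch of that predicate follows; building the context by hand is purely illustrative (dynamo normally constructs it), and the two imports match the ones used in the patch.

```python
from torch.fx.experimental.symbolic_shapes import DimDynamic, StatelessSymbolicContext

# What dynamo produces for a rank-2 tensor under torch.compile(dynamic=False):
# every dimension is marked static.
ctx = StatelessSymbolicContext(dynamic_sizes=[DimDynamic.STATIC, DimDynamic.STATIC])

all_static = all(d is DimDynamic.STATIC for d in ctx.dynamic_sizes)
print(all_static)  # True -> view fake-ification keeps int sizes, no new symbols
```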
b648afba4901521ccff124ebad931f8dd7aa13a | Fri, 12 Apr 2024 01:21:50 +0000 | [PATCH 0055/1000] Enable UFMT on test/test_multiprocessing (#123840) | part of https://github.com/pytorch/pytorch/issues/123062 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123840 Approved by: https://github.com/ezyang | diff --git a/.lintrunner.toml b/.lintrunner.toml
index d492ed12e7..6a4472073f 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -1375,7 +1375,6 @@ exclude_patterns = [
'test/test_modules.py',
'test/test_monitor.py',
'test/test_mps.py',
- 'test/test_multiprocessing.py',
'test/test_multiprocessing_spawn.py',
'test/test_namedtensor.py',
'test/test_namedtuple_return_api.py',
diff --git a/test/test_multiprocessing.py b/test/test_multiprocessing.py
index 44aa012c43..b45c4cded0 100644
--- a/test/test_multiprocessing.py
+++ b/test/test_multiprocessing.py
@@ -1,12 +1,12 @@
# Owner(s): ["module: multiprocessing"]
import contextlib
+import copy
import gc
import os
import sys
import time
import unittest
-import copy
from sys import platform
import torch
@@ -14,9 +14,19 @@ import torch.cuda
import torch.multiprocessing as mp
import torch.utils.hooks
from torch.nn import Parameter
-from torch.testing._internal.common_utils import (TestCase, run_tests, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN, TEST_WITH_ASAN,
- load_tests, slowTest, TEST_WITH_TSAN, TEST_WITH_TORCHDYNAMO,
- TEST_WITH_ROCM, IS_MACOS)
+from torch.testing._internal.common_utils import (
+ IS_MACOS,
+ IS_WINDOWS,
+ load_tests,
+ NO_MULTIPROCESSING_SPAWN,
+ run_tests,
+ slowTest,
+ TEST_WITH_ASAN,
+ TEST_WITH_ROCM,
+ TEST_WITH_TORCHDYNAMO,
+ TEST_WITH_TSAN,
+ TestCase,
+)
# load_tests from common_utils is used to automatically filter tests for
@@ -24,16 +34,18 @@ from torch.testing._internal.common_utils import (TestCase, run_tests, IS_WINDOW
load_tests = load_tests
TEST_REPEATS = 30
-HAS_SHM_FILES = os.path.isdir('/dev/shm')
+HAS_SHM_FILES = os.path.isdir("/dev/shm")
MAX_WAITING_TIME_IN_SECONDS = 30
-TEST_CUDA_IPC = torch.cuda.is_available() and \
- sys.platform != 'darwin' and \
- sys.platform != 'win32' and \
- not TEST_WITH_ROCM # https://github.com/pytorch/pytorch/issues/90940
+TEST_CUDA_IPC = (
+ torch.cuda.is_available()
+ and sys.platform != "darwin"
+ and sys.platform != "win32"
+ and not TEST_WITH_ROCM
+) # https://github.com/pytorch/pytorch/issues/90940
TEST_MULTIGPU = TEST_CUDA_IPC and torch.cuda.device_count() > 1
if TEST_CUDA_IPC:
- torch.cuda.memory._set_allocator_settings('expandable_segments:False')
+ torch.cuda.memory._set_allocator_settings("expandable_segments:False")
class SubProcess(mp.Process):
@@ -50,7 +62,7 @@ def _test_cuda_ipc_deadlock_actor(queue, iterations):
for i in range(iterations):
if not queue.empty():
queue.get()
- time.sleep(.01)
+ time.sleep(0.01)
def _test_cuda_ipc_deadlock_learner(queue, iterations):
@@ -58,7 +70,7 @@ def _test_cuda_ipc_deadlock_learner(queue, iterations):
for i in range(iterations):
if not queue.full():
queue.put(copy.deepcopy(net.state_dict()))
- time.sleep(.01)
+ time.sleep(0.01)
def simple_fill(queue, event):
@@ -107,8 +119,14 @@ def sum_tensors(inq, outq):
with torch.cuda.device(1):
tensors = inq.get()
for tensor in tensors:
- outq.put((tensor.sum().item(), tensor.get_device(),
- tensor.numel(), tensor.storage().size()))
+ outq.put(
+ (
+ tensor.sum().item(),
+ tensor.get_device(),
+ tensor.numel(),
+ tensor.storage().size(),
+ )
+ )
def queue_get_exception(inqueue, outqueue):
@@ -118,7 +136,7 @@ def queue_get_exception(inqueue, outqueue):
except Exception as e:
outqueue.put(e)
else:
- outqueue.put('no exception')
+ outqueue.put("no exception")
# Multiply by two in a separate stream
@@ -148,7 +166,7 @@ def autograd_sharing(queue, ready, master_modified, device, is_parameter):
ready.set()
master_modified.wait()
- expected_var = torch.arange(1., 26, device=device).view(5, 5)
+ expected_var = torch.arange(1.0, 26, device=device).view(5, 5)
expected_var[0, 0] = 1000
is_ok = var.data.equal(expected_var)
var.data[:] = torch.ones(5, 5, device=device)
@@ -174,14 +192,16 @@ def mixed_type_producer(queue, event):
event.wait()
event.clear()
+
def simple_autograd_function(a=1):
torch.rand(3).requires_grad_(True).mean().backward()
- return a ** 2
+ return a**2
+
@contextlib.contextmanager
def fs_sharing():
prev_strategy = mp.get_sharing_strategy()
- mp.set_sharing_strategy('file_system')
+ mp.set_sharing_strategy("file_system")
try:
yield
finally:
@@ -189,7 +209,6 @@ def fs_sharing():
class leak_checker:
-
def __init__(self, test_case):
self.checked_pids = [os.getpid()]
self.test_case = test_case
@@ -228,7 +247,7 @@ class leak_checker:
return False
result = self._has_shm_files()
- if not result or mp.get_sharing_strategy() != 'file_system' or not wait:
+ if not result or mp.get_sharing_strategy() != "file_system" or not wait:
return result
total_waiting_time = 0
@@ -243,23 +262,25 @@ class leak_checker:
def _has_shm_files(self):
gc.collect()
- names = ['torch_' + str(pid) for pid in self.checked_pids]
- for filename in os.listdir('/dev/shm'):
+ names = ["torch_" + str(pid) for pid in self.checked_pids]
+ for filename in os.listdir("/dev/shm"):
for name in names:
if filename.startswith(name):
return True
return False
[email protected](TEST_WITH_TSAN, "TSAN is not fork-safe since we're forking in a multi-threaded environment")
[email protected](
+ TEST_WITH_TSAN,
+ "TSAN is not fork-safe since we're forking in a multi-threaded environment",
+)
class TestMultiprocessing(TestCase):
-
def tearDown(self):
# This will keep tests isolated from each-other
if torch.cuda.is_available():
torch.cuda.ipc_collect()
- def _test_sharing(self, ctx=mp, device='cpu', dtype=torch.float, repeat=1):
+ def _test_sharing(self, ctx=mp, device="cpu", dtype=torch.float, repeat=1):
def test_fill():
x = torch.zeros(5, 5).to(device, dtype)
q = ctx.Queue()
@@ -360,24 +381,36 @@ class TestMultiprocessing(TestCase):
for _ in range(repeat):
do_test()
- @unittest.skipIf(platform == 'darwin', "file descriptor strategy is not supported on macOS")
- @unittest.skipIf(TEST_WITH_ASAN,
- "seems to hang with ASAN, see https://github.com/pytorch/pytorch/issues/5326")
+ @unittest.skipIf(
+ platform == "darwin", "file descriptor strategy is not supported on macOS"
+ )
+ @unittest.skipIf(
+ TEST_WITH_ASAN,
+ "seems to hang with ASAN, see https://github.com/pytorch/pytorch/issues/5326",
+ )
def test_fd_sharing(self):
self._test_sharing(repeat=TEST_REPEATS)
- @unittest.skipIf(platform == 'darwin', "file descriptor strategy is not supported on macOS")
+ @unittest.skipIf(
+ platform == "darwin", "file descriptor strategy is not supported on macOS"
+ )
def test_fd_preserve_sharing(self):
self._test_preserve_sharing(repeat=TEST_REPEATS)
- @unittest.skipIf(platform == 'darwin', "file descriptor strategy is not supported on macOS")
+ @unittest.skipIf(
+ platform == "darwin", "file descriptor strategy is not supported on macOS"
+ )
def test_fd_pool(self):
self._test_pool(repeat=TEST_REPEATS)
- @unittest.skipIf(TEST_WITH_ASAN,
- "seems to hang with ASAN, see https://github.com/pytorch/pytorch/issues/5326")
- @unittest.skipIf(TEST_WITH_TORCHDYNAMO,
- "Fail to clean up temporary /dev/shm/torch_* file, see https://github.com/pytorch/pytorch/issues/91467")
+ @unittest.skipIf(
+ TEST_WITH_ASAN,
+ "seems to hang with ASAN, see https://github.com/pytorch/pytorch/issues/5326",
+ )
+ @unittest.skipIf(
+ TEST_WITH_TORCHDYNAMO,
+ "Fail to clean up temporary /dev/shm/torch_* file, see https://github.com/pytorch/pytorch/issues/91467",
+ )
def test_fs_sharing(self):
with fs_sharing():
# The test works but is very slow on MacOS, see https://github.com/pytorch/pytorch/pull/93183,
@@ -385,21 +418,27 @@ class TestMultiprocessing(TestCase):
repeat = 1 if IS_MACOS else TEST_REPEATS
self._test_sharing(repeat=repeat)
- @unittest.skipIf(TEST_WITH_TORCHDYNAMO,
- "Fail to clean up temporary /dev/shm/torch_* file, see https://github.com/pytorch/pytorch/issues/91467")
+ @unittest.skipIf(
+ TEST_WITH_TORCHDYNAMO,
+ "Fail to clean up temporary /dev/shm/torch_* file, see https://github.com/pytorch/pytorch/issues/91467",
+ )
def test_fs_preserve_sharing(self):
with fs_sharing():
self._test_preserve_sharing(repeat=TEST_REPEATS)
- @unittest.skipIf(TEST_WITH_TORCHDYNAMO,
- "Fail to clean up temporary /dev/shm/torch_* file, see https://github.com/pytorch/pytorch/issues/91467")
+ @unittest.skipIf(
+ TEST_WITH_TORCHDYNAMO,
+ "Fail to clean up temporary /dev/shm/torch_* file, see https://github.com/pytorch/pytorch/issues/91467",
+ )
def test_fs_pool(self):
with fs_sharing():
self._test_pool(repeat=TEST_REPEATS)
@unittest.skipIf(not HAS_SHM_FILES, "don't not how to check if shm files exist")
- @unittest.skipIf(TEST_WITH_TORCHDYNAMO,
- "Fail to clean up temporary /dev/shm/torch_* file, see https://github.com/pytorch/pytorch/issues/91467")
+ @unittest.skipIf(
+ TEST_WITH_TORCHDYNAMO,
+ "Fail to clean up temporary /dev/shm/torch_* file, see https://github.com/pytorch/pytorch/issues/91467",
+ )
def test_fs(self):
def queue_put():
x = torch.DoubleStorage(4)
@@ -426,39 +465,53 @@ class TestMultiprocessing(TestCase):
@unittest.skipIf(IS_WINDOWS, "Test needs to use fork multiprocessing")
def test_autograd_errors(self):
- ctx = mp.get_context('fork')
+ ctx = mp.get_context("fork")
simple_autograd_function()
# Autograd only uses thread when GPUs are involved
- if torch.cuda.is_available() or torch.backends.mps.is_available() or torch.xpu.is_available():
- with self.assertRaisesRegex(RuntimeError, r'Unable to handle autograd'):
+ if (
+ torch.cuda.is_available()
+ or torch.backends.mps.is_available()
+ or torch.xpu.is_available()
+ ):
+ with self.assertRaisesRegex(RuntimeError, r"Unable to handle autograd"):
with ctx.Pool(3) as pool:
pool.map(simple_autograd_function, [1, 2, 3])
else:
with ctx.Pool(3) as pool:
pool.map(simple_autograd_function, [1, 2, 3])
- @unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Test needs to use spawn multiprocessing")
+ @unittest.skipIf(
+ NO_MULTIPROCESSING_SPAWN, "Test needs to use spawn multiprocessing"
+ )
def test_autograd_fine_with_spawn(self):
- ctx = mp.get_context('spawn')
+ ctx = mp.get_context("spawn")
simple_autograd_function()
with ctx.Pool(3) as pool:
pool.map(simple_autograd_function, [1, 2, 3])
- @unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
- don't support multiprocessing with spawn start method")
- @unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
+ @unittest.skipIf(
+ NO_MULTIPROCESSING_SPAWN,
+ "Disabled for environments that \
+ don't support multiprocessing with spawn start method",
+ )
+ @unittest.skipIf(not TEST_CUDA_IPC, "CUDA IPC not available")
def test_cuda_simple(self):
torch.cuda.FloatTensor([1]) # initialize CUDA outside of leak checker
- self._test_sharing(mp.get_context('spawn'), 'cuda', torch.float)
-
- @unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
- don't support multiprocessing with spawn start method")
- @unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
+ self._test_sharing(mp.get_context("spawn"), "cuda", torch.float)
+
+ @unittest.skipIf(
+ NO_MULTIPROCESSING_SPAWN,
+ "Disabled for environments that \
+ don't support multiprocessing with spawn start method",
+ )
+ @unittest.skipIf(not TEST_CUDA_IPC, "CUDA IPC not available")
def test_cuda_memory_allocation(self):
- ctx = mp.get_context('spawn')
+ ctx = mp.get_context("spawn")
q = ctx.Queue()
e = ctx.Event()
- p = ctx.Process(target=send_and_delete_tensors, args=(q, e, 'cuda', torch.int, 5))
+ p = ctx.Process(
+ target=send_and_delete_tensors, args=(q, e, "cuda", torch.int, 5)
+ )
p.start()
t = []
for _ in range(5):
@@ -468,15 +521,19 @@ class TestMultiprocessing(TestCase):
e.set()
p.join(1)
- @unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
- don't support multiprocessing with spawn start method")
- @unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
+ @unittest.skipIf(
+ NO_MULTIPROCESSING_SPAWN,
+ "Disabled for environments that \
+ don't support multiprocessing with spawn start method",
+ )
+ @unittest.skipIf(not TEST_CUDA_IPC, "CUDA IPC not available")
def test_cuda_ipc_deadlock(self):
- ctx = mp.get_context('spawn')
+ ctx = mp.get_context("spawn")
queue = ctx.Queue(1)
processes = dict(
a=ctx.Process(target=_test_cuda_ipc_deadlock_actor, args=(queue, 100)),
- l=ctx.Process(target=_test_cuda_ipc_deadlock_learner, args=(queue, 100)))
+ l=ctx.Process(target=_test_cuda_ipc_deadlock_learner, args=(queue, 100)),
+ )
for p in processes.values():
p.start()
@@ -487,22 +544,30 @@ class TestMultiprocessing(TestCase):
for p in processes.values():
self.assertFalse(p.is_alive())
-
@slowTest
- @unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
- don't support multiprocessing with spawn start method")
- @unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
+ @unittest.skipIf(
+ NO_MULTIPROCESSING_SPAWN,
+ "Disabled for environments that \
+ don't support multiprocessing with spawn start method",
+ )
+ @unittest.skipIf(not TEST_CUDA_IPC, "CUDA IPC not available")
def test_cuda_send_many(self, name=None, size=5, count=100000):
- ctx = mp.get_context('spawn')
+ ctx = mp.get_context("spawn")
q1 = ctx.Queue()
q2 = ctx.Queue()
q3 = ctx.Queue()
e1 = ctx.Event()
e2 = ctx.Event()
e3 = ctx.Event()
- p1 = ctx.Process(target=send_and_delete_tensors, args=(q1, e1, 'cuda', torch.long, count, size))
+ p1 = ctx.Process(
+ target=send_and_delete_tensors,
+ args=(q1, e1, "cuda", torch.long, count, size),
+ )
p2 = ctx.Process(target=receive_and_send, args=(q1, q2, e2, count))
- p3 = ctx.Process(target=receive_and_send_sum, args=(q2, q3, e3, 'cuda', torch.long, count, size))
+ p3 = ctx.Process(
+ target=receive_and_send_sum,
+ args=(q2, q3, e3, "cuda", torch.long, count, size),
+ )
p1.start()
p2.start()
p3.start()
@@ -516,18 +581,21 @@ class TestMultiprocessing(TestCase):
p2.join(1)
p3.join(1)
- @unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
- don't support multiprocessing with spawn start method")
- @unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
- @unittest.skipIf(not TEST_MULTIGPU, 'found only 1 GPU')
+ @unittest.skipIf(
+ NO_MULTIPROCESSING_SPAWN,
+ "Disabled for environments that \
+ don't support multiprocessing with spawn start method",
+ )
+ @unittest.skipIf(not TEST_CUDA_IPC, "CUDA IPC not available")
+ @unittest.skipIf(not TEST_MULTIGPU, "found only 1 GPU")
def test_cuda_small_tensors(self):
# Check multiple small tensors which will likely use the same
# underlying cached allocation
- ctx = mp.get_context('spawn')
+ ctx = mp.get_context("spawn")
tensors = []
for i in range(5):
device = i % 2
- tensors += [torch.arange(i * 5., (i + 1) * 5).cuda(device)]
+ tensors += [torch.arange(i * 5.0, (i + 1) * 5).cuda(device)]
inq = ctx.Queue()
outq = ctx.Queue()
@@ -542,7 +610,7 @@ class TestMultiprocessing(TestCase):
for i, _tensor in enumerate(tensors):
v, device, tensor_size, storage_size = results[i]
- self.assertEqual(v, torch.arange(i * 5., (i + 1) * 5).sum())
+ self.assertEqual(v, torch.arange(i * 5.0, (i + 1) * 5).sum())
self.assertEqual(device, i % 2)
self.assertEqual(tensor_size, 5)
@@ -563,8 +631,8 @@ class TestMultiprocessing(TestCase):
# memory 'file' for performance reason
torch.cuda.ipc_collect()
- @unittest.skipIf(IS_WINDOWS, 'not applicable to Windows (only fails with fork)')
- @unittest.skipIf(not torch.cuda.is_available(), 'CUDA not available')
+ @unittest.skipIf(IS_WINDOWS, "not applicable to Windows (only fails with fork)")
+ @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_cuda_bad_call(self):
# Initialize CUDA
t = torch.zeros(5, 5).cuda().cpu()
@@ -576,10 +644,11 @@ class TestMultiprocessing(TestCase):
p.join()
self.assertIsInstance(outq.get(), RuntimeError)
- @unittest.skipIf(IS_WINDOWS, 'not applicable to Windows (only fails with fork)')
- @unittest.skipIf(not torch.cuda.is_available(), 'CUDA not available')
+ @unittest.skipIf(IS_WINDOWS, "not applicable to Windows (only fails with fork)")
+ @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_wrong_cuda_fork(self):
- stderr = TestCase.runWithPytorchAPIUsageStderr("""\
+ stderr = TestCase.runWithPytorchAPIUsageStderr(
+ """\
import torch
from torch.multiprocessing import Process
def run(rank):
@@ -595,14 +664,18 @@ if __name__ == "__main__":
processes.append(p)
for p in processes:
p.join()
-""")
+"""
+ )
self.assertRegex(stderr, "Cannot re-initialize CUDA in forked subprocess.")
- @unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
- don't support multiprocessing with spawn start method")
- @unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
+ @unittest.skipIf(
+ NO_MULTIPROCESSING_SPAWN,
+ "Disabled for environments that \
+ don't support multiprocessing with spawn start method",
+ )
+ @unittest.skipIf(not TEST_CUDA_IPC, "CUDA IPC not available")
def test_event(self):
- ctx = mp.get_context('spawn')
+ ctx = mp.get_context("spawn")
queue = ctx.Queue()
ready = ctx.Event()
done = ctx.Event()
@@ -631,19 +704,23 @@ if __name__ == "__main__":
event.synchronize()
c2p.put(1) # notify parent synchronization is done
- @unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
- don't support multiprocessing with spawn start method")
- @unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
+ @unittest.skipIf(
+ NO_MULTIPROCESSING_SPAWN,
+ "Disabled for environments that \
+ don't support multiprocessing with spawn start method",
+ )
+ @unittest.skipIf(not TEST_CUDA_IPC, "CUDA IPC not available")
def test_event_multiprocess(self):
event = torch.cuda.Event(enable_timing=False, interprocess=True)
self.assertTrue(event.query())
- ctx = mp.get_context('spawn')
+ ctx = mp.get_context("spawn")
p2c = ctx.SimpleQueue()
c2p = ctx.SimpleQueue()
p = ctx.Process(
target=TestMultiprocessing._test_event_multiprocess_child,
- args=(event, p2c, c2p))
+ args=(event, p2c, c2p),
+ )
p.start()
c2p.get() # wait for until child process is ready
@@ -656,13 +733,16 @@ if __name__ == "__main__":
self.assertTrue(event.query())
p.join()
- @unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
- don't support multiprocessing with spawn start method")
- @unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
- @unittest.skipIf(not TEST_MULTIGPU, 'found only 1 GPU')
+ @unittest.skipIf(
+ NO_MULTIPROCESSING_SPAWN,
+ "Disabled for environments that \
+ don't support multiprocessing with spawn start method",
+ )
+ @unittest.skipIf(not TEST_CUDA_IPC, "CUDA IPC not available")
+ @unittest.skipIf(not TEST_MULTIGPU, "found only 1 GPU")
def test_event_handle_multi_gpu(self):
- d0 = torch.device('cuda:0')
- d1 = torch.device('cuda:1')
+ d0 = torch.device("cuda:0")
+ d1 = torch.device("cuda:1")
with torch.cuda.device(d0):
e0 = torch.cuda.Event(enable_timing=False, interprocess=True)
@@ -689,19 +769,23 @@ if __name__ == "__main__":
c2p.put(1) # notify synchronization is done in child
p2c.get() # wait for parent to finish before destructing child event
- @unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
- don't support multiprocessing with spawn start method")
- @unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
+ @unittest.skipIf(
+ NO_MULTIPROCESSING_SPAWN,
+ "Disabled for environments that \
+ don't support multiprocessing with spawn start method",
+ )
+ @unittest.skipIf(not TEST_CUDA_IPC, "CUDA IPC not available")
def test_event_handle_importer(self):
e0 = torch.cuda.Event(enable_timing=False, interprocess=True)
self.assertTrue(e0.query())
- ctx = mp.get_context('spawn')
+ ctx = mp.get_context("spawn")
p2c = ctx.SimpleQueue()
c2p = ctx.SimpleQueue()
p = ctx.Process(
target=TestMultiprocessing._test_event_handle_importer_consumer,
- args=(e0.ipc_handle(), p2c, c2p))
+ args=(e0.ipc_handle(), p2c, c2p),
+ )
p.start()
c2p.get() # wait for child to become ready
@@ -719,8 +803,7 @@ if __name__ == "__main__":
def _test_event_handle_exporter_consumer(handle, p2c, c2p):
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
- e1 = torch.cuda.Event.from_ipc_handle(
- torch.cuda.current_device(), handle)
+ e1 = torch.cuda.Event.from_ipc_handle(torch.cuda.current_device(), handle)
torch.cuda._sleep(50000000) # spin for about 50 ms
e1.record()
c2p.put(0)
@@ -728,18 +811,22 @@ if __name__ == "__main__":
# destructing e1
p2c.get()
- @unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
- don't support multiprocessing with spawn start method")
- @unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
+ @unittest.skipIf(
+ NO_MULTIPROCESSING_SPAWN,
+ "Disabled for environments that \
+ don't support multiprocessing with spawn start method",
+ )
+ @unittest.skipIf(not TEST_CUDA_IPC, "CUDA IPC not available")
def test_event_handle_exporter(self):
e0 = torch.cuda.Event(enable_timing=False, interprocess=True)
- ctx = mp.get_context('spawn')
+ ctx = mp.get_context("spawn")
p2c = ctx.SimpleQueue()
c2p = ctx.SimpleQueue()
p = ctx.Process(
target=TestMultiprocessing._test_event_handle_exporter_consumer,
- args=(e0.ipc_handle(), p2c, c2p))
+ args=(e0.ipc_handle(), p2c, c2p),
+ )
p.start()
# wait for event in child process is recorded
c2p.get()
@@ -758,21 +845,24 @@ if __name__ == "__main__":
self.assertEqual(out, empty)
def test_empty_tensor_sharing(self):
- self._test_empty_tensor_sharing(torch.float32, torch.device('cpu'))
- self._test_empty_tensor_sharing(torch.int64, torch.device('cpu'))
+ self._test_empty_tensor_sharing(torch.float32, torch.device("cpu"))
+ self._test_empty_tensor_sharing(torch.int64, torch.device("cpu"))
- @unittest.skipIf(not torch.cuda.is_available(), 'CUDA not available')
+ @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_empty_tensor_sharing_cuda(self):
- self._test_empty_tensor_sharing(torch.float32, torch.device('cuda'))
- self._test_empty_tensor_sharing(torch.int64, torch.device('cuda'))
+ self._test_empty_tensor_sharing(torch.float32, torch.device("cuda"))
+ self._test_empty_tensor_sharing(torch.int64, torch.device("cuda"))
def _test_autograd_sharing(self, var, ctx=mp, is_parameter=False):
- device = 'cuda' if var.is_cuda else 'cpu'
+ device = "cuda" if var.is_cuda else "cpu"
ready = ctx.Event()
master_modified = ctx.Event()
queue = ctx.Queue()
- p = ctx.Process(target=autograd_sharing, args=(queue, ready, master_modified, device, is_parameter))
+ p = ctx.Process(
+ target=autograd_sharing,
+ args=(queue, ready, master_modified, device, is_parameter),
+ )
p.daemon = True
p.start()
@@ -823,28 +913,35 @@ if __name__ == "__main__":
time.sleep(5)
p.join()
- @unittest.skipIf(TEST_WITH_ASAN,
- "non-deterministically hangs with ASAN https://github.com/pytorch/pytorch/issues/94024")
+ @unittest.skipIf(
+ TEST_WITH_ASAN,
+ "non-deterministically hangs with ASAN https://github.com/pytorch/pytorch/issues/94024",
+ )
def test_variable_sharing(self):
for requires_grad in [True, False]:
- var = torch.arange(1., 26).view(5, 5).requires_grad_(requires_grad)
+ var = torch.arange(1.0, 26).view(5, 5).requires_grad_(requires_grad)
self._test_autograd_sharing(var)
# See https://github.com/pytorch/pytorch/issues/14997
- @unittest.skipIf(TEST_WITH_ASAN,
- "non-deterministically hangs with ASAN")
+ @unittest.skipIf(TEST_WITH_ASAN, "non-deterministically hangs with ASAN")
def test_leaf_variable_sharing(self):
- devices = ['cpu']
+ devices = ["cpu"]
if torch.cuda.is_available() and not NO_MULTIPROCESSING_SPAWN and TEST_CUDA_IPC:
- devices.append('cuda')
+ devices.append("cuda")
for device in devices:
for requires_grad in [True, False]:
- var = torch.arange(1., 26, device=device).view(5, 5).requires_grad_(requires_grad)
+ var = (
+ torch.arange(1.0, 26, device=device)
+ .view(5, 5)
+ .requires_grad_(requires_grad)
+ )
self.assertTrue(var.is_leaf)
- ctx = mp.get_context('spawn') if device == 'cuda' else mp
+ ctx = mp.get_context("spawn") if device == "cuda" else mp
ready = ctx.Event()
queue = ctx.Queue()
- p = ctx.Process(target=requires_grad_variable_sharing, args=(queue, ready))
+ p = ctx.Process(
+ target=requires_grad_variable_sharing, args=(queue, ready)
+ )
p.daemon = True
p.start()
queue.put(var)
@@ -853,65 +950,86 @@ if __name__ == "__main__":
self.assertTrue(worker_requires_grad == requires_grad)
def test_non_leaf_variable_sharing(self):
- devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
+ devices = ["cpu"] if not torch.cuda.is_available() else ["cpu", "cuda"]
for device in devices:
- var0 = torch.arange(1., 26, device=device).view(5, 5).requires_grad_(True)
+ var0 = torch.arange(1.0, 26, device=device).view(5, 5).requires_grad_(True)
var = var0 * 2
# Don't use a regular Queue; it uses a background thread (which
# means we can't catch the exceptions)
queue = mp.SimpleQueue()
- self.assertRaisesRegex(RuntimeError, r'requires_grad', lambda: queue.put(var))
-
- @unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
- don't support multiprocessing with spawn start method")
- @unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
+ self.assertRaisesRegex(
+ RuntimeError, r"requires_grad", lambda: queue.put(var)
+ )
+
+ @unittest.skipIf(
+ NO_MULTIPROCESSING_SPAWN,
+ "Disabled for environments that \
+ don't support multiprocessing with spawn start method",
+ )
+ @unittest.skipIf(not TEST_CUDA_IPC, "CUDA IPC not available")
def test_cuda_variable_sharing(self):
for requires_grad in [True, False]:
- var = torch.arange(1., 26, device='cuda').view(5, 5).requires_grad_(requires_grad)
- self._test_autograd_sharing(var, mp.get_context('spawn'))
-
- @unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
- don't support multiprocessing with spawn start method")
- @unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
+ var = (
+ torch.arange(1.0, 26, device="cuda")
+ .view(5, 5)
+ .requires_grad_(requires_grad)
+ )
+ self._test_autograd_sharing(var, mp.get_context("spawn"))
+
+ @unittest.skipIf(
+ NO_MULTIPROCESSING_SPAWN,
+ "Disabled for environments that \
+ don't support multiprocessing with spawn start method",
+ )
+ @unittest.skipIf(not TEST_CUDA_IPC, "CUDA IPC not available")
def test_mixed_types_cuda_sharing(self):
- self._test_mixed_types_cuda_sharing(mp.get_context('spawn'))
+ self._test_mixed_types_cuda_sharing(mp.get_context("spawn"))
def test_parameter_sharing(self):
- param = Parameter(torch.arange(1., 26).view(5, 5))
+ param = Parameter(torch.arange(1.0, 26).view(5, 5))
self._test_autograd_sharing(param, is_parameter=True)
- @unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
- don't support multiprocessing with spawn start method")
- @unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
+ @unittest.skipIf(
+ NO_MULTIPROCESSING_SPAWN,
+ "Disabled for environments that \
+ don't support multiprocessing with spawn start method",
+ )
+ @unittest.skipIf(not TEST_CUDA_IPC, "CUDA IPC not available")
def test_cuda_parameter_sharing(self):
- param = Parameter(torch.arange(1., 26, device='cuda').view(5, 5))
- self._test_autograd_sharing(param, mp.get_context('spawn'), is_parameter=True)
-
- @unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
- don't support multiprocessing with spawn start method")
+ param = Parameter(torch.arange(1.0, 26, device="cuda").view(5, 5))
+ self._test_autograd_sharing(param, mp.get_context("spawn"), is_parameter=True)
+
+ @unittest.skipIf(
+ NO_MULTIPROCESSING_SPAWN,
+ "Disabled for environments that \
+ don't support multiprocessing with spawn start method",
+ )
def test_integer_parameter_serialization_cpu(self):
- self._test_integer_parameter_serialization(device='cpu')
-
- @unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
- don't support multiprocessing with spawn start method")
- @unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
+ self._test_integer_parameter_serialization(device="cpu")
+
+ @unittest.skipIf(
+ NO_MULTIPROCESSING_SPAWN,
+ "Disabled for environments that \
+ don't support multiprocessing with spawn start method",
+ )
+ @unittest.skipIf(not TEST_CUDA_IPC, "CUDA IPC not available")
def test_integer_parameter_serialization_cuda(self):
- self._test_integer_parameter_serialization(device='cuda')
+ self._test_integer_parameter_serialization(device="cuda")
def _test_integer_parameter_serialization(self, device):
param = torch.nn.Parameter(
- torch.tensor(0, dtype=torch.int64, device=device),
- requires_grad=False
+ torch.tensor(0, dtype=torch.int64, device=device), requires_grad=False
)
- ctx = mp.get_context('spawn')
+ ctx = mp.get_context("spawn")
p = ctx.Process(target=integer_parameter_serialization, args=(param,))
p.start()
p.join()
self.assertEqual(
- 0, p.exitcode,
- msg=f'Failed to serialize successfully for "{device}" device!'
+ 0,
+ p.exitcode,
+ msg=f'Failed to serialize successfully for "{device}" device!',
)
def test_empty_shared(self):
@@ -924,7 +1042,9 @@ if __name__ == "__main__":
t.share_memory_()
self.assertTrue(t.is_shared())
- @unittest.skipIf(platform == 'darwin', "file descriptor strategy is not supported on macOS")
+ @unittest.skipIf(
+ platform == "darwin", "file descriptor strategy is not supported on macOS"
+ )
def test_is_shared(self):
self._test_is_shared()
@@ -932,11 +1052,11 @@ if __name__ == "__main__":
with fs_sharing():
self._test_is_shared()
- @unittest.skipIf(not torch.cuda.is_available(), 'CUDA not available')
+ @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_is_shared_cuda(self):
t = torch.randn(5, 5).cuda()
self.assertTrue(t.is_shared())
-if __name__ == '__main__':
+if __name__ == "__main__":
run_tests() | 2.41.0 |
68e5ced5df34f1aef3703654f76e03f5126b534 | Tue, 9 Apr 2024 15:40:30 -0700 | [PATCH 0057/1000] [Dispatcher] Collect autograd sequence numbers on PythonTLSSnapshot dispatch keys (#123304) | Fixes #121758 **TL;DR**: When profiling is turned on, the dispatcher will sometimes attach the autograd sequence number to the recorded profiler event. This PR expands the set of profiler events onto which we attach sequence numbers. Before, we'd only attach a sequence number if the current dispatch key was an Autograd dispatch key. Now, we attach a sequence number if the current dispatch key **set** contains Autograd. **Context**: The use case for this is torch.profiler for python subclasses. Autograd attaches a "sequence number" to all ops that it encounters during the forward pass. Then, the corresponding sequence number can be associated with a backward kernel when backward is executed. This is used by the profiler to associate the forward ops to the backward ops; a forward op and a backward op with the same sequence number are "linked" in some post-processing step. Prior to this PR, this profiler feature didn't work for python subclasses. The reason is that we don't collect profiler information for all the dispatches for a given kernel; we only profile the initial `call`, and not the subsequent `redispatch` invocations. Normally, an Autograd key (if we're running with autograd) is the highest dispatch key, so the initial `call` that we profile is dispatched to an Autograd key, and we collect the sequence number. But when we're dealing with a python subclass, the first dispatch key is PythonTLSSnapshot, which eventually redispatches into Autograd. We don't record the Autograd sequence number in that case because we don't record redispatches. To fix this, this PR collects a sequence number whenever the dispatch key **set** contains an Autograd key. That means we might sometimes collect multiple events with the same sequence number, or possibly attach sequence numbers when we won't actually use them (e.g. maybe if the initial dispatch key handler removes Autograd for some reason). Although this might be a bit confusing for users looking at the sequence_nr directly, I think the main use case is for the profiler to create fwd-bwd links; and those should still be generated correctly in these cases. Differential Revision: [D55724190](https://our.internmc.facebook.com/intern/diff/D55724190) Pull Request resolved: https://github.com/pytorch/pytorch/pull/123304 Approved by: https://github.com/soulitzer | diff --git a/aten/src/ATen/core/dispatch/Dispatcher.cpp b/aten/src/ATen/core/dispatch/Dispatcher.cpp
index cd96c5825c..a355bbe92f 100644
--- a/aten/src/ATen/core/dispatch/Dispatcher.cpp
+++ b/aten/src/ATen/core/dispatch/Dispatcher.cpp
@@ -498,24 +498,38 @@ std::vector<OperatorName> Dispatcher::getRegistrationsForDispatchKey(c10::option
});
}
-int64_t Dispatcher::sequenceNumberForRunningRecordFunction(DispatchKey dispatchKey) {
+int64_t Dispatcher::sequenceNumberForRunningRecordFunction(DispatchKey dispatchKey, DispatchKeySet dispatchKeySet) {
int64_t seq_num = -1;
// Setting sequence number in the Autograd case to associate
// the forward range with the corresponding Autograd's node
- if (isIncludedInAlias(dispatchKey, DispatchKey::Autograd) && at::GradMode::is_enabled()) {
+
+ // Note: this records a sequence number for both Autograd keys, and for
+ // non-Autograd keys where the dispatchKeySet still contains an autograd key.
+ // This means that we might collect the same sequence nubmer two different
+ // events if they all occurred above Autograd and still had the Autograd
+ // dispatch key in the dispatch key set.
+ // However, this usually doesn't happen: normally the first call will
+ // go through the call() or callBoxed() path in the dispatcher, while
+ // subsequent redispatches go through redispatch() or redispatchBoxed().
+ // `call` has profiler instrumentation, whereas `redispatch` doesn't.
+ // So usually, we'll collect a sequence number on the first call() if the
+ // dispatch keys contain autograd, and not on subsequent redispatches.
+ bool dispatchHasAutograd = !(dispatchKeySet & autograd_dispatch_keyset).empty();
+
+ if (dispatchHasAutograd && at::GradMode::is_enabled()) {
seq_num = at::sequence_number::peek();
}
return seq_num;
}
-void Dispatcher::runRecordFunction(at::RecordFunction& guard, at::RecordFunction::schema_ref_t schema_ref, DispatchKey dispatchKey, c10::ArrayRef<const c10::IValue> args) {
- guard.before(schema_ref, args, sequenceNumberForRunningRecordFunction(dispatchKey));
+void Dispatcher::runRecordFunction(at::RecordFunction& guard, at::RecordFunction::schema_ref_t schema_ref, DispatchKey dispatchKey, DispatchKeySet dispatchKeySet, c10::ArrayRef<const c10::IValue> args) {
+ guard.before(schema_ref, args, sequenceNumberForRunningRecordFunction(dispatchKey, dispatchKeySet));
}
-void Dispatcher::runRecordFunction(at::RecordFunction& guard, at::RecordFunction::schema_ref_t schema_ref, DispatchKey dispatchKey) {
+void Dispatcher::runRecordFunction(at::RecordFunction& guard, at::RecordFunction::schema_ref_t schema_ref, DispatchKey dispatchKey, DispatchKeySet dispatchKeySet) {
// Setting sequence number in the Autograd case to associate
// the forward range with the corresponding Autograd's node
- guard.before(schema_ref, sequenceNumberForRunningRecordFunction(dispatchKey));
+ guard.before(schema_ref, sequenceNumberForRunningRecordFunction(dispatchKey, dispatchKeySet));
}
#ifdef FBCODE_CAFFE2
bool Dispatcher::profilingOperatorEvents() {
diff --git a/aten/src/ATen/core/dispatch/Dispatcher.h b/aten/src/ATen/core/dispatch/Dispatcher.h
index d383ee9556..020f9e8e6d 100644
--- a/aten/src/ATen/core/dispatch/Dispatcher.h
+++ b/aten/src/ATen/core/dispatch/Dispatcher.h
@@ -304,9 +304,9 @@ public:
private:
Dispatcher();
- static int64_t sequenceNumberForRunningRecordFunction(DispatchKey dispatchKey);
- static void runRecordFunction(at::RecordFunction& guard, at::RecordFunction::schema_ref_t schema_ref, DispatchKey dispatchKey);
- static void runRecordFunction(at::RecordFunction& guard, at::RecordFunction::schema_ref_t schema_ref, DispatchKey dispatchKey, c10::ArrayRef<const c10::IValue> args);
+ static int64_t sequenceNumberForRunningRecordFunction(DispatchKey dispatchKey, DispatchKeySet dispatchKeySet);
+ static void runRecordFunction(at::RecordFunction& guard, at::RecordFunction::schema_ref_t schema_ref, DispatchKey dispatchKey, DispatchKeySet dispatchKeySet);
+ static void runRecordFunction(at::RecordFunction& guard, at::RecordFunction::schema_ref_t schema_ref, DispatchKey dispatchKey, DispatchKeySet dispatchKeySet, c10::ArrayRef<const c10::IValue> args);
#ifdef FBCODE_CAFFE2
static bool profilingOperatorEvents();
@@ -630,15 +630,15 @@ inline Return Dispatcher::callWithDispatchKeySlowPath(const TypedOperatorHandle<
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(lastArgIdx == num_boxed_args);
// I don't *think* we need std::launder here, because IValue has
// no subclasses and no const or reference fields.
- runRecordFunction(guard, schema_ref, dispatchKey, c10::ArrayRef<const c10::IValue>(reinterpret_cast<IValue *>(boxedArgs), num_boxed_args));
+ runRecordFunction(guard, schema_ref, dispatchKey, dispatchKeySet, c10::ArrayRef<const c10::IValue>(reinterpret_cast<IValue *>(boxedArgs), num_boxed_args));
for (size_t ii = 0; ii < num_boxed_args; ++ii) {
reinterpret_cast<IValue *>(&boxedArgs[ii])->~IValue();
}
} else {
- runRecordFunction(guard, schema_ref, dispatchKey);
+ runRecordFunction(guard, schema_ref, dispatchKey, dispatchKeySet);
}
} else {
- runRecordFunction(guard, schema_ref, dispatchKey);
+ runRecordFunction(guard, schema_ref, dispatchKey, dispatchKeySet);
}
if (C10_UNLIKELY(guard.needsOutputs())) {
@@ -732,8 +732,8 @@ inline void Dispatcher::callBoxed(const OperatorHandle& op, Stack* stack) const
auto dispatchKey = dispatchKeySet.highestPriorityTypeId();
auto& schema = op.schema();
auto schema_ref = std::reference_wrapper<const FunctionSchema>(schema);
- guard.needsInputs() ? runRecordFunction(guard, schema_ref, dispatchKey, c10::ArrayRef<const c10::IValue>(stack->data(), stack->size()))
- : runRecordFunction(guard, schema_ref, dispatchKey);
+ guard.needsInputs() ? runRecordFunction(guard, schema_ref, dispatchKey, dispatchKeySet, c10::ArrayRef<const c10::IValue>(stack->data(), stack->size()))
+ : runRecordFunction(guard, schema_ref, dispatchKey, dispatchKeySet);
// keeping the guard alive while executing the kernel
kernel.callBoxed(op, dispatchKeySet, stack);
diff --git a/test/test_nestedtensor.py b/test/test_nestedtensor.py
index 6a31b2501c..6b7f7aa7ca 100644
--- a/test/test_nestedtensor.py
+++ b/test/test_nestedtensor.py
@@ -3938,6 +3938,45 @@ class TestNestedTensorSubclass(TestCase):
nt_t_copy_dtype = torch.ops.aten._to_copy(nt_t, dtype=torch.float16)
self.assertEqual(torch.float16, nt_t_copy_dtype.dtype)
+ @skipIfTorchDynamo("Dynamo doesn't know how to trace prof.events()")
+ def test_profiler_sequence_nr(self):
+ with torch.profiler.profile() as prof:
+ values = torch.randn(4, 6, requires_grad=True)
+ offsets = torch.tensor([0, 2, 4])
+ values = values * 2
+ l = torch.nn.Linear(6, 8)
+ nt = torch.nested.nested_tensor_from_jagged(values, offsets)
+
+ nt = l(nt)
+ val = nt.values()
+
+ loss = val.sum()
+ loss.backward()
+
+ fwd_seq_nrs = []
+ for evt in prof.events():
+ if "linear" in evt.name.lower() and "backward" not in evt.name.lower() and evt.sequence_nr != -1:
+ fwd_seq_nrs.append(evt.sequence_nr)
+
+ bwd_seq_nrs = []
+ for evt in prof.events():
+ if (
+ "linear" in evt.name.lower() and
+ "backward" in evt.name.lower() and
+ "evaluate_function" not in evt.name.lower() and
+ evt.sequence_nr != -1
+ ):
+ bwd_seq_nrs.append(evt.sequence_nr)
+
+ # There should only be one such event with a sequence number:
+ # the PythonTLSSnapshot event - but, note that it's not terrible if
+ # we end up with multiple events with the same sequence number - so we
+ # could relax this check if it becomes inconvenient to maintain this
+ # property.
+ self.assertEqual(len(fwd_seq_nrs), 1)
+ self.assertEqual(len(bwd_seq_nrs), 1)
+ self.assertEqual(fwd_seq_nrs[0], bwd_seq_nrs[0])
+
def test_is_same_size(self, device):
def get_3_tensors():
return [torch.randn(i + 2, 3, 4, requires_grad=True, dtype=torch.float64, device=device) for i in range(3)] | 2.41.0 |
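The commit message above says forward and backward events with the same sequence number are "linked" in some post-processing step. A hedged sketch of what that pairing can look like on the profiler side (the linear workload and the name filtering mirror the new test; nothing beyond `prof.events()` and `sequence_nr` is assumed):

import torch

x = torch.randn(8, 6, requires_grad=True)
lin = torch.nn.Linear(6, 4)
with torch.profiler.profile() as prof:
    lin(x).sum().backward()

fwd, bwd = {}, {}
for evt in prof.events():
    if evt.sequence_nr == -1:
        continue  # event was not associated with an autograd node
    name = evt.name.lower()
    if "backward" in name and "evaluate_function" not in name:
        bwd.setdefault(evt.sequence_nr, []).append(evt.name)
    elif "backward" not in name:
        fwd.setdefault(evt.sequence_nr, evt.name)

for seq, fwd_name in sorted(fwd.items()):
    print(seq, fwd_name, "->", bwd.get(seq, []))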
aec97a40364bb6ccfd968f28d309cfff8748d20 | Thu, 11 Apr 2024 13:10:09 -0700 | [PATCH 0058/1000] [sparse] Add fast semi-structured sparsification kernels (#122350) | This PR adds fast semi-structured sparsification kernels to PyTorch, enabling accelerated semi-structured sparsification. The kernels have been added as aten native functions. In particular, three new functions have been added: * `torch._sparse_semi_structured_tile` This function will return the packed representation and metadata for both X and X', as well as the thread masks. Note that this applies 2:4 sparsity in a 4x4 tile instead of a 1x4 strip as usual. * `torch._sparse_semi_structured_apply` This function takes in an input tensor and thread masks from the above function and returns a packed representation and metadata from applying the thread masks to the input tensor. * `torch._sparse_semi_structured_apply_dense` This function does the same thing as above but instead of returning the tensor in the sparse representation it returns it in the dense representation. The subclasses have also been updated to add a new `prune_dense_static_sort` classmethod to create sparse tensors with this format. I've added some additional documentation on how to calculate the compressed tensors needed to create a SparseSemiStructuredTensor oneself. To this end, there are two new helper functions added: `sparse_semi_structured_tile` `compute_compressed_swizzled_bitmask` Pull Request resolved: https://github.com/pytorch/pytorch/pull/122350 Approved by: https://github.com/cpuhrsch | diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index 1953abbd31..04cd21b27d 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -3342,6 +3342,18 @@
dispatch:
CUDA: _cslt_sparse_mm_search
+- func: _sparse_semi_structured_tile(Tensor input, str algorithm="", bool use_cutlass=True) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
+ dispatch:
+ CUDA: _sparse_semi_structured_tile
+
+- func: _sparse_semi_structured_apply(Tensor input, Tensor thread_masks) -> (Tensor, Tensor)
+ dispatch:
+ CUDA: _sparse_semi_structured_apply
+
+- func: _sparse_semi_structured_apply_dense(Tensor input, Tensor thread_masks) -> Tensor
+ dispatch:
+ CUDA: _sparse_semi_structured_apply_dense
+
 # DEPRECATED: Use torch._sparse_semi_structured_mm/torch._sparse_semi_structured_addmm instead
- func: _sparse_semi_structured_linear(Tensor input, Tensor weight, Tensor meta, *, Tensor? bias=None, str? activation=None, ScalarType? out_dtype=None) -> Tensor
dispatch:
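A hedged usage sketch of the three ops registered above (CUDA only; the 128x128 fp16 shape is an arbitrary choice that satisfies the alignment checks enforced by the kernels added later in this patch, and an SM80+ GPU is assumed):

import torch

x = torch.randn(128, 128, device="cuda", dtype=torch.float16)

# packed values/metadata for x and its transpose, plus the per-thread 2:4 masks
packed, meta, packed_t, meta_t, masks = torch.ops.aten._sparse_semi_structured_tile(
    x, algorithm="", use_cutlass=True
)

# re-use the same masks on another tensor of the same shape
y = torch.randn_like(x)
y_packed, y_packed_t = torch.ops.aten._sparse_semi_structured_apply(y, masks)

# same pruning, but materialised densely (zeros where values were dropped)
y_pruned = torch.ops.aten._sparse_semi_structured_apply_dense(y, masks)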
diff --git a/aten/src/ATen/native/sparse/cuda/ComputeSparseTile.h b/aten/src/ATen/native/sparse/cuda/ComputeSparseTile.h
new file mode 100644
index 0000000000..3d6b14224d
--- /dev/null
+++ b/aten/src/ATen/native/sparse/cuda/ComputeSparseTile.h
@@ -0,0 +1,184 @@
+#pragma once
+
+#include <ATen/native/sparse/cuda/SparseSemiStructuredPack.h>
+#include <ATen/native/sparse/cuda/StaticSort.h>
+#include <cutlass/bfloat16.h>
+#include <cutlass/half.h>
+
+// Given 4x4 values, computes the selected indices that will remain after 2:4
+// sparsification, as a bitmask.
+// NOTE: Algorithms might select LESS than 8 values in total in some cases.
+
+namespace platform {
+template <>
+struct numeric_limits<cutlass::bfloat16_t> {
+ CUTLASS_HOST_DEVICE
+ static cutlass::bfloat16_t infinity() {
+ return cutlass::bfloat16_t::bitcast(0x7f80);
+ }
+};
+} // namespace platform
+
+namespace at::native{
+
+template <typename Element, typename Pointwise>
+struct TileValueOrderedT {
+ union {
+ struct {
+ Element value;
+ uint2b_t col;
+ uint2b_t row;
+ } parts;
+ uint32_t raw;
+ };
+ CUTLASS_DEVICE bool operator<(
+ TileValueOrderedT<Element, Pointwise> const& other) const {
+ return Pointwise::apply(parts.value) < Pointwise::apply(other.parts.value);
+ }
+ CUTLASS_DEVICE TileValueOrderedT() {}
+};
+
+// Operations that we can apply to rank the values
+struct IdentityOp {
+ template <typename T>
+ static T CUTLASS_HOST_DEVICE apply(T const& x) {
+ return x;
+ }
+};
+// Can be applied to rank based on absolute value
+struct AbsOp {
+ template <typename T>
+ static T CUTLASS_HOST_DEVICE apply(T const& x) {
+ return cutlass::abs(x);
+ }
+};
+
+// Given 4x4 values, computes the selected indices that will remain after 2:4
+// sparsification, as a bitmask. We have 2 constraints:
+// (1) At most 2 values per line
+// (2) At most 2 values per column
+// This means we can select at most 8 values in total.
+// ALGO: We use a greedy algorithm, where we take values in the 4x4
+// tile in descending order. If a value fits (because the line/col is not
+// already full), we select it. Then we move on to the next one.
+// NOTE: This algorithm might select LESS than 8 values in total in some cases.
+// NOTE (2): RF are not indexable, so we shouldn't rely on indexing
+// values at any point, otherwise they will be stored in local memory.
+template <typename Op = IdentityOp>
+struct LargestValuesGreedy {
+ template <typename T>
+ static CUTLASS_DEVICE T outOfBoundsFillValue() {
+ return -platform::numeric_limits<T>::infinity();
+ }
+
+ template <typename Tile4x4Accessor>
+ CUTLASS_DEVICE Indices4x4 operator()(Tile4x4Accessor values) {
+ using TileValueOrdered =
+ TileValueOrderedT<typename Tile4x4Accessor::Element, Op>;
+ using TileValuesFragment = cutlass::Array<TileValueOrdered, 4 * 4>;
+ Indices4x4 indices;
+ TileValuesFragment values_ordered;
+ CUTLASS_PRAGMA_UNROLL
+ for (int i = 0; i < 4; ++i) {
+ CUTLASS_PRAGMA_UNROLL
+ for (int j = 0; j < 4; ++j) {
+ TileValueOrdered& v = values_ordered[i * 4 + j];
+ v.parts.value = values.at(i, j).get();
+ v.parts.col = j;
+ v.parts.row = i;
+ }
+ }
+ // Use a sorting network (aka without branches) to avoid
+ // warp divergence
+ StaticSort<TileValuesFragment::kElements> sorter;
+ sorter(values_ordered);
+
+ // bitmask to store how many we have selected on a given row/col
+ // 0 selected: (numPerRow >> 2*row) = 00 (0)
+ // 1 selected: (numPerRow >> 2*row) = 01 (1)
+ // 2 selected: (numPerRow >> 2*row) = 11 (3)
+ uint32_t numPerRow = 0;
+ uint32_t numPerCol = 0;
+ indices = 0;
+
+ // Take as many as we can, starting with the largest values
+ CUTLASS_PRAGMA_UNROLL
+ for (int i = values_ordered.size() - 1; i >= 0; i--) {
+ auto& e = values_ordered[i];
+
+ uint32_t rcount = uint2b_t(numPerRow >> 2 * e.parts.row);
+ uint32_t ccount = uint2b_t(numPerCol >> 2 * e.parts.col);
+ // NOTE: This is more efficient (yet equivalent) to:
+ // `rcount != 3 && ccount != 3`
+ bool selected = (rcount + ccount) <= 2;
+ indices |= selected << (e.parts.col + 4 * e.parts.row);
+
+ numPerRow |= (rcount + selected) << 2 * e.parts.row;
+ numPerCol |= (ccount + selected) << 2 * e.parts.col;
+ }
+ return indices;
+ }
+};
+
+// We consider each row independently in order
+// This is to ensure that a row's sparsity pattern is only determined
+// by its values and the rows before (but never the rows after)
+// This enforces causality strictly
+template <typename Op = IdentityOp>
+struct Causal1122 {
+ template <typename T>
+ static CUTLASS_DEVICE T outOfBoundsFillValue() {
+ return -platform::numeric_limits<T>::infinity();
+ }
+
+ template <typename Tile4x4Accessor>
+ CUTLASS_DEVICE Indices4x4 operator()(Tile4x4Accessor values) {
+ static constexpr int kMaxValuesPerRow[] = {1, 1, 2, 2};
+ using TileValueOrdered =
+ TileValueOrderedT<typename Tile4x4Accessor::Element, Op>;
+ using TileValuesFragment = cutlass::Array<TileValueOrdered, 4>;
+ Indices4x4 indices = 0;
+
+ uint32_t numPerCol = 0; // <- see doc in `LargestValuesGreedy`
+
+ CUTLASS_PRAGMA_UNROLL
+ for (int row = 0; row < 4; ++row) {
+ int row_count = 0;
+ TileValuesFragment values_ordered;
+ CUTLASS_PRAGMA_UNROLL
+ for (int col = 0; col < 4; ++col) {
+ TileValueOrdered& v = values_ordered[col];
+ v.parts.value = values.at(row, col).get();
+ v.parts.col = col;
+ }
+ // Use a sorting network (aka without branches) to avoid
+ // warp divergence
+ StaticSort<TileValuesFragment::kElements> sorter;
+ sorter(values_ordered);
+
+ // Take as many as we can, starting with the largest values
+ CUTLASS_PRAGMA_UNROLL
+ for (int i = values_ordered.size() - 1; i >= 0; i--) {
+ auto& e = values_ordered[i];
+
+ uint32_t ccount = uint2b_t(numPerCol >> 2 * e.parts.col);
+ bool selected = ccount != 3 && (row_count < kMaxValuesPerRow[row]);
+ indices |= selected << (e.parts.col + 4 * row);
+ numPerCol |= (ccount + selected) << 2 * e.parts.col;
+ row_count += selected;
+ }
+ }
+ return indices;
+ }
+};
+
+template <typename T>
+void named_algorithms(T callback) {
+ callback(LargestValuesGreedy<IdentityOp>(), "largest_values_greedy");
+ callback(Causal1122<IdentityOp>(), "causal1122");
+ callback(LargestValuesGreedy<AbsOp>(), "largest_abs_values_greedy");
+ // default one
+ callback(LargestValuesGreedy<IdentityOp>(), "");
+}
+
+} // namespace
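The `LargestValuesGreedy` comment above fully specifies the selection rule: walk the 16 tile values in descending order and keep a value only while its row and its column each still hold fewer than two kept values. A plain Python reference of that rule, handy for checking masks produced by the kernel (tie-breaking may differ from the on-device sorting network, so this is illustrative rather than bit-exact):

import torch

def greedy_2in4_tile_mask(tile):
    # tile: 4x4 tensor; returns a bool mask with at most 2 kept values per row and per column
    per_row, per_col = [0] * 4, [0] * 4
    mask = torch.zeros(4, 4, dtype=torch.bool)
    for idx in torch.argsort(tile.flatten(), descending=True).tolist():
        r, c = divmod(idx, 4)
        if per_row[r] < 2 and per_col[c] < 2:
            mask[r, c] = True
            per_row[r] += 1
            per_col[c] += 1
    return mask  # may keep fewer than 8 values, as the NOTE above warns

m = greedy_2in4_tile_mask(torch.randn(4, 4))
assert int(m.sum(0).max()) <= 2 and int(m.sum(1).max()) <= 2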
diff --git a/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredApplyDense.cu b/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredApplyDense.cu
new file mode 100644
index 0000000000..8195cec3d6
--- /dev/null
+++ b/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredApplyDense.cu
@@ -0,0 +1,187 @@
+#include <ATen/ScalarOps.h>
+#include <ATen/Tensor.h>
+#include <ATen/Functions.h>
+#include <ATen/autocast_mode.h>
+#include <c10/cuda/CUDAGuard.h>
+#include <torch/library.h>
+
+#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080)
+#else
+#include <ATen/native/sparse/cuda/ComputeSparseTile.h>
+#include <ATen/native/sparse/cuda/SparseSemiStructuredPack.h>
+#endif
+
+namespace at::native {
+
+#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080)
+#else
+struct Params {
+ uint64_t const* threads_masks;
+
+ uint16_t const* input;
+ int64_t input_stride;
+ int64_t input_dim0;
+ int64_t input_dim1;
+
+ uint16_t* output;
+ int64_t output_stride;
+
+ __host__ dim3 getBlocksGrid() const {
+ return dim3(
+ cutlass::ceil_div(input_dim0, kWarpX),
+ cutlass::ceil_div(input_dim1, kWarpY),
+ 1);
+ }
+
+ static CUTLASS_HOST_DEVICE dim3 getThreadsGrid() {
+ return dim3(kWarpX / kThreadX, kWarpY / kThreadY, 1);
+ }
+
+ CUTLASS_DEVICE Tile8x8Masks* getCurrentThreadIndices() const {
+ Tile8x8Masks* gmem_threads_masks = (Tile8x8Masks*)threads_masks;
+ gmem_threads_masks += blockIdx.y * getThreadsGrid().y + threadIdx.y;
+ int64_t strideX = gridDim.y * getThreadsGrid().y;
+ gmem_threads_masks +=
+ (blockIdx.x * getThreadsGrid().x + threadIdx.x) * strideX;
+ return gmem_threads_masks;
+ }
+};
+
+template <bool kInputRowMajor = true, bool kOutputRowMajor = true>
+__global__ void __launch_bounds__(32 /* num_threads */, 32) sparse_semi_structured_apply_dense_k(Params p) {
+ using Fragment = cutlass::Array<uint16_t, 8>;
+
+ // Top-left of the 8x8 tile we own
+ int warp_x = blockIdx.x * kWarpX;
+ int warp_y = blockIdx.y * kWarpY;
+ int x = warp_x + threadIdx.x * kThreadX;
+ int y = warp_y + threadIdx.y * kThreadY;
+
+ uint16_t* output = p.output + x * p.output_stride + y;
+ Tile8x8Masks indices = *p.getCurrentThreadIndices();
+
+ // Load dense
+ Fragment lines[8];
+ if (kInputRowMajor) {
+ uint16_t const* input = p.input + x * p.input_stride + y;
+ CUTLASS_PRAGMA_UNROLL
+ for (int i = 0; i < 8; ++i) {
+ cutlass::arch::global_load<Fragment, sizeof(Fragment)>(
+ lines[i], input + i * p.input_stride, true);
+ }
+ } else {
+ uint16_t const* input = p.input + x + y * p.input_stride;
+ Fragment columns[8];
+ CUTLASS_PRAGMA_UNROLL
+ for (int i = 0; i < 8; ++i) {
+ cutlass::arch::global_load<Fragment, sizeof(Fragment)>(
+ columns[i], input + i * p.input_stride, true);
+ }
+ CUTLASS_PRAGMA_UNROLL
+ for (int i = 0; i < 8; ++i) {
+ CUTLASS_PRAGMA_UNROLL
+ for (int j = 0; j < 8; ++j) {
+ lines[i][j] = columns[j][i].get();
+ }
+ }
+ }
+
+ CUTLASS_PRAGMA_UNROLL
+ for (int row = 0; row < 2; ++row) {
+ Indices4x4 masks[2];
+ if (row == 0) {
+ masks[0] = indices.a;
+ masks[1] = indices.b;
+ } else {
+ masks[0] = indices.c;
+ masks[1] = indices.d;
+ }
+
+ // Apply mask
+ CUTLASS_PRAGMA_UNROLL
+ for (int m = 0; m < 2; ++m) {
+ CUTLASS_PRAGMA_UNROLL
+ for (int r = 0; r < 4; ++r) {
+ CUTLASS_PRAGMA_UNROLL
+ for (int c = 0; c < 4; ++c) {
+ lines[4 * row + r][4 * m + c] = lines[4 * row + r][4 * m + c] *
+ int((masks[m] >> (4 * r + c)) & 1);
+ }
+ }
+ }
+ }
+ static_assert(kOutputRowMajor, "Transpose here for ColMajor output");
+ // Save dense with zeros
+ CUTLASS_PRAGMA_UNROLL
+ for (int i = 0; i < 8; ++i) {
+ cutlass::arch::global_store<Fragment, sizeof(Fragment)>(
+ lines[i], output + i * p.output_stride, true);
+ }
+}
+#endif
+
+Tensor _sparse_semi_structured_apply_dense(
+ const Tensor& input,
+ const Tensor& threads_masks) {
+
+#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080)
+ AT_ERROR("_sparse_semi_structured_apply_dense: not supported");
+ return Tensor{};
+#else
+ TORCH_CHECK(
+ input.scalar_type() == at::ScalarType::Half ||
+ input.scalar_type() == at::ScalarType::BFloat16,
+ "Unsupported `input` dtype");
+ TORCH_CHECK(
+ input.stride(0) == 1 || input.stride(1) == 1,
+ "`input` should be either RowMajor or ColMajor. Invalid memory layout - try .contiguous()?");
+
+ auto roundedx = cutlass::round_up(input.size(0), kWarpX);
+ auto roundedy = cutlass::round_up(input.size(1), kWarpY);
+
+ Params p;
+ p.input = (uint16_t const*)input.data_ptr();
+ p.input_dim0 = input.size(0);
+ p.input_dim1 = input.size(1);
+ p.threads_masks = (uint64_t const*)threads_masks.data_ptr();
+
+ TORCH_CHECK(threads_masks.dim() == 3);
+ TORCH_CHECK(threads_masks.size(0) == p.getBlocksGrid().x * p.getThreadsGrid().x);
+ TORCH_CHECK(threads_masks.size(1) == p.getBlocksGrid().y * p.getThreadsGrid().y);
+ TORCH_CHECK(threads_masks.stride(1) == sizeof(p.threads_masks[0]));
+ TORCH_CHECK(threads_masks.size(2) == sizeof(p.threads_masks[0]));
+ TORCH_CHECK(threads_masks.stride(2) == 1);
+ TORCH_CHECK(threads_masks.scalar_type() == at::ScalarType::Byte);
+
+ at::Tensor output = at::empty({p.input_dim0, p.input_dim1}, input.options());
+ TORCH_INTERNAL_ASSERT(output.stride(-1) == 1, "expected RowMajor?");
+ p.output = (uint16_t*)output.data_ptr();
+
+ bool inputRowMajor = input.stride(-1) == 1;
+ bool outputRowMajor = output.stride(-1) == 1;
+ p.input_stride = input.stride(inputRowMajor ? 0 : 1);
+ p.output_stride = output.stride(outputRowMajor ? 0 : 1);
+ at::cuda::CUDAGuard device_guard(input.device());
+
+ cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+ size_t smem_bytes = 0;
+ if (inputRowMajor && outputRowMajor) {
+ sparse_semi_structured_apply_dense_k<true, true>
+ <<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes, stream>>>(p);
+ } else if (!inputRowMajor && outputRowMajor) {
+ sparse_semi_structured_apply_dense_k<false, true>
+ <<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes, stream>>>(p);
+ } else {
+ TORCH_CHECK(
+ false,
+ "Unsupported configuration: `input` is ",
+ inputRowMajor ? "RowMajor" : "ColMajor",
+ ", and `output` is ",
+ outputRowMajor ? "RowMajor" : "ColMajor");
+ }
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
+ return output;
+#endif
+}
+
+} // namespace
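Functionally, the kernel above multiplies every input value by the corresponding bit of the thread masks, so `_sparse_semi_structured_apply_dense` can be cross-checked against a plain dense mask-multiply. A hedged sanity check (same CUDA/SM80+ assumptions and arbitrary 128x128 fp16 shape as before):

import torch

x = torch.randn(128, 128, device="cuda", dtype=torch.float16)
_, _, _, _, masks = torch.ops.aten._sparse_semi_structured_tile(x)

pruned = torch.ops.aten._sparse_semi_structured_apply_dense(x, masks)

keep = pruned != 0                         # kept positions show up as non-zeros
assert torch.equal(pruned, x * keep)       # dropped positions are exactly zeroed
assert int(keep.sum()) <= x.numel() // 2   # at most 2 of every 4 values survive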
diff --git a/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredPack.h b/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredPack.h
new file mode 100644
index 0000000000..95cf466a76
--- /dev/null
+++ b/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredPack.h
@@ -0,0 +1,520 @@
+#pragma once
+
+#include <ATen/native/sparse/cuda/StaticSort.h>
+#include <cutlass/arch/memory.h>
+#include <cutlass/array.h>
+#include <cutlass/bfloat16.h>
+#include <cutlass/fast_math.h>
+#include <cutlass/half.h>
+#include <cutlass/integer_subbyte.h>
+
+namespace at::native {
+
+using cutlass::uint1b_t;
+using cutlass::uint2b_t;
+using cutlass::uint4b_t;
+using uint8b_t = cutlass::integer_subbyte<8, false>;
+using ReorderedLayoutInputE = cutlass::layout::ColumnMajorInterleaved<2>;
+using ElementInputE = uint16_t;
+constexpr int kWarpX = 32;
+constexpr int kWarpY = 64;
+constexpr int kThreadX = 8;
+constexpr int kThreadY = 8;
+
+// bitmask of selected values, in col-major storage
+// eg: indices & (1 << (col + 4 * row))
+using Indices4x4 = uint16_t;
+
+struct Tile8x8Masks {
+ Indices4x4 a, b, c, d;
+ CUTLASS_DEVICE Tile8x8Masks() {
+ a = b = c = d = 0;
+ }
+};
+
+static_assert(sizeof(Tile8x8Masks) == 8, "should be exactly uint64_t");
+
+// Each thread has data for an 8x8 area of the input tensor
+// Due to the very specific format of the metadata, 32 consecutive bits
+// of the metadata tensor will live in 4 different threads.
+// This function does the required warp shuffling to send data to the
+// right threads.
+// This took some time to write (and get right), hopefully these slides
+// can help
+// https://docs.google.com/presentation/d/1DtmKThv8S5QAyBktuLRYzZhRzCvS1qSkBbrqNCjMPeA/edit#slide=id.g249eb2e2f2e_0_28
+CUTLASS_DEVICE uint32_t
+warp_shuffle_meta(uint32_t meta_ab, bool transposed = false) {
+ // The required format is
+ // (one line = 32 bits)
+ // a[ 0, 0:16] a[ 8, 0:16] <- T0 [left]
+ // a[ 0, 16:32] a[ 8, 16:32]
+ // a[16, 0:16] a[24, 0:16]
+ // a[16, 16:32] a[24, 16:32]
+ // a[ 1, 0:16] a[ 9, 0:16] <- T4
+ // a[ 1, 16:32] a[ 9, 16:32]
+ // a[17, 0:16] a[25, 0:16]
+ // a[17, 16:32] a[25, 16:32]
+ // a[ 2, 0:16] a[10, 0:16] <- T1 [left, bottom]
+ // a[ 2, 16:32] a[10, 16:32]
+ // a[18, 0:16] a[26, 0:16]
+ // a[18, 16:32] a[26, 16:32]
+ // a[ 3, 0:16] a[11, 0:16] <- T5 [bottom]
+ // a[ 3, 16:32] a[11, 16:32]
+ // a[19, 0:16] a[27, 0:16]
+ // a[19, 16:32] a[27, 16:32]
+ // ...
+ // Use warp-shuffles to send data around threads
+ bool thread_left = (threadIdx.y % 2) == 0;
+ bool thread_bottom = threadIdx.x % 2;
+
+ if (transposed) {
+ thread_left = (threadIdx.x % 2) == 0;
+ thread_bottom = threadIdx.y % 2;
+ }
+
+ uint8b_t stage0_data[2] = {
+ uint8b_t(meta_ab >> (8 * thread_left)),
+ uint8b_t(meta_ab >> (8 * (thread_left + 2)))};
+ // shfl t0-t4 / t1-t5
+ stage0_data[0] =
+ __shfl_xor_sync(0xffffffff, stage0_data[0], transposed ? 1 : 4);
+ stage0_data[1] =
+ __shfl_xor_sync(0xffffffff, stage0_data[1], transposed ? 1 : 4);
+
+ uint16_t line0 = int(uint8b_t(meta_ab >> (8 * (1 - thread_left))))
+ << ((1 - thread_left) * 8);
+ line0 |= int(stage0_data[0]) << (thread_left * 8);
+ uint16_t line1 = int(uint8b_t(meta_ab >> (8 * (1 - thread_left + 2))))
+ << ((1 - thread_left) * 8);
+ line1 |= int(stage0_data[1]) << (thread_left * 8);
+
+ uint16_t stage1_data = thread_bottom ? line0 : line1;
+ stage1_data = __shfl_xor_sync(0xffffffff, stage1_data, transposed ? 4 : 1);
+
+ uint32_t final_metadata;
+ if (thread_bottom) {
+ final_metadata = uint32_t(stage1_data) | uint32_t(line1) << 16;
+ } else {
+ final_metadata = uint32_t(stage1_data) << 16 | uint32_t(line0);
+ }
+ return final_metadata;
+}
+
+CUTLASS_DEVICE void warp_shuffle_and_write_meta(
+ ElementInputE* metadata_quad,
+ uint32_t meta_ab,
+ bool transposed = false) {
+ bool thread_left = (threadIdx.y % 2) == 0;
+ bool thread_bottom = threadIdx.x % 2;
+
+ if (transposed) {
+ thread_left = (threadIdx.x % 2) == 0;
+ thread_bottom = threadIdx.y % 2;
+ }
+
+ uint32_t final_metadata = warp_shuffle_meta(meta_ab, transposed);
+
+ int index = (!thread_left + 2 * thread_bottom) * 4;
+ ((uint32_t*)metadata_quad)[index] = final_metadata;
+}
+
+template <typename Element_>
+struct KernelTypes {
+ using Element = Element_;
+ using Fragment =
+ cutlass::Array<Element, 8>; // always read from gmem in chunks of 128bits
+ using Fragment4 = cutlass::Array<Element, 4>;
+ using ValuesPacked = cutlass::Array<Element, 8>; // 4 first col, 4 second col
+
+ struct Params {
+ /// inputs
+ Element const* input;
+ int64_t input_s0;
+ int64_t input_dim0;
+ int64_t input_dim1;
+
+ /// outputs
+ Element* packed;
+ int64_t packed_stride;
+
+ Element* packed_trans;
+ int64_t packed_trans_stride;
+
+ uint64_t* threads_masks;
+
+ __host__ dim3 getBlocksGrid() const {
+ return dim3(
+ cutlass::ceil_div(input_dim0, kWarpX),
+ cutlass::ceil_div(input_dim1, kWarpY),
+ 1);
+ }
+
+ static CUTLASS_HOST_DEVICE dim3 getThreadsGrid() {
+ return dim3(kWarpX / kThreadX, kWarpY / kThreadY, 1);
+ }
+
+ CUTLASS_DEVICE Tile8x8Masks* getCurrentThreadIndices() const {
+ Tile8x8Masks* gmem_threads_masks = (Tile8x8Masks*)threads_masks;
+ gmem_threads_masks += blockIdx.y * getThreadsGrid().y + threadIdx.y;
+ int64_t strideX = gridDim.y * getThreadsGrid().y;
+ gmem_threads_masks +=
+ (blockIdx.x * getThreadsGrid().x + threadIdx.x) * strideX;
+ return gmem_threads_masks;
+ }
+ };
+
+ struct Tile4x4Accessor {
+ using Element = Element_;
+
+ Fragment (&_lines)[8];
+ int _start_row;
+ int _start_col;
+
+ CUTLASS_DEVICE Tile4x4Accessor(
+ Fragment (&lines)[8],
+ int start_row,
+ int start_col)
+ : _lines(lines), _start_row(start_row), _start_col(start_col) {}
+
+ CUTLASS_DEVICE typename Fragment::reference at(int r, int c) {
+ return _lines[r + _start_row][c + _start_col];
+ }
+ };
+
+ struct Tile4x4Packed {
+ Fragment4 values[2];
+ CUTLASS_DEVICE Tile4x4Packed() {
+ values[0].clear();
+ values[1].clear();
+ }
+ };
+
+ // Returns a packed 4x4 tile (eg 2x4 values) which correspond to the values
+ // that are in `indices`. Also fills the `meta` array in the right format
+ // for consumption in the TensorCores.
+ // Example:
+ // indices: 0011
+ // 1001
+ // 1001
+ // 0100 (<- note, only 1 value on the last line)
+ // packed: values[0][2] values[1][0] values[2][0] values[3][1]
+ // values[0][3] values[1][3] values[2][3] Element(0)
+ CUTLASS_DEVICE static Tile4x4Packed pack_4x4(
+ Indices4x4 indices,
+ Tile4x4Accessor tile,
+ uint32_t& meta,
+ int meta_pos,
+ bool transpose = false) {
+ Tile4x4Packed packed;
+ CUTLASS_PRAGMA_UNROLL
+ for (int row = 0; row < 4; ++row) {
+ uint2b_t col0_from, col1_from;
+ auto packValue = [&](uint2b_t col_to, uint2b_t col_from) {
+ auto value = transpose ? tile.at(col_from, row).get()
+ : tile.at(row, col_from).get();
+ packed.values[col_to][row] = value;
+ if (col_to == uint2b_t(0)) {
+ col0_from = col_from;
+ } else {
+ col1_from = col_from;
+ }
+ };
+ auto isSelected = [&](int col) {
+ if (transpose) {
+ return indices & (1 << (row + 4 * col));
+ }
+ return indices & (1 << (col + 4 * row));
+ };
+ // Process cols 0/1
+ // We know that col0 is always packed to position 0 if it's there
+ // and col1 is packed to pos 0 or 1 (depending if col0 is selected)
+ if (isSelected(1)) {
+ packValue(0, 1);
+ }
+ if (isSelected(0)) {
+ packValue(0, 0);
+ }
+ if (isSelected(0) && isSelected(1)) {
+ packValue(1, 1);
+ }
+ // Process cols 2/3
+ // same sort of heuristic
+ if (isSelected(2)) {
+ packValue(1, 2);
+ }
+ if (isSelected(3)) {
+ packValue(1, 3);
+ }
+ if (isSelected(2) && isSelected(3)) {
+ packValue(0, 2);
+ }
+ int add_mask = (col0_from | (col1_from << 2)) << (8 * row + meta_pos);
+ meta |= add_mask;
+ }
+ return packed;
+ }
+
+ struct Tile8x8Meta {
+ // meta_ab[row] |= (real_col << (8*row + 2*pos))
+ uint32_t meta_ab;
+ uint32_t meta_cd;
+
+ // meta_ac_trans[col] |= (real_row << (8*col + 2*pos))
+ uint32_t meta_ac_trans;
+ uint32_t meta_bd_trans;
+
+ CUTLASS_DEVICE Tile8x8Meta() {
+ meta_ab = meta_cd = meta_ac_trans = meta_bd_trans = 0;
+ }
+ };
+
+ CUTLASS_DEVICE static void writePacked(
+ Element* ptr,
+ Fragment4 packed0,
+ Fragment4 packed1) {
+ Fragment write;
+ CUTLASS_PRAGMA_UNROLL
+ for (int i = 0; i < 4; ++i) {
+ write[i] = packed0[i].get();
+ write[i + 4] = packed1[i].get();
+ }
+ cutlass::arch::global_store<Fragment, sizeof(Fragment)>(write, ptr, true);
+ }
+
+ CUTLASS_DEVICE static void writePackedT(
+ Element* ptr,
+ int64_t stride,
+ Tile4x4Packed a,
+ Tile4x4Packed b) {
+ CUTLASS_PRAGMA_UNROLL
+ for (int i = 0; i < 4; ++i) {
+ Fragment4 write;
+ write[0] = a.values[0][i].get();
+ write[1] = a.values[1][i].get();
+ write[2] = b.values[0][i].get();
+ write[3] = b.values[1][i].get();
+ cutlass::arch::global_store<Fragment4, sizeof(Fragment4)>(
+ write, ptr + i * stride, true);
+ }
+ }
+
+ template <typename Algorithm, typename MetadataStore>
+ CUTLASS_DEVICE static void sparse_semi_structured_tile_kernel(
+ Params p,
+ MetadataStore metadata_gmem,
+ Algorithm compute_tile_indices) {
+ // Each thread is responsible for an 8x8 tile, which contains 4 4x4 tiles:
+ // A, B, C and D, as displayed in the following schema:
+ // +---+---+
+ // | A | B |
+ // +---+---+
+ // | C | D |
+ // +---+---+
+ // Each warp (32 threads) will then be responsible for a 32x64 tile of the
+ // input.
+ // This configuration allows to read/write data in 128bits chunks. These
+ // memory accesses are coalesced at the warp-level into 128bytes. See also:
+ // https://docs.google.com/presentation/d/1DtmKThv8S5QAyBktuLRYzZhRzCvS1qSkBbrqNCjMPeA/edit#slide=id.g2494f30c7cf_0_0
+
+ // Top-left of the 8x8 tile we own
+ int warp_x = blockIdx.x * kWarpX;
+ int warp_y = blockIdx.y * kWarpY;
+ int x = warp_x + threadIdx.x * kThreadX;
+ int y = warp_y + threadIdx.y * kThreadY;
+
+ Element const* input = p.input + x * p.input_s0 + y;
+ Element* packed = p.packed + x * p.packed_stride + (y / 2);
+ Element* packed_trans =
+ p.packed_trans + (x / 2) + y * p.packed_trans_stride;
+
+ Fragment lines[8]; // Contains all values from the 8x8 tile
+
+ Tile8x8Meta metadata;
+ Tile8x8Masks indices;
+
+ // Load/process tiles `A` and `B`
+ Element fillValue = Algorithm::template outOfBoundsFillValue<Element>();
+ CUTLASS_PRAGMA_UNROLL
+ for (int i = 0; i < 4; ++i) {
+ lines[i].fill(fillValue);
+ cutlass::arch::global_load<Fragment, sizeof(Fragment)>(
+ lines[i], input + i * p.input_s0, x + i < p.input_dim0);
+ }
+ indices.a = compute_tile_indices(Tile4x4Accessor(lines, 0, 0));
+ indices.b = compute_tile_indices(Tile4x4Accessor(lines, 0, 4));
+
+ // Compute packed tiles A & B
+ {
+ Tile4x4Packed packed_a = pack_4x4(
+ indices.a, Tile4x4Accessor(lines, 0, 0), metadata.meta_ab, 0);
+ Tile4x4Packed packed_b = pack_4x4(
+ indices.b, Tile4x4Accessor(lines, 0, 4), metadata.meta_ab, 4);
+ writePackedT(packed, p.packed_stride, packed_a, packed_b);
+ }
+
+ // Compute/store packed tiles A & B in transpose output
+ Tile4x4Packed packed_trans_a = pack_4x4(
+ indices.a,
+ Tile4x4Accessor(lines, 0, 0),
+ metadata.meta_ac_trans,
+ 0,
+ true);
+ Tile4x4Packed packed_trans_b = pack_4x4(
+ indices.b,
+ Tile4x4Accessor(lines, 0, 4),
+ metadata.meta_bd_trans,
+ 0,
+ true);
+ // (NOTE) Now we no longer need A & B (`lines[0:4]`)
+
+ // Load/process tiles `C` and `D`
+ CUTLASS_PRAGMA_UNROLL
+ for (int i = 4; i < 8; ++i) {
+ lines[i].fill(fillValue);
+ cutlass::arch::global_load<Fragment, sizeof(Fragment)>(
+ lines[i], input + i * p.input_s0, x + i < p.input_dim0);
+ }
+ indices.c = compute_tile_indices(Tile4x4Accessor(lines, 4, 0));
+ indices.d = compute_tile_indices(Tile4x4Accessor(lines, 4, 4));
+
+ // Compute packed tiles C & D
+ {
+ Tile4x4Packed packed_c = pack_4x4(
+ indices.c, Tile4x4Accessor(lines, 4, 0), metadata.meta_cd, 0);
+ Tile4x4Packed packed_d = pack_4x4(
+ indices.d, Tile4x4Accessor(lines, 4, 4), metadata.meta_cd, 4);
+ writePackedT(
+ packed + 4 * p.packed_stride, p.packed_stride, packed_c, packed_d);
+ }
+
+ // Compute/store packed tiles C & D in transpose output
+ Tile4x4Packed packed_trans_c = pack_4x4(
+ indices.c,
+ Tile4x4Accessor(lines, 4, 0),
+ metadata.meta_ac_trans,
+ 4,
+ true);
+ Tile4x4Packed packed_trans_d = pack_4x4(
+ indices.d,
+ Tile4x4Accessor(lines, 4, 4),
+ metadata.meta_bd_trans,
+ 4,
+ true);
+
+ // Dump the metadata in a nice format
+ *p.getCurrentThreadIndices() = indices;
+
+ // Store packed A, B, C & D for transposed matrix
+ writePackedT(
+ packed_trans, p.packed_trans_stride, packed_trans_a, packed_trans_c);
+ packed_trans += 4 * p.packed_trans_stride;
+ writePackedT(
+ packed_trans, p.packed_trans_stride, packed_trans_b, packed_trans_d);
+
+ // Writing meta non-transposed
+ {
+ ElementInputE* packed_meta_reordered = metadata_gmem.get_metaN(
+ warp_x, threadIdx.x * kThreadX, warp_y, threadIdx.y * kThreadY);
+ warp_shuffle_and_write_meta(packed_meta_reordered, metadata.meta_ab);
+ warp_shuffle_and_write_meta(packed_meta_reordered + 32, metadata.meta_cd);
+ }
+
+ // Writing meta transposed
+ {
+ ElementInputE* packed_trans_meta_reordered = metadata_gmem.get_metaT(
+ warp_x, threadIdx.x * kThreadX, warp_y, threadIdx.y * kThreadY);
+ warp_shuffle_and_write_meta(
+ packed_trans_meta_reordered, metadata.meta_ac_trans, true);
+ warp_shuffle_and_write_meta(
+ packed_trans_meta_reordered + 32, metadata.meta_bd_trans, true);
+ }
+ }
+
+ CUTLASS_DEVICE static void sparse_semi_structured_apply_kernel(Params p) {
+ // See `sparse24_sparsify_both_ways_kernel`
+ // It's basically the same, just that we skip
+ // the part where we compute the indices we keep
+
+ // Top-left of the 8x8 tile we own
+ int warp_x = blockIdx.x * kWarpX;
+ int warp_y = blockIdx.y * kWarpY;
+ int x = warp_x + threadIdx.x * kThreadX;
+ int y = warp_y + threadIdx.y * kThreadY;
+
+ Element const* input = p.input + x * p.input_s0 + y;
+ Element* packed = p.packed + x * p.packed_stride + (y / 2);
+ Element* packed_trans =
+ p.packed_trans + (x / 2) + y * p.packed_trans_stride;
+
+ Fragment lines[8]; // Contains all values from the 8x8 tile
+
+ Tile8x8Meta metadata;
+ Tile8x8Masks indices = *p.getCurrentThreadIndices();
+
+ // Load/process tiles `A` and `B`
+ CUTLASS_PRAGMA_UNROLL
+ for (int i = 0; i < 8; ++i) {
+ // NB: Values outside bounds are undefined, but shouldn't
+ // be used anywhere
+ cutlass::arch::global_load<Fragment, sizeof(Fragment)>(
+ lines[i], input + i * p.input_s0, x + i < p.input_dim0);
+ }
+
+ // Compute packed tiles A & B
+ {
+ Tile4x4Packed packed_a = pack_4x4(
+ indices.a, Tile4x4Accessor(lines, 0, 0), metadata.meta_ab, 0);
+ Tile4x4Packed packed_b = pack_4x4(
+ indices.b, Tile4x4Accessor(lines, 0, 4), metadata.meta_ab, 4);
+ writePackedT(packed, p.packed_stride, packed_a, packed_b);
+ }
+
+ // Compute/store packed tiles A & B in transpose output
+ Tile4x4Packed packed_trans_a = pack_4x4(
+ indices.a,
+ Tile4x4Accessor(lines, 0, 0),
+ metadata.meta_ac_trans,
+ 0,
+ true);
+ Tile4x4Packed packed_trans_b = pack_4x4(
+ indices.b,
+ Tile4x4Accessor(lines, 0, 4),
+ metadata.meta_bd_trans,
+ 0,
+ true);
+ // (NOTE) Now we no longer need A & B (`lines[0:4]`)
+
+ // Compute packed tiles C & D
+ {
+ Tile4x4Packed packed_c = pack_4x4(
+ indices.c, Tile4x4Accessor(lines, 4, 0), metadata.meta_cd, 0);
+ Tile4x4Packed packed_d = pack_4x4(
+ indices.d, Tile4x4Accessor(lines, 4, 4), metadata.meta_cd, 4);
+ writePackedT(
+ packed + 4 * p.packed_stride, p.packed_stride, packed_c, packed_d);
+ }
+
+ // Compute/store packed tiles C & D in transpose output
+ Tile4x4Packed packed_trans_c = pack_4x4(
+ indices.c,
+ Tile4x4Accessor(lines, 4, 0),
+ metadata.meta_ac_trans,
+ 4,
+ true);
+ Tile4x4Packed packed_trans_d = pack_4x4(
+ indices.d,
+ Tile4x4Accessor(lines, 4, 4),
+ metadata.meta_bd_trans,
+ 4,
+ true);
+
+ // Store packed A, B, C & D for transposed matrix
+ writePackedT(
+ packed_trans, p.packed_trans_stride, packed_trans_a, packed_trans_c);
+ packed_trans += 4 * p.packed_trans_stride;
+ writePackedT(
+ packed_trans, p.packed_trans_stride, packed_trans_b, packed_trans_d);
+ }
+};
+
+} // namespace at::native
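The `pack_4x4` comment above describes the packed layout: each row of a 4x4 tile contributes at most two kept values plus two 2-bit column indices of metadata (`col0_from | col1_from << 2`). A tiny Python rendering of that bookkeeping for a single row (illustrative only; zero-filling rows that keep fewer than two values is a simplification of the kernel's convention):

def pack_row(values, kept_cols):
    # values: the 4 entries of one row; kept_cols: up to two kept column indices, ascending
    packed = [values[c] for c in kept_cols] + [0.0] * (2 - len(kept_cols))
    cols = list(kept_cols) + [0] * (2 - len(kept_cols))
    meta = cols[0] | (cols[1] << 2)  # two 2-bit column indices for this row
    return packed, meta

print(pack_row([1.0, 2.0, 3.0, 4.0], [1, 3]))  # ([2.0, 4.0], 0b1101)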
diff --git a/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredTile.cu b/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredTile.cu
new file mode 100644
index 0000000000..7598f1b59e
--- /dev/null
+++ b/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredTile.cu
@@ -0,0 +1,313 @@
+#include <ATen/ScalarOps.h>
+#include <ATen/Functions.h>
+#include <ATen/Tensor.h>
+#include <ATen/autocast_mode.h>
+#include <c10/cuda/CUDAGuard.h>
+#include <ATen/ATen.h>
+#include <ATen/core/Tensor.h>
+#include <ATen/cuda/CUDAUtils.h>
+#include <ATen/Dispatch.h>
+#include <torch/library.h>
+#include <torch/types.h>
+
+#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080)
+#else
+#include <ATen/native/sparse/cuda/ComputeSparseTile.h>
+#include <ATen/native/sparse/cuda/SparseSemiStructuredPack.h>
+#include <cuda_runtime.h>
+#endif
+
+namespace at::native {
+
+#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080)
+#else
+struct MetadataCuSparseLt {
+ // Format used by cuSparseLt
+ // This is based on reverse-engineering, for a visual illustration:
+ // https://docs.google.com/presentation/d/1DtmKThv8S5QAyBktuLRYzZhRzCvS1qSkBbrqNCjMPeA/edit#slide=id.g29afe95bda8_0_0
+ static constexpr int kStrideBlock32x32 = (32 * 32) / (sizeof(ElementInputE) * 8);
+
+ ElementInputE* _meta;
+ ElementInputE* _meta_trans;
+ int64_t _rows;
+ int64_t _cols;
+
+ static int64_t getMetadataSize(int rows, int cols)
+ {
+ TORCH_CHECK(rows % 128 == 0 && cols % 128 == 0, "Only supports rows/cols multiples of 128");
+ // 1 bit per dense value
+ return (rows * cols) / (8 * sizeof(ElementInputE));
+ }
+
+ // < return value of the function, packed, packed_meta >
+ static std::tuple<Tensor, Tensor, Tensor> create_compressed_representation(int rows, int cols, at::Tensor const& like)
+ {
+ TORCH_CHECK(
+ like.scalar_type() == at::ScalarType::Half ||
+ like.scalar_type() == at::ScalarType::BFloat16);
+ constexpr int kBytesPerScalar = 2;
+ int64_t data_scalars = rows * cutlass::ceil_div(cols, 2);
+ int64_t meta_scalars = getMetadataSize(rows, cols);
+
+ at::Tensor storage = at::empty(
+ {(data_scalars + meta_scalars)},
+ at::TensorOptions().device(like.device()).dtype(like.dtype()));
+
+ using namespace torch::indexing;
+ at::Tensor packed = storage.index({Slice(None, data_scalars)})
+ .view({rows, cutlass::ceil_div(cols, 2)});
+ at::Tensor metadata = storage.index({Slice(data_scalars, None)});
+ // TODO: Cast metadata to Short
+ static_assert(kBytesPerScalar == 2, "or modify the last dim below");
+ metadata = metadata.view({rows / 128, cols / 32, 256});
+ return std::make_tuple(storage, packed, metadata);
+ }
+
+ MetadataCuSparseLt(at::Tensor metaN, at::Tensor metaT, int rows, int cols) {
+ _meta = (ElementInputE*)metaN.data_ptr();
+ _meta_trans = (ElementInputE*)metaT.data_ptr();
+ _rows = rows;
+ _cols = cols;
+ }
+ CUTLASS_HOST_DEVICE
+ static int64_t _get_meta_offset(
+ int warp_row,
+ int thread_row,
+ int warp_col,
+ int thread_col,
+ int totalRows) {
+ int64_t offset = 0;
+ // warp-level: Find the 128x64 tile
+ offset += (warp_row / 128) * (kStrideBlock32x32 * 8);
+ offset += (warp_col / 64) * (kStrideBlock32x32 * 8) * (totalRows / 128);
+ // Find the 32x32 tile inside
+ offset += (((warp_row + thread_row) % 128) / 32) * kStrideBlock32x32;
+ offset += (((warp_col + thread_col) % 64) / 32) * (kStrideBlock32x32 * 4);
+ // Inside the 32x32 tile
+ offset += (warp_row % 32) * 2;
+ // Top/bottom 16x16 tile
+ offset += ((thread_row % 32) / 16) * 4;
+ // Left/right 16x16 tile
+ offset += ((thread_col % 32) / 16) * 2;
+ return offset;
+ }
+ CUTLASS_HOST_DEVICE
+ ElementInputE* get_metaN(
+ int warp_row,
+ int thread_row,
+ int warp_col,
+ int thread_col) const {
+ return _meta +
+ _get_meta_offset(warp_row, thread_row, warp_col, thread_col, _rows);
+ }
+ CUTLASS_HOST_DEVICE
+ ElementInputE* get_metaT(
+ int warp_row,
+ int thread_row,
+ int warp_col,
+ int thread_col) const {
+ return _meta_trans +
+ _get_meta_offset(warp_col, thread_col, warp_row, thread_row, _cols);
+ }
+};
+
+struct MetadataCutlass {
+ // Layout needed to run 2:4 gemms in CUTLASS
+ // There is basically a hardware specific value for every
+ // 32x32 dense tile (1024 bits). Then these tiles are
+ // stored in a Column-Major fashion
+ ElementInputE* _meta;
+ ElementInputE* _meta_trans;
+ int64_t _meta_reordered_sy;
+ int64_t _meta_trans_reordered_sx;
+
+ static std::tuple<
+ at::Tensor, // return value of the function
+ at::Tensor, // packed
+ at::Tensor // packed_meta
+ >
+ create_compressed_representation(int rows, int cols, at::Tensor const& like) {
+ TORCH_CHECK(
+ like.scalar_type() == at::ScalarType::Half ||
+ like.scalar_type() == at::ScalarType::BFloat16);
+ auto roundedx = cutlass::round_up(rows, kWarpX);
+ auto roundedy = cutlass::round_up(cols, kWarpY);
+
+ // NB: Writing to `packed` tensors in transposed manner
+ at::Tensor packed =
+ at::empty({roundedx, cutlass::ceil_div(roundedy, 2)}, like.options());
+ at::Tensor packed_meta = at::empty(
+ {roundedx * roundedy / 16},
+ like.options().dtype(at::ScalarType::Short))
+ .view({roundedy / 32, roundedx, 2})
+ .permute({1, 2, 0});
+ return std::make_tuple(packed, packed, packed_meta);
+ }
+ MetadataCutlass(at::Tensor metaN, at::Tensor metaT, int rows, int cols) {
+ _meta = (ElementInputE*)metaN.data_ptr();
+ _meta_reordered_sy = metaN.stride(2);
+ _meta_trans = (ElementInputE*)metaT.data_ptr();
+ _meta_trans_reordered_sx = metaT.stride(2);
+ }
+ CUTLASS_HOST_DEVICE
+ int64_t _get_meta_offset(
+ int warp_row,
+ int thread_row,
+ int warp_col,
+ int thread_col,
+ int64_t stride) const {
+ int64_t offset = 0;
+ offset += warp_row * 2 + (warp_col / 32) * stride;
+ // A single warp is 32x64. The right 32x32 tile is at a different position
+ offset += 64 * (thread_row / 32);
+ offset += (thread_col / 32) * stride;
+ // Top/bottom 16x16 tile
+ offset += ((thread_row % 32) / 16) * 4;
+ // Left/right 16x16 tile
+ offset += ((thread_col % 32) / 16) * 2;
+ return offset;
+ }
+ CUTLASS_HOST_DEVICE
+ ElementInputE* get_metaN(
+ int warp_row,
+ int thread_row,
+ int warp_col,
+ int thread_col) const {
+ return _meta +
+ _get_meta_offset(
+ warp_row, thread_row, warp_col, thread_col, _meta_reordered_sy);
+ }
+ CUTLASS_HOST_DEVICE
+ ElementInputE* get_metaT(
+ int warp_row,
+ int thread_row,
+ int warp_col,
+ int thread_col) const {
+ return _meta_trans +
+ _get_meta_offset(
+ warp_col,
+ thread_col,
+ warp_row,
+ thread_row,
+ _meta_trans_reordered_sx);
+ }
+};
+
+template <typename KT, typename Metadata, typename Algorithm>
+__global__ void __launch_bounds__(32 /* num_threads */, 20)
+ sparse_semi_structured_tile_kernel(
+ typename KT::Params p,
+ Metadata metadata,
+ Algorithm algo) {
+ KT::sparse_semi_structured_tile_kernel(p, metadata, algo);
+}
+
+template <typename Element, typename MetadataFormat>
+std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> sparse_semi_structured_tile_typed(
+ const at::Tensor input,
+ std::string algorithm)
+{
+ using KT = KernelTypes<Element>;
+ c10::optional<at::cuda::CUDAGuard> device_guard;
+ if (!input.is_meta()) {
+ device_guard.emplace(input.device());
+ }
+
+ TORCH_CHECK(input.dim() == 2, "Can only sparsify 2d tensors");
+ TORCH_CHECK(
+ input.stride(1) == 1,
+ "Can only sparsify contiguous tensors. Sparsify the transpose otherwise.");
+
+ auto rows = input.size(0);
+ auto cols = input.size(1);
+
+ auto [compressed, packed, packed_meta_reordered] =
+ MetadataFormat::create_compressed_representation(rows, cols, input);
+ auto [compressed_trans, packed_trans, packed_trans_meta_reordered] =
+ MetadataFormat::create_compressed_representation(cols, rows, input);
+ TORCH_CHECK(
+ input.size(1) % 32 == 0, "Number of cols should be multiple of 32");
+
+ typename KT::Params p;
+ p.input = (Element const*)input.data_ptr();
+ p.input_s0 = input.stride(0);
+ p.input_dim0 = input.size(0);
+ p.input_dim1 = input.size(1);
+
+ p.packed = (Element*)packed.data_ptr();
+ p.packed_stride = packed.stride(0);
+ p.packed_trans = (Element*)packed_trans.data_ptr();
+ p.packed_trans_stride = packed_trans.stride(0);
+
+ MetadataFormat metadata = MetadataFormat(
+ packed_meta_reordered, packed_trans_meta_reordered, rows, cols);
+ at::Tensor threads_masks = at::empty(
+ {p.getBlocksGrid().x * p.getThreadsGrid().x,
+ p.getBlocksGrid().y * p.getThreadsGrid().y,
+ sizeof(p.threads_masks[0])},
+ input.options().dtype(at::ScalarType::Byte));
+ p.threads_masks = (uint64_t*)threads_masks.data_ptr();
+
+ bool kernel_launched = false;
+ auto launchKernel = [&](auto algo, std::string const& algo_name) {
+ if (algo_name == algorithm) {
+ kernel_launched = true;
+ if (input.is_meta()) {
+ return;
+ }
+ size_t smem_bytes = 0;
+ sparse_semi_structured_tile_kernel<KT>
+ <<<p.getBlocksGrid(),
+ p.getThreadsGrid(),
+ smem_bytes,
+ at::cuda::getCurrentCUDAStream()>>>(p, metadata, algo);
+ }
+ };
+ named_algorithms(launchKernel);
+ TORCH_CHECK(kernel_launched, "Unknown algorithm \"", algorithm, "\"");
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
+ return std::make_tuple(
+ compressed,
+ packed_meta_reordered,
+ compressed_trans,
+ packed_trans_meta_reordered,
+ threads_masks);
+}
+#endif
+
+// <packed, packed_meta_reordered, packed_trans, packed_trans_meta_reordered, threads_masks>
+std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> _sparse_semi_structured_tile(
+ const Tensor& input,
+ c10::string_view algorithm,
+ bool use_cutlass)
+{
+#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080)
+ AT_ERROR("_sparse_semi_structured_tile: not supported");
+ return std::make_tuple(Tensor{}, Tensor{}, Tensor{}, Tensor{}, Tensor{});
+#else
+ std::string algo(algorithm.data(), algorithm.size());
+
+ auto runTyped = [&](auto type)
+ {
+ using ElementT = decltype(type);
+ if (use_cutlass) {
+ return sparse_semi_structured_tile_typed<ElementT, MetadataCutlass>(input, algo);
+ }
+ else {
+ return sparse_semi_structured_tile_typed<ElementT, MetadataCuSparseLt>(input, algo);
+ }
+ };
+
+ if (input.scalar_type() == at::ScalarType::Half)
+ {
+ return runTyped(cutlass::half_t());
+ } else {
+ TORCH_CHECK(
+ input.scalar_type() == at::ScalarType::Half ||
+ input.scalar_type() == at::ScalarType::BFloat16, input.scalar_type());
+ return runTyped(cutlass::bfloat16_t());
+ }
+#endif
+}
+
+} // namespace at::native
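`MetadataCuSparseLt::create_compressed_representation` above sizes a single flat buffer as packed data followed by metadata. For a 128x128 fp16 input that arithmetic works out as below (size bookkeeping only; the bit layout inside the metadata is the reverse-engineered cuSparseLt format linked above):

rows, cols = 128, 128
bits_per_meta_word = 16                              # ElementInputE is uint16_t

data_scalars = rows * ((cols + 1) // 2)              # 128 * 64 = 8192 packed fp16 values
meta_scalars = (rows * cols) // bits_per_meta_word   # 1 bit per dense value -> 1024 words
storage_elems = data_scalars + meta_scalars          # 9216 fp16-sized slots, 18432 bytes

# the metadata view used above: (rows/128, cols/32, 256) == (1, 4, 256) == 1024 entries
assert meta_scalars == (rows // 128) * (cols // 32) * 256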
diff --git a/aten/src/ATen/native/sparse/cuda/SparseSemiSturcturedApply.cu b/aten/src/ATen/native/sparse/cuda/SparseSemiSturcturedApply.cu
new file mode 100644
index 0000000000..d2d280f84c
--- /dev/null
+++ b/aten/src/ATen/native/sparse/cuda/SparseSemiSturcturedApply.cu
@@ -0,0 +1,108 @@
+#include <ATen/ScalarOps.h>
+#include <ATen/Tensor.h>
+#include <ATen/Functions.h>
+#include <ATen/Utils.h>
+#include <c10/cuda/CUDAGuard.h>
+#include <c10/util/accumulate.h>
+#include <torch/library.h>
+
+#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080)
+#else
+#include <ATen/native/sparse/cuda/SparseSemiStructuredPack.h>
+#endif
+
+namespace at::native {
+
+#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080)
+#else
+template <typename KT>
+__global__ void __launch_bounds__(32 /* num_threads */)
+ sparse_semi_structured_apply_kernel(typename KT::Params p)
+{
+ KT::sparse_semi_structured_apply_kernel(p);
+}
+
+// Apply a 2:4 sparsity pattern computed with
+// `_sparse_semi_structured_tile` to another Tensor
+template <bool kIsMeta, typename Element>
+std::tuple<Tensor, Tensor> _sparse_semi_structured_apply_typed(Tensor input, Tensor threads_masks)
+{
+ using KT = KernelTypes<Element>;
+ // TODO: Technically we should be able to deal with that
+ // by running on the transpose of `input` and swapping
+ // `packed` & `packed_t`.
+ // This would require adapting the `threads_masks` a bit, though.
+ if (input.stride(1) != 1) {
+ input = input.contiguous();
+ }
+ c10::optional<at::cuda::CUDAGuard> device_guard;
+ if (!kIsMeta) {
+ device_guard.emplace(input.device());
+ }
+
+ TORCH_CHECK(input.dim() == 2);
+ TORCH_CHECK(input.stride(1) == 1);
+ TORCH_CHECK(input.stride(0) % 8 == 0);
+ TORCH_CHECK(input.size(1) % 32 == 0, "Wrong alignment shape[1]");
+
+ auto roundedx = cutlass::round_up(input.size(0), kWarpX);
+ auto roundedy = cutlass::round_up(input.size(1), kWarpY);
+ at::Tensor packed =
+ at::empty({roundedx, cutlass::ceil_div(roundedy, 2)}, input.options());
+ at::Tensor packed_trans =
+ at::empty({roundedy, cutlass::ceil_div(roundedx, 2)}, input.options());
+
+ typename KT::Params p;
+ p.input = (Element const*)input.data_ptr();
+ p.input_s0 = input.stride(0);
+ p.input_dim0 = input.size(0);
+ p.input_dim1 = input.size(1);
+
+ p.packed = (Element*)packed.data_ptr();
+ p.packed_stride = packed.stride(0);
+ p.packed_trans = (Element*)packed_trans.data_ptr();
+ p.packed_trans_stride = packed_trans.stride(0);
+
+ p.threads_masks = (uint64_t*)threads_masks.data_ptr();
+
+ TORCH_CHECK(threads_masks.dim() == 3);
+ TORCH_CHECK(
+ threads_masks.size(0) == p.getBlocksGrid().x * p.getThreadsGrid().x);
+ TORCH_CHECK(
+ threads_masks.size(1) == p.getBlocksGrid().y * p.getThreadsGrid().y);
+ TORCH_CHECK(threads_masks.stride(1) == sizeof(p.threads_masks[0]));
+ TORCH_CHECK(threads_masks.size(2) == sizeof(p.threads_masks[0]));
+ TORCH_CHECK(threads_masks.stride(2) == 1);
+ TORCH_CHECK(threads_masks.scalar_type() == at::ScalarType::Byte);
+
+ if (!kIsMeta) {
+ size_t smem_bytes = 0;
+ sparse_semi_structured_apply_kernel<KT>
+ <<<p.getBlocksGrid(),
+ p.getThreadsGrid(),
+ smem_bytes,
+ at::cuda::getCurrentCUDAStream()>>>(p);
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
+ }
+ return std::make_tuple(packed, packed_trans);
+}
+#endif
+
+std::tuple<Tensor, Tensor> _sparse_semi_structured_apply(const Tensor& input, const Tensor& threads_masks) // Returned by `_sparse_semi_structured_tile`
+{
+#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080)
+ AT_ERROR("_sparse_semi_structured_apply: not supported");
+ return std::make_tuple(Tensor{}, Tensor{});
+#else
+ TORCH_CHECK(
+ input.scalar_type() == at::ScalarType::Half || input.scalar_type() == at::ScalarType::BFloat16,
+ "Unsupported dtype - only `float16` and `bfloat16` are supported currently"
+ );
+ auto result = (input.scalar_type() == at::ScalarType::Half)
+ ? _sparse_semi_structured_apply_typed<false, cutlass::half_t>(input, threads_masks)
+ : _sparse_semi_structured_apply_typed<false, cutlass::bfloat16_t>(input, threads_masks);
+ return result;
+#endif
+}
+
+} // namespace
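Besides these raw ops, the commit message mentions a new `prune_dense_static_sort` classmethod on the subclasses, exercised by the test changes below. A hedged end-to-end sketch (CUDA/SM80+ assumed; the shapes are arbitrary, and the matmul pattern follows `test_sp24_compile`):

import torch
from torch.sparse import SparseSemiStructuredTensorCUTLASS

w = torch.randn(128, 128, device="cuda", dtype=torch.float16)
w_sparse = SparseSemiStructuredTensorCUTLASS.prune_dense_static_sort(w)

x = torch.randn(64, 128, device="cuda", dtype=torch.float16)
out = x @ w_sparse.t()                 # sparse operand on the right, as in test_sp24_compile
mask = w_sparse.to_dense().bool()      # kept positions, as sparse24_largest_mask_2d does
print(out.shape, int(mask.sum()))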
diff --git a/aten/src/ATen/native/sparse/cuda/StaticSort.h b/aten/src/ATen/native/sparse/cuda/StaticSort.h
new file mode 100644
index 0000000000..e2fc5675f6
--- /dev/null
+++ b/aten/src/ATen/native/sparse/cuda/StaticSort.h
@@ -0,0 +1,100 @@
+#pragma once
+#include <cutlass/cutlass.h>
+
+/**
+ * A Functor class to create a sort for fixed sized arrays/containers with a
+ * compile time generated Bose-Nelson sorting network.
+ * \tparam NumElements The number of elements in the array or container to sort.
+ * \tparam T The element type.
+ * \tparam Compare A comparator functor class that returns true if lhs < rhs.
+ */
+template <unsigned NumElements>
+class StaticSort {
+ template <class A>
+ struct Swap {
+ template <class T>
+ CUTLASS_HOST_DEVICE void s(T& v0, T& v1) {
+ // Explicitly code out the Min and Max to nudge the compiler
+ // to generate branchless code.
+ T t = v0 < v1 ? v0 : v1; // Min
+ v1 = v0 < v1 ? v1 : v0; // Max
+ v0 = t;
+ }
+
+ CUTLASS_HOST_DEVICE Swap(A& a, const int& i0, const int& i1) {
+ s(a[i0], a[i1]);
+ }
+ };
+
+ template <class A, int I, int J, int X, int Y>
+ struct PB {
+ CUTLASS_HOST_DEVICE PB(A& a) {
+ enum {
+ L = X >> 1,
+ M = (X & 1 ? Y : Y + 1) >> 1,
+ IAddL = I + L,
+ XSubL = X - L
+ };
+ PB<A, I, J, L, M> p0(a);
+ PB<A, IAddL, J + M, XSubL, Y - M> p1(a);
+ PB<A, IAddL, J, XSubL, M> p2(a);
+ }
+ };
+
+ template <class A, int I, int J>
+ struct PB<A, I, J, 1, 1> {
+ CUTLASS_HOST_DEVICE PB(A& a) {
+ Swap<A> s(a, I - 1, J - 1);
+ }
+ };
+
+ template <class A, int I, int J>
+ struct PB<A, I, J, 1, 2> {
+ CUTLASS_HOST_DEVICE PB(A& a) {
+ Swap<A> s0(a, I - 1, J);
+ Swap<A> s1(a, I - 1, J - 1);
+ }
+ };
+
+ template <class A, int I, int J>
+ struct PB<A, I, J, 2, 1> {
+ CUTLASS_HOST_DEVICE PB(A& a) {
+ Swap<A> s0(a, I - 1, J - 1);
+ Swap<A> s1(a, I, J - 1);
+ }
+ };
+
+ template <class A, int I, int M, bool Stop = false>
+ struct PS {
+ CUTLASS_HOST_DEVICE PS(A& a) {
+ enum { L = M >> 1, IAddL = I + L, MSubL = M - L };
+ PS<A, I, L, (L <= 1)> ps0(a);
+ PS<A, IAddL, MSubL, (MSubL <= 1)> ps1(a);
+ PB<A, I, IAddL, L, MSubL> pb(a);
+ }
+ };
+
+ template <class A, int I, int M>
+ struct PS<A, I, M, true> {
+ CUTLASS_HOST_DEVICE PS(A& a) {}
+ };
+
+ public:
+ /**
+ * Sorts the array/container arr.
+ * \param arr The array/container to be sorted.
+ */
+ template <class Container>
+ CUTLASS_HOST_DEVICE void operator()(Container& arr) const {
+ PS<Container, 1, NumElements, (NumElements <= 1)> ps(arr);
+ };
+
+ /**
+ * Sorts the array arr.
+ * \param arr The array to be sorted.
+ */
+ template <class T>
+ CUTLASS_HOST_DEVICE void operator()(T* arr) const {
+ PS<T*, 1, NumElements, (NumElements <= 1)> ps(arr);
+ };
+};
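StaticSort above expands a Bose-Nelson comparator network at compile time, so every invocation runs the same fixed min/max schedule regardless of the data, which is why the tile kernels can sort 4 or 16 candidates without warp divergence. A small Python illustration of that idea for four elements (the comparator schedule of the generated network may differ; only the data-independent pattern is the point):

def compare_exchange(a, i, j):
    # mirrors StaticSort::Swap::s: compute the min and the max, then write both back
    lo, hi = (a[i], a[j]) if a[i] < a[j] else (a[j], a[i])
    a[i], a[j] = lo, hi

def sort4(a):
    # a fixed 5-comparator network for 4 elements; no data-dependent control flow
    for i, j in [(0, 1), (2, 3), (0, 2), (1, 3), (1, 2)]:
        compare_exchange(a, i, j)
    return a

print(sort4([3.0, 1.0, 4.0, 2.0]))  # [1.0, 2.0, 3.0, 4.0]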
diff --git a/test/expect/HasDecompTest.test_has_decomposition.expect b/test/expect/HasDecompTest.test_has_decomposition.expect
index 79a3455713..8fbdc431f4 100644
--- a/test/expect/HasDecompTest.test_has_decomposition.expect
+++ b/test/expect/HasDecompTest.test_has_decomposition.expect
@@ -524,8 +524,11 @@ aten::_sparse_mask_projection.out
aten::_sparse_mm_reduce_impl
aten::_sparse_mm_reduce_impl_backward
aten::_sparse_semi_structured_addmm
+aten::_sparse_semi_structured_apply
+aten::_sparse_semi_structured_apply_dense
aten::_sparse_semi_structured_linear
aten::_sparse_semi_structured_mm
+aten::_sparse_semi_structured_tile
aten::_sparse_softmax
aten::_sparse_softmax.out
aten::_sparse_softmax_backward_data
diff --git a/test/test_sparse_semi_structured.py b/test/test_sparse_semi_structured.py
index a09e2647eb..12967820d3 100644
--- a/test/test_sparse_semi_structured.py
+++ b/test/test_sparse_semi_structured.py
@@ -5,6 +5,7 @@ import unittest
import torch
from torch import nn
+import torch.nn.functional as F
from torch.sparse import (
SparseSemiStructuredTensor,
@@ -13,6 +14,12 @@ from torch.sparse import (
to_sparse_semi_structured,
)
+from torch.sparse._semi_structured_conversions import (
+ sparse_semi_structured_from_dense_cutlass,
+ _sparse_semi_structured_tile,
+ _compute_compressed_swizzled_bitmask,
+)
+
from torch.testing import make_tensor
from torch.testing._internal.common_device_type import (
@@ -32,28 +39,48 @@ from torch.testing._internal.common_utils import (
IS_WINDOWS,
)
-from torch.utils._triton import has_triton
+import pytest
-CUSPARSELT_NUM_ALG_IDS = 4
-CUSPARSELT_MIXED_DTYPE_SUPPORT = [torch.float16, torch.bfloat16, torch.int32]
+from torch.utils._triton import has_triton
SEMI_STRUCTURED_SUPPORTED_DTYPES = [torch.float16, torch.bfloat16, torch.float32, torch.int8]
-SEMI_STRUCTURED_SUPPORTED_BACKENDS = []
+SEMI_STRUCTURED_SUPPORTED_BACKENDS = {}
_IS_SM8X = False
+
if torch.cuda.is_available():
_IS_SM8X = torch.cuda.get_device_capability(0)[0] == 8
- SEMI_STRUCTURED_SUPPORTED_BACKENDS.append("cutlass")
+ SEMI_STRUCTURED_SUPPORTED_BACKENDS["cutlass"] = SparseSemiStructuredTensorCUTLASS
# check if cslt is available for now using this:
# TODO when we add cusparselt as a backend, we can update this to be use torch.cusparselt.is_available()
try:
torch._cslt_compress(torch.ones(128, 256).cuda())
- SEMI_STRUCTURED_SUPPORTED_BACKENDS.append("cusparselt")
+ SEMI_STRUCTURED_SUPPORTED_BACKENDS["cusparselt"] = SparseSemiStructuredTensorCUSPARSELT
except Exception:
pass
+inference_dtypes = dtypes(torch.float16, torch.bfloat16, torch.float32, torch.int8)
+training_dtypes = dtypes(torch.float16, torch.bfloat16)
+parametrize_backends = parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
+
+atol_rtol_kw = {
+ torch.float16: {
+ "rtol": 1e-3,
+ "atol": 1e-3,
+ },
+ torch.bfloat16: {
+ "rtol": 1e-1,
+ "atol": 1e-1,
+ },
+}
+def sparse24_largest_mask_2d(original):
+ sparse = SparseSemiStructuredTensorCUTLASS.prune_dense_static_sort(original)
+ return sparse.to_dense().bool()
+
+def sparsify24_dense(original):
+ return sparse24_largest_mask_2d(original) * original
def rand_sparse_semi_structured_mask(
r, c, dtype=torch.float16, device="cuda", choice=None
@@ -97,6 +124,7 @@ def rand_sparse_semi_structured(r, c, dtype, device, choice=None):
dense = dense.masked_fill(~mask, 0)
return dense
+
def rand_sparse_semi_structured_all_patterns(r, c, dtype, device):
pattern = '2by4' if dtype != torch.float32 else '1by2'
if pattern == '1by2':
@@ -171,8 +199,6 @@ class SparseSemiStructuredTensorCompileTest(torch._dynamo.test_case.TestCase):
x = x.contiguous()
return torch.nn.functional.relu(x)
- SparseSemiStructuredTensor._FORCE_CUTLASS = backend == "cutlass"
-
input = torch.rand(dense_input_shape, device="cuda").half()
model = Model().eval().cuda().half()
mod_linear = model.linear
@@ -182,7 +208,7 @@ class SparseSemiStructuredTensorCompileTest(torch._dynamo.test_case.TestCase):
mod_linear.weight = nn.Parameter(mod_linear.weight * mask)
dense_result = model(input)
- mod_linear.weight = nn.Parameter(to_sparse_semi_structured(mod_linear.weight))
+ mod_linear.weight = nn.Parameter(SEMI_STRUCTURED_SUPPORTED_BACKENDS[backend].from_dense(mod_linear.weight))
sparse_result = model(input)
model = torch.compile(model, backend="inductor", fullgraph=True)
@@ -213,20 +239,32 @@ class SparseSemiStructuredTensorCompileTest(torch._dynamo.test_case.TestCase):
SparseSemiStructuredTensorCompileTest._test_mlp_contiguous_relu_compile("cutlass", dense_input_shape)
+ def test_sp24_compile(self) -> None:
+ x = torch.randn([1024, 512], device="cuda", dtype=torch.float16, requires_grad=True)
+ e = torch.eye(x.shape[0], x.shape[0], device="cuda", dtype=torch.float16)
+
+ def fn(x, e):
+ y = SparseSemiStructuredTensorCUSPARSELT.prune_dense_static_sort(x)
+ y = y.t()
+ return x @ y
+
+ # Eager
+ output = fn(x, e)
+ output.backward(output)
+ # Torch compile
+ output = torch.compile(fn)(x, e)
+ output.backward(output)
+
class TestSparseSemiStructured(TestCase):
def setUp(self):
if not _IS_SM8X:
self.skipTest('Only runs on SM80')
- @dtypes(*SEMI_STRUCTURED_SUPPORTED_DTYPES)
- @parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
+ @inference_dtypes
+ @parametrize_backends
def test_to_sparse_semi_structured(self, dtype, backend):
SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
-
- if backend == "cutlass" and IS_WINDOWS:
- self.skipTest("CUTLASS not supported on Windows")
-
A = rand_sparse_semi_structured_mask(128, 256, dtype=dtype)
A_sparse = to_sparse_semi_structured(A)
@@ -237,18 +275,14 @@ class TestSparseSemiStructured(TestCase):
assert isinstance(A, torch.Tensor)
assert isinstance(A_sparse, SparseSemiStructuredTensor)
-
- @dtypes(*SEMI_STRUCTURED_SUPPORTED_DTYPES)
+ @inference_dtypes
+ @parametrize_backends
@parametrize("dense_input_shape", [(128, 1), (128, 64), (128, 128)])
- @parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
def test_mm_sparse_first_NN(self, dense_input_shape, dtype, device, backend):
"""
Ensure torch.mm(A_sparse, B) is correct for float16 and will throw error for int8
"""
SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
- if backend == "cutlass" and IS_WINDOWS:
- self.skipTest("CUTLASS not supported on Windows")
-
A = rand_sparse_semi_structured_mask(256, 128, dtype=dtype)
A_sparse = to_sparse_semi_structured(A)
@@ -256,7 +290,6 @@ class TestSparseSemiStructured(TestCase):
# Currently we don't support int matmul on GPU, so evaluate on CPU and copy over
if dtype is torch.int8:
- # This should fail
if backend == "cutlass":
with self.assertRaisesRegex(RuntimeError, "spgemm_cutlass_dispatch_layouts"):
sparse_result = torch.mm(A_sparse, B)
@@ -269,18 +302,15 @@ class TestSparseSemiStructured(TestCase):
sparse_result = torch.mm(A_sparse, B)
assert torch.allclose(dense_result, sparse_result, rtol=1e-3, atol=1e-3)
- @dtypes(*SEMI_STRUCTURED_SUPPORTED_DTYPES)
+ @inference_dtypes
+ @parametrize_backends
@parametrize("dense_input_shape", [(1, 128), (64, 128), (128, 128)])
- @parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
def test_mm_sparse_first_NT(self, dense_input_shape, dtype, device, backend):
"""
Ensure torch.mm(A_sparse, B.t()) is correct for float16/bfloat16
and will throw an error for int8 + padding
"""
SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
- if backend == "cutlass" and IS_WINDOWS:
- self.skipTest("CUTLASS not supported on Windows")
-
A = rand_sparse_semi_structured_mask(256, 128, dtype=dtype)
A_sparse = to_sparse_semi_structured(A)
@@ -308,9 +338,9 @@ class TestSparseSemiStructured(TestCase):
sparse_result = torch.mm(A_sparse, B.t())
assert torch.allclose(dense_result, sparse_result, rtol=1e-3, atol=1e-3)
- @dtypes(*SEMI_STRUCTURED_SUPPORTED_DTYPES)
+ @inference_dtypes
@parametrize("dense_input_shape", [(1, 128), (64, 128), (128, 128)])
- @parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
+ @parametrize_backends
def test_mm_sparse_first_TN(self, dtype, dense_input_shape, device, backend):
"""
Ensure torch.mm(A_sparse.t(), B) throws error
@@ -329,9 +359,9 @@ class TestSparseSemiStructured(TestCase):
):
torch.mm(A_sparse.t(), B)
- @dtypes(*SEMI_STRUCTURED_SUPPORTED_DTYPES)
+ @inference_dtypes
@parametrize("dense_input_shape", [(1, 128), (64, 128), (128, 128)])
- @parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
+ @parametrize_backends
def test_mm_sparse_second_NT(self, dense_input_shape, dtype, device, backend):
"""
Ensure torch.mm(A, B_sparse.t()) is correct
@@ -354,9 +384,9 @@ class TestSparseSemiStructured(TestCase):
assert torch.allclose(dense_result, sparse_result, rtol=1e-3, atol=1e-3)
- @dtypes(*SEMI_STRUCTURED_SUPPORTED_DTYPES)
+ @inference_dtypes
@parametrize("dense_input_shape", [(1, 128), (64, 128), (128, 128)])
- @parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
+ @parametrize_backends
def test_mm_sparse_second_NN(self, dense_input_shape, dtype, device, backend):
"""
Ensure torch.mm(A, B_sparse) throws error
@@ -377,7 +407,7 @@ class TestSparseSemiStructured(TestCase):
@parametrize("dense_input_shape", [(1, 128), (64, 128), (128, 128), (64, 128, 128)])
@parametrize("inference_mode", [subtest(True), subtest(False)])
- @parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
+ @parametrize_backends
def test_linear(self, dense_input_shape, inference_mode, device, backend):
"""
Test nn.Linear has the same numerics
@@ -405,11 +435,9 @@ class TestSparseSemiStructured(TestCase):
assert torch.allclose(dense_result, sparse_result, rtol=1e-3, atol=1e-3)
@parametrize("dense_input_shape", [(1, 128), (64, 128), (128, 128), (64, 128, 128)])
- @parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
+ @parametrize_backends
def test_mlp(self, device, dense_input_shape, backend):
- SparseSemiStructuredTensor._FORCE_CUTLASS = backend == "cutlass"
- if backend == "cutlass" and IS_WINDOWS:
- self.skipTest("CUTLASS not supported on Windows")
+ SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
input = torch.rand(dense_input_shape, device=device).half()
model = (
nn.Sequential(
@@ -437,7 +465,7 @@ class TestSparseSemiStructured(TestCase):
assert torch.allclose(dense_result, sparse_result, rtol=1e-3, atol=1e-3)
- @parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
+ @parametrize_backends
def test_values(self, backend):
SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
if backend == "cutlass" and IS_WINDOWS:
@@ -447,7 +475,7 @@ class TestSparseSemiStructured(TestCase):
assert A_sparse.values().shape == (128, 64)
assert (A_sparse.values() == 1).all()
- @parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
+ @parametrize_backends
def test_indices(self, backend):
SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
if backend == "cutlass" and IS_WINDOWS:
@@ -456,16 +484,11 @@ class TestSparseSemiStructured(TestCase):
A_sparse = to_sparse_semi_structured(A)
assert A_sparse.indices().shape == (128, 8)
- @dtypes(*SEMI_STRUCTURED_SUPPORTED_DTYPES)
- @parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
+ @inference_dtypes
+ @parametrize_backends
def test_min_sparse_shape(self, dtype, device, backend):
SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
- if backend == "cutlass" and IS_WINDOWS:
- self.skipTest("CUTLASS not supported on Windows")
- if backend == "cutlass":
- config = SparseSemiStructuredTensorCUTLASS._DTYPE_SHAPE_CONSTRAINTS[dtype]
- elif backend == "cusparselt":
- config = SparseSemiStructuredTensorCUSPARSELT._DTYPE_SHAPE_CONSTRAINTS[dtype]
+ config = SEMI_STRUCTURED_SUPPORTED_BACKENDS[backend]._DTYPE_SHAPE_CONSTRAINTS[dtype]
A = rand_sparse_semi_structured_mask(config.sparse_min_rows, config.sparse_min_cols, dtype=dtype, device=device)
A_sparse = to_sparse_semi_structured(A)
B = torch.rand((config.sparse_min_cols, config.dense_min_cols), device=device).to(dtype)
@@ -479,8 +502,8 @@ class TestSparseSemiStructured(TestCase):
sparse_res = torch.mm(A_sparse, B)
assert torch.allclose(sparse_res, dense_res, rtol=1e-3, atol=1e-3)
- @dtypes(*SEMI_STRUCTURED_SUPPORTED_DTYPES)
- @parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
+ @inference_dtypes
+ @parametrize_backends
def test_unsupported_shape(self, dtype, device, backend):
SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
if backend == "cutlass" and IS_WINDOWS:
@@ -490,7 +513,7 @@ class TestSparseSemiStructured(TestCase):
A_sparse = to_sparse_semi_structured(A)
@dtypes(*all_types_and_complex())
- @parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
+ @parametrize_backends
def test_unsupported_dtype(self, dtype, device, backend):
SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
if backend == "cutlass" and IS_WINDOWS:
@@ -503,7 +526,7 @@ class TestSparseSemiStructured(TestCase):
else:
A_sparse = to_sparse_semi_structured(A)
- @parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS)
+ @parametrize_backends
def test_unsupported_dim(self, device, backend):
SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
if backend == "cutlass" and IS_WINDOWS:
@@ -513,13 +536,323 @@ class TestSparseSemiStructured(TestCase):
with self.assertRaisesRegex(RuntimeError, "Error original_tensor.dim"):
A_sparse = to_sparse_semi_structured(A)
- @unittest.skipIf(TEST_WITH_ROCM or IS_WINDOWS, "ROCm and Windows doesn't support CUTLASS")
- @parametrize("backend", ["cutlass"])
- @dtypes(*SEMI_STRUCTURED_SUPPORTED_DTYPES)
- def test_linear_cutlass(self, device, dtype, backend):
- SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
- if backend == "cutlass" and IS_WINDOWS:
- self.skipTest("CUTLASS not supported on Windows")
+
+def create_random_mask(shape) -> torch.Tensor:
+ r = random.Random(0)
+ mask = torch.zeros(shape, dtype=torch.bool)
+ for line in range(mask.shape[0]):
+ for col in range(0, mask.shape[1], 4):
+ sparsity = r.choice(
+ [
+ [False, False, True, True],
+ [False, True, False, True],
+ [True, False, False, True],
+ [False, True, True, False],
+ [True, False, True, False],
+ [True, True, False, False],
+ ]
+ )
+ mask[line, col : col + 4] = torch.tensor(sparsity, dtype=torch.bool)
+ return mask
+
+class TestSparseSemiStructuredTraining(TestCase):
+
+ def setUp(self):
+ if not _IS_SM8X:
+ self.skipTest('Only runs on SM80')
+
+
+ @training_dtypes
+ def test_prune_dense_static_sort(self, dtype) -> None:
+        # Ideally we would like to clone and compare, but that won't work because the sorting order will be different.
+        # Instead, we pass the pruned matrix to the CUDA implementation and preserve the sparsity pattern.
+ dense = torch.randn(128, 128, device="cuda", dtype=dtype)
+ pruned = _sparse_semi_structured_tile(dense)
+
+ # CUTLASS
+ reference_cutlass = SparseSemiStructuredTensorCUTLASS.prune_dense_static_sort(pruned, algorithm="largest_abs_values_greedy")
+ assert torch.allclose(pruned, reference_cutlass.to_dense())
+
+ packed_cutlass, meta_cutlass = sparse_semi_structured_from_dense_cutlass(pruned)
+ packed_t_cutlass, meta_t_cutlass = sparse_semi_structured_from_dense_cutlass(pruned.t().contiguous())
+ meta_cutlass = meta_cutlass.as_strided(reference_cutlass.meta.shape, reference_cutlass.meta.stride())
+ meta_t_cutlass = meta_t_cutlass.as_strided(reference_cutlass.meta_t.shape, reference_cutlass.meta_t.stride())
+ compressed_swizzled_bitmask = _compute_compressed_swizzled_bitmask(pruned)
+ compressed_swizzled_bitmask = compressed_swizzled_bitmask.as_strided(reference_cutlass.compressed_swizzled_bitmask.shape,
+ reference_cutlass.compressed_swizzled_bitmask.stride())
+ cutlass = SparseSemiStructuredTensorCUTLASS(dense.shape,
+ packed_cutlass,
+ meta_cutlass,
+ packed_t_cutlass,
+ meta_t_cutlass,
+ compressed_swizzled_bitmask)
+ assert torch.allclose(reference_cutlass.to_dense(), cutlass.to_dense())
+
+ # CUSPARSELT
+ reference_cusparselt = SparseSemiStructuredTensorCUSPARSELT.prune_dense_static_sort(pruned,
+ algorithm="largest_abs_values_greedy")
+ assert torch.allclose(pruned, reference_cusparselt.to_dense())
+
+ packed_cusparselt = torch._cslt_compress(pruned)
+ packed_t_cusparselt = torch._cslt_compress(pruned.t().contiguous())
+ cusparselt = SparseSemiStructuredTensorCUSPARSELT(dense.shape,
+ packed_cusparselt,
+ None,
+ packed_t_cusparselt,
+ None,
+ compressed_swizzled_bitmask)
+ assert torch.allclose(reference_cusparselt.to_dense(), cusparselt.to_dense())
+
+
+
+ @training_dtypes
+ @parametrize_backends
+ def test_pruning_algo_largest_abs_values_greedy(self, dtype, backend) -> None:
+ inp = torch.tensor(
+ [[4, 3, 2, 1], [-1, -3, 0.6, 0.5], [1, 2, 3, 4], [10, 2, -1, 5]],
+ device="cuda",
+ dtype=dtype,
+ )
+ inp = F.pad(inp, (0, 128 - 4, 0, 128 - 4), "constant", 1)
+ sInp = SEMI_STRUCTURED_SUPPORTED_BACKENDS[backend].prune_dense_static_sort(inp, algorithm="largest_abs_values_greedy")
+
+ mask = sInp.to_dense() / inp
+ assert mask[:4, :4].int().tolist() == [
+ [1, 1, 0, 0],
+ [0, 1, 1, 0],
+ [0, 0, 1, 1],
+ [1, 0, 0, 1],
+ ]
+
+ @training_dtypes
+ def test_gemm(self, dtype) -> None:
+ M, N, K = 32, 32, 64
+ a = torch.randn([M, K], device="cuda", dtype=dtype)
+ b = torch.randn([K, N], device="cuda", dtype=dtype)
+ mask = rand_sparse_semi_structured_mask(M, K, dtype=torch.bool)
+
+ a.masked_fill_(~mask, 0)
+
+ a_sparse = to_sparse_semi_structured(a)
+
+ masked_a = a * mask
+ ref_out = masked_a @ b
+ sp24_out = a_sparse @ b
+ assert torch.allclose(ref_out, sp24_out, **atol_rtol_kw[dtype])
+
+
+ @training_dtypes
+ @parametrize_backends
+ def test_pack_both_ways_meta_correctness(self, dtype, backend) -> None:
+ M, N = 128, 256
+ # Construct x to make sure we always have exactly 8 elements per 4x4 tile
+ a = (4 * torch.arange(8))[:, None] + torch.arange(8)[None, :]
+ a = a.repeat(M // 8, N // 8)
+ assert a.shape == (M, N)
+ a = a.cuda().to(dtype)
+ b = torch.randn([a.shape[1], 128], device="cuda", dtype=dtype)
+
+ a_sparse = SEMI_STRUCTURED_SUPPORTED_BACKENDS[backend].prune_dense_static_sort(a)
+
+ mask_dense = sparse24_largest_mask_2d(a).to(dtype)
+
+ if backend == "cutlass":
+ assert isinstance(a_sparse, SparseSemiStructuredTensorCUTLASS)
+ (packed, meta, packed_t, meta_t, bitmask) = torch._sparse_semi_structured_tile(
+ mask_dense, use_cutlass=True)
+
+ sparse_mask = SparseSemiStructuredTensorCUTLASS(
+ mask_dense.shape,
+ packed=packed,
+ meta=meta,
+ packed_t=packed_t,
+ meta_t=meta_t,
+ compressed_swizzled_bitmask=bitmask,
+ )
+ assert torch.allclose(a_sparse.meta.view(torch.short), sparse_mask.meta)
+
+ ref_gemm = (mask_dense * a) @ b
+ pack_gemm = a_sparse @ b
+ assert torch.allclose(ref_gemm, pack_gemm, **atol_rtol_kw[dtype])
+
+ @training_dtypes
+ def test_pack_both_ways_id(self, dtype) -> None:
+ N = 512
+ torch.manual_seed(0)
+ a = torch.randn([N, N], dtype=dtype, device="cuda")
+ b = torch.eye(N, dtype=dtype, device="cuda")
+
+ packed, meta, packed_t, meta_t = torch._sparse_semi_structured_tile(a)[
+ :4
+ ]
+ # Heuristic to ensure we pack the same values
+ assert torch.allclose(
+ packed.to(torch.float64).sum(), packed_t.to(torch.float64).sum()
+ )
+
+ mask_dense = sparse24_largest_mask_2d(a.to(dtype))
+
+ ref_gemm = mask_dense * a
+ # Test A@B
+ pack_gemm = torch._sparse_semi_structured_linear(b.t(), packed, meta).t()
+ max_diff = (ref_gemm - pack_gemm).abs().argmax()
+ assert torch.allclose(
+ ref_gemm, pack_gemm,
+ **atol_rtol_kw[dtype]
+ ), f"packed is wrong at pos: ({max_diff // N}, {max_diff % N})"
+ # Test A.t@B
+ pack_gemm = torch._sparse_semi_structured_linear(b.t(), packed_t, meta_t)
+ max_diff = (ref_gemm - pack_gemm).abs().argmax()
+
+ assert torch.allclose(
+ ref_gemm, pack_gemm,
+ **atol_rtol_kw[dtype]
+ ), f"packed_t is wrong at pos: ({max_diff // N}, {max_diff % N})"
+
+ @training_dtypes
+ def test_pack_both_ways_edge_case1(self, dtype) -> None:
+ # In this case, the heuristic will keep 7 values out of 16
+        # instead of 8. Let's see how the kernel handles this.
+ quad = torch.tensor(
+ [
+ [2, -1, -2, -3], # Should be packed as `2 <null>`
+ [-1, 8, -1, 6],
+ [-1, -1, 4, 5],
+ [-1, 3, 7, -1],
+ ],
+ dtype=dtype,
+ device="cuda",
+ )
+ a = torch.randn([32, 64], dtype=dtype, device="cuda")
+ a[:4, :4] = quad
+ packed, meta, packed_t, meta_t = torch._sparse_semi_structured_tile(a)[:4]
+ # Check first line in A
+ assert packed[0, 0].item() == 2
+ assert packed[0, 1].item() == 0
+ # And first column in A.t
+ assert packed_t[0, 0].item() == 2
+ assert packed_t[0, 1].item() == 0
+
+ @training_dtypes
+ def test_sp24_apply(self, dtype) -> None:
+ M, N = 256, 1024
+ x = torch.randn([M, N], dtype=dtype, device="cuda")
+ (
+ packed,
+ meta,
+ packed_t,
+ meta_t,
+ bitmask,
+ ) = torch._sparse_semi_structured_tile(x)
+ packed2, packed_t2 = torch._sparse_semi_structured_apply(x, bitmask)
+ assert torch.allclose(packed, packed2)
+ assert torch.allclose(packed_t, packed_t2)
+
+ @training_dtypes
+ def test_sp24_apply_dense(self, dtype) -> None:
+ M, N = 256, 1024
+ x = torch.randn([M, N], dtype=dtype, device="cuda")
+ (
+ packed,
+ meta,
+ packed_t,
+ meta_t,
+ bitmask,
+ ) = torch._sparse_semi_structured_tile(x)
+
+ expected = SparseSemiStructuredTensorCUTLASS(
+ x.shape,
+ packed=packed,
+ meta=meta,
+ packed_t=packed_t,
+ meta_t=meta_t,
+ compressed_swizzled_bitmask=bitmask,
+ ).to_dense()
+
+ packed2, packed_t2 = torch._sparse_semi_structured_apply(x, bitmask)
+ sparse = SparseSemiStructuredTensorCUTLASS(
+ x.shape,
+ packed=packed2,
+ meta=meta,
+ packed_t=packed_t2,
+ meta_t=meta_t,
+ compressed_swizzled_bitmask=bitmask,
+ )
+
+ dense = torch._sparse_semi_structured_apply_dense(x, bitmask)
+
+ assert torch.allclose(dense, expected)
+ assert torch.allclose(sparse.to_dense(), expected)
+
+
+ @training_dtypes
+ def test_sp24_matmuls(self, dtype) -> None:
+ M, N, K = 64, 256, 1024
+ a = torch.randn([M, K], device="cuda", dtype=dtype)
+ b = torch.randn([K, N], device="cuda", dtype=dtype)
+ a_m = sparse24_largest_mask_2d(a)
+ b_m = sparse24_largest_mask_2d(b)
+ (packed, meta, packed_t, meta_t, bitmask) = torch._sparse_semi_structured_tile(a)
+ a_s = SparseSemiStructuredTensorCUTLASS(
+ a.shape,
+ packed=packed,
+ meta=meta,
+ packed_t=packed_t,
+ meta_t=meta_t,
+ compressed_swizzled_bitmask=bitmask,
+ )
+ (packed, meta, packed_t, meta_t, bitmask) = torch._sparse_semi_structured_tile(b)
+ b_s = SparseSemiStructuredTensorCUTLASS(
+ b.shape,
+ packed=packed,
+ meta=meta,
+ packed_t=packed_t,
+ meta_t=meta_t,
+ compressed_swizzled_bitmask=bitmask,
+ )
+
+ assert torch.allclose(a_s @ b, (a * a_m) @ b, rtol=1e-1, atol=1e-1)
+ assert torch.allclose(a @ b_s, a @ (b * b_m), rtol=1e-1, atol=1e-1)
+ assert torch.allclose(
+ a @ a_s.t(), a @ (a * a_m).t(), rtol=1e-1, atol=1e-1
+ )
+ assert torch.allclose(
+ a_s.t() @ a, (a * a_m).t() @ a, rtol=1e-1, atol=1e-1
+ )
+
+ def test_sp24_matmuls_mat_vec(self) -> None:
+ a = torch.randn([64, 128], device="cuda", dtype=torch.float16)
+ b = torch.randn([128], device="cuda", dtype=torch.float16)
+ a_m = sparse24_largest_mask_2d(a)
+ a_s = to_sparse_semi_structured(a)
+
+ with pytest.raises(NotImplementedError):
+ assert torch.allclose(a_s @ b, (a * a_m) @ b, **atol_rtol_kw[a.dtype])
+
+
+ def test_sp24_matmuls_bmm(self) -> None:
+ a = torch.randn([64, 128], device="cuda", dtype=torch.float16)
+ b = torch.randn([5, 6, 128], device="cuda", dtype=torch.float16)
+ a_m = sparse24_largest_mask_2d(a)
+ a_s = to_sparse_semi_structured(a)
+
+ with pytest.raises(NotImplementedError):
+ assert torch.allclose(a_s @ b, (a * a_m) @ b, **atol_rtol_kw[a.dtype])
+
+class TestSparseSemiStructuredCUTLASS(TestCase):
+ """
+ This contains CUTLASS specific tests for
+ - torch._sparse_semi_structured_linear
+ """
+ def setUp(self):
+ if not _IS_SM8X:
+ self.skipTest('Only runs on SM80')
+ if "cutlass" not in SEMI_STRUCTURED_SUPPORTED_BACKENDS:
+ self.skipTest('CUTLASS not enabled')
+
+ @unittest.skipIf(TEST_WITH_ROCM, "ROCm doesn't support CUTLASS")
+ @inference_dtypes
+ def test_linear_cutlass(self, device, dtype):
def run_test(batch_shape, m, n, k, device, dtype, dtype_out, add_bias, activation, rtol, atol):
weight = rand_sparse_semi_structured(m, k, dtype, device)
@@ -643,12 +976,8 @@ class TestSparseSemiStructured(TestCase):
@unittest.skipIf(not has_triton(), "Test needs triton and recent GPU arch")
- @parametrize("backend", ["cutlass"])
- @dtypes(*SEMI_STRUCTURED_SUPPORTED_DTYPES)
- def test_conversions(self, device, dtype, backend):
- SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
- if backend == "cutlass" and IS_WINDOWS:
- self.skipTest("CUTLASS not supported on Windows")
+ @inference_dtypes
+ def test_conversions(self, device, dtype):
def run_test(r, c, device, dtype):
dense_ref = rand_sparse_semi_structured(r, c, dtype, device)
@@ -675,12 +1004,8 @@ class TestSparseSemiStructured(TestCase):
run_test(r, c, device, dtype)
@unittest.skipIf(not has_triton(), "Test needs triton and recent GPU arch")
- @parametrize("backend", ["cutlass"])
- @dtypes(*SEMI_STRUCTURED_SUPPORTED_DTYPES)
- def test_conversions_all_patterns(self, device, dtype, backend):
- SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass")
- if backend == "cutlass" and IS_WINDOWS:
- self.skipTest("CUTLASS not supported on Windows")
+ @inference_dtypes
+ def test_conversions_all_patterns(self, device, dtype):
r, c = 32, 128
dense_inv, dense_val = rand_sparse_semi_structured_all_patterns(r, c, dtype, device)
@@ -690,18 +1015,23 @@ class TestSparseSemiStructured(TestCase):
torch.testing.assert_close(dense, dense_val, rtol=0, atol=0)
-class TestCUSPARSELT(TestCase):
+
+
+CUSPARSELT_NUM_ALG_IDS = 4
+CUSPARSELT_MIXED_DTYPE_SUPPORT = [torch.float16, torch.bfloat16, torch.int32]
+
+
+class TestSparseSemiStructuredCUSPARSELT(TestCase):
"""
- This contains cuSPARSELt specific tests.
+ This contains cuSPARSELt specific tests for
+ torch._cslt_compress
+ torch._cslt_sparse_mm
"""
-
def setUp(self):
if not _IS_SM8X:
self.skipTest('Only runs on SM80')
if "cusparselt" not in SEMI_STRUCTURED_SUPPORTED_BACKENDS:
self.skipTest('cuSPARSELt not enabled')
- else:
- SparseSemiStructuredTensor._FORCE_CUTLASS = False
@parametrize("out_dtype", CUSPARSELT_MIXED_DTYPE_SUPPORT)
@parametrize("dense_input_shape", [(128, 128)])
@@ -715,7 +1045,7 @@ class TestCUSPARSELT(TestCase):
sparse_result = torch._cslt_sparse_mm(A_compressed, B.t(), out_dtype=out_dtype)
assert torch.allclose(dense_result, sparse_result, rtol=1e-3, atol=1e-3)
- @dtypes(torch.float16, torch.bfloat16)
+ @training_dtypes
def test_cslt_sparse_mm_alpha(self, dtype, device):
A = torch.Tensor([0, 0, 1, 1]).tile((128, 64)).to(dtype).cuda()
B = torch.ones((256, 128), device=device).to(dtype)
@@ -747,7 +1077,7 @@ class TestCUSPARSELT(TestCase):
assert torch.allclose(sparse_result, dense_result, rtol=1e-3, atol=1e-3)
@parametrize("alg_id", range(CUSPARSELT_NUM_ALG_IDS))
- @dtypes(*SEMI_STRUCTURED_SUPPORTED_DTYPES)
+ @inference_dtypes
def test_cslt_sparse_mm_alg_id(self, device, dtype, alg_id):
# alg_id=3 not supported for float32 dtype
if dtype == torch.float32 and alg_id == 3:
@@ -764,7 +1094,7 @@ class TestCUSPARSELT(TestCase):
assert torch.allclose(sparse_result, dense_result, rtol=1e-3, atol=1e-3)
- @dtypes(*SEMI_STRUCTURED_SUPPORTED_DTYPES)
+ @inference_dtypes
def test_cslt_sparse_mm_search(self, device, dtype):
A = rand_sparse_semi_structured_mask(128, 128, dtype=dtype)
A_compressed = torch._cslt_compress(A)
@@ -777,9 +1107,10 @@ class TestCUSPARSELT(TestCase):
# in cuSPARSELt v0.5.0 there are only 4 alg_ids total, so we should remove the +1 here when we update.
assert alg_id in range(CUSPARSELT_NUM_ALG_IDS + 1)
-
instantiate_device_type_tests(TestSparseSemiStructured, globals(), only_for="cuda")
-instantiate_device_type_tests(TestCUSPARSELT, globals(), only_for="cuda")
+instantiate_device_type_tests(TestSparseSemiStructuredCUTLASS, globals(), only_for="cuda")
+instantiate_device_type_tests(TestSparseSemiStructuredCUSPARSELT, globals(), only_for="cuda")
+instantiate_device_type_tests(TestSparseSemiStructuredTraining, globals(), only_for="cuda")
if __name__ == "__main__":
run_tests()
diff --git a/torch/sparse/_semi_structured_conversions.py b/torch/sparse/_semi_structured_conversions.py
index c487b15149..5203ad245b 100644
--- a/torch/sparse/_semi_structured_conversions.py
+++ b/torch/sparse/_semi_structured_conversions.py
@@ -1,20 +1,22 @@
import torch
-# This is PyTorch implementation of main part of reorder_meta()
-# function, from tools/util/include/cutlass/util/host_reorder.h file
-# of CUTLASS source tree. Furthermore, CUTLASS template for sparse
-# GEMM decides upon layout of this matrix, and at the moment for the
-# sparse GEMM executed on tensor cores, this is layout described by
-# ColumnMajorInterleaved<2> data structure, in
-# include/cutlass/layout/matrix.h of CUTLASS source tree. The
-# reordering of meta matrix into meta_reordered matrix calculated
-# according to these segments of CUTLASS code is re-implemented here.
-# Note that this calculation produces offsets for scattering metadata
-# matrix elements into reordered metadata matrix elements (or,
-# equivalently, for gathering reordered metadata matrix element back
-# into metadata matrix elements).
def _calculate_meta_reordering_scatter_offsets(m, meta_ncols, meta_dtype, device):
+ """
+ This is PyTorch implementation of main part of reorder_meta()
+ function, from tools/util/include/cutlass/util/host_reorder.h file
+ of CUTLASS source tree. Furthermore, CUTLASS template for sparse
+ GEMM decides upon layout of this matrix, and at the moment for the
+ sparse GEMM executed on tensor cores, this is layout described by
+ ColumnMajorInterleaved<2> data structure, in
+ include/cutlass/layout/matrix.h of CUTLASS source tree. The
+ reordering of meta matrix into meta_reordered matrix calculated
+ according to these segments of CUTLASS code is re-implemented here.
+ Note that this calculation produces offsets for scattering metadata
+ matrix elements into reordered metadata matrix elements (or,
+ equivalently, for gathering reordered metadata matrix element back
+ into metadata matrix elements).
+ """
dst_rows = torch.arange(0, m, device=device)[:, None].repeat(1, meta_ncols)
dst_cols = torch.arange(0, meta_ncols, device=device).repeat(m, 1)
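
The offsets computed here form a permutation of the flattened metadata indices, so the same index tensor can scatter metadata into the reordered layout and gather it back, exactly as the docstring states. Below is a minimal round-trip sketch under assumed illustrative sizes (m=32 rows and meta_ncols=8 columns of int16 metadata, matching a 32x128 float16 operand), run on CPU:

```python
import torch
from torch.sparse._semi_structured_conversions import (
    _calculate_meta_reordering_scatter_offsets,
)

m, meta_ncols = 32, 8  # illustrative: corresponds to a 32x128 float16 operand with int16 metadata
meta = torch.arange(m * meta_ncols, dtype=torch.int16).view(m, meta_ncols)

offsets = _calculate_meta_reordering_scatter_offsets(m, meta_ncols, torch.int16, "cpu")

# Scatter metadata elements into the reordered (ColumnMajorInterleaved<2>) layout ...
meta_reordered = torch.empty(m * meta_ncols, dtype=torch.int16)
meta_reordered.scatter_(0, offsets, meta.view(-1))

# ... and gather them back: the round trip recovers the original metadata.
assert torch.equal(meta_reordered.gather(0, offsets).view(m, meta_ncols), meta)
```
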
@@ -41,10 +43,12 @@ def _calculate_meta_reordering_scatter_offsets(m, meta_ncols, meta_dtype, device
return (cols_maj * m * interleave + dst_rows * interleave + cols_min).view(-1)
-# This function converts dense matrix into sparse semi-structured
-# representation, producing "compressed" matrix, in the layout used by
-# CUTLASS backend, and corresponding metadata matrix.
def sparse_semi_structured_from_dense_cutlass(dense):
+ """
+ This function converts dense matrix into sparse semi-structured
+ representation, producing "compressed" matrix, in the layout used by
+ CUTLASS backend, and corresponding metadata matrix.
+ """
if dense.dim() != 2:
raise RuntimeError(
f"Expected 2-dimensional dense tensor, got {dense.dim()}-dimensional tensor"
@@ -172,11 +176,13 @@ def sparse_semi_structured_from_dense_cutlass(dense):
return (sparse, meta_reordered.view(m, meta_ncols))
-# This function performs reverse of the function above - it
-# reconstructs dense matrix from a pair of "compressed" matrix, given
-# in the layout used by CUTLASS backend, and accompanying metadata
-# matrix.
def sparse_semi_structured_to_dense_cutlass(sparse, meta_reordered):
+ """
+ This function performs reverse of the function above - it
+ reconstructs dense matrix from a pair of "compressed" matrix, given
+ in the layout used by CUTLASS backend, and accompanying metadata
+ matrix.
+ """
if sparse.dim() != 2:
raise RuntimeError(
f"Expected 2-dimensional sparse tensor, got {sparse.dim()}-dimensional tensor"
@@ -273,3 +279,73 @@ def sparse_semi_structured_to_dense_cutlass(sparse, meta_reordered):
)
return dense.view(m, 2 * k)
+
+
+def _sparse_semi_structured_tile(dense):
+ """
+ This function computes a 2:4 sparse tile by greedily taking the largest values.
+
+ Since we take the largest values greedily, how the sorting algorithm handles duplicates affects
+ the ultimate sparsity pattern.
+
+ Note that this function does not have the same sorting semantics as our CUDA backend,
+ which is exposed via `torch._sparse_semi_structured_tile` and thus returns a different pattern.
+ """
+
+ def greedy_prune_tile(tile):
+ num_kept_row = [0, 0, 0, 0]
+ num_kept_col = [0, 0, 0, 0]
+
+ for x in tile.flatten().sort(descending=True, stable=True).indices:
+ r, c = x // 4, x % 4
+ if num_kept_row[r] < 2 and num_kept_col[c] < 2:
+ num_kept_row[r] += 1
+ num_kept_col[c] += 1
+ else:
+ tile[r, c] = 0
+
+ for batch in dense.unfold(0, 4, 4).unfold(1, 4, 4):
+ for tile in batch:
+ greedy_prune_tile(tile)
+
+ return dense
+
+
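
A small sanity check of the Python reference pruner added above, assuming an arbitrary multiple-of-4 shape on CPU (illustrative only): after pruning, every 4x4 tile should hold at most two nonzeros per row and per column. Note that the function prunes its argument in place and returns it.

```python
import torch
from torch.sparse._semi_structured_conversions import _sparse_semi_structured_tile

pruned = _sparse_semi_structured_tile(torch.randn(16, 16))

# Axes after unfolding: (tile_row, tile_col, row_in_tile, col_in_tile)
tiles = pruned.unfold(0, 4, 4).unfold(1, 4, 4)
per_row = (tiles != 0).sum(dim=-1)  # nonzeros per row of each 4x4 tile
per_col = (tiles != 0).sum(dim=-2)  # nonzeros per column of each 4x4 tile
assert int(per_row.max()) <= 2 and int(per_col.max()) <= 2
```
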
+def _compute_compressed_swizzled_bitmask(dense):
+ """
+ Calculates the compressed swizzled bitmask from a dense tensor
+ """
+
+ # first we need to convert the dense tensor to a bitmask
+ int_bitmask = dense.bool().to(torch.uint8)
+
+ # Each thread is responsible for an 8x8 tile, which contains 4 4x4 tiles:
+ # A, B, C and D, as displayed in the following schema:
+ # +---+---+
+ # | A | B |
+ # +---+---+
+ # | C | D |
+ # +---+---+
+
+ # we first need to split into the 8x8 tiles
+ bitmask_8x8_chunks = int_bitmask.unfold(0, 8, 8).unfold(1, 8, 8)
+
+    # then we unfold again to get our individual 4x4 tiles
+ bitmask_4x4_chunks = bitmask_8x8_chunks.unfold(2, 4, 4).unfold(3, 4, 4)
+
+ # Each 4x4 bitmask defines two 8-bit integers, which encode the sparsity pattern
+    # of that tile. Note that the least significant bit is stored first.
+ # [1 1 0 0]
+ # [1 1 0 0] -> 0011 0011 -> 51
+ # [0 0 1 1] 1100 1100 204
+ # [0 0 1 1]
+
+ # reshape tensor to expand tiles into 8-bit vectors
+ bitmask_binary_representation = bitmask_4x4_chunks.reshape(*bitmask_4x4_chunks.shape[:2], 4, 2, 8)
+
+    # to convert from the binary representation, we can do a matmul with powers of two
+ powers_of_two = 2**torch.arange(8, dtype=torch.float, device="cuda")
+ # To run on GPU: cast to float to do matmul and then cast back
+ compressed_swizzled_bitmask = (bitmask_binary_representation.to(torch.float) @ powers_of_two).to(torch.uint8)
+
+ return compressed_swizzled_bitmask
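
The LSB-first byte encoding described in the comments above can be reproduced standalone; the sketch below (plain CPU PyTorch, no CUDA needed) re-derives the 51/204 pair for the documented tile.

```python
import torch

tile = torch.tensor(
    [[1, 1, 0, 0],
     [1, 1, 0, 0],
     [0, 0, 1, 1],
     [0, 0, 1, 1]],
    dtype=torch.uint8,
)

# Two rows at a time form one 8-bit value; the least significant bit comes first.
powers_of_two = 2 ** torch.arange(8, dtype=torch.int32)
bytes_ = (tile.reshape(2, 8).to(torch.int32) * powers_of_two).sum(dim=-1)
print(bytes_.tolist())  # [51, 204], matching the example in the comment above
```
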
diff --git a/torch/sparse/_semi_structured_ops.py b/torch/sparse/_semi_structured_ops.py
index eaa609b342..551111b429 100644
--- a/torch/sparse/_semi_structured_ops.py
+++ b/torch/sparse/_semi_structured_ops.py
@@ -70,8 +70,8 @@ def semi_sparse_t(func, types, args=(), kwargs=None) -> torch.Tensor:
meta=self.meta_t,
packed_t=self.packed,
meta_t=self.meta,
- threads_masks=self.threads_masks.transpose(0, 1)
- if self.threads_masks is not None
+ compressed_swizzled_bitmask=self.compressed_swizzled_bitmask.transpose(0, 1)
+ if self.compressed_swizzled_bitmask is not None
else None,
fuse_transpose_cusparselt=args[0].fuse_transpose_cusparselt,
alg_id_cusparselt=args[0].alg_id_cusparselt,
@@ -97,7 +97,7 @@ def semi_sparse_detach(func, types, args, kwargs) -> torch.Tensor:
meta=self.meta,
packed_t=self.packed_t,
meta_t=self.meta_t,
- threads_masks=self.threads_masks,
+ compressed_swizzled_bitmask=self.compressed_swizzled_bitmask,
requires_grad=False,
)
diff --git a/torch/sparse/semi_structured.py b/torch/sparse/semi_structured.py
index 7c86b0d43b..587fcc0d72 100644
--- a/torch/sparse/semi_structured.py
+++ b/torch/sparse/semi_structured.py
@@ -5,7 +5,7 @@ from typing import Any, Optional, Tuple, List, Callable, Dict
import torch
from torch.sparse._semi_structured_conversions import (
sparse_semi_structured_from_dense_cutlass,
- sparse_semi_structured_to_dense_cutlass,
+ sparse_semi_structured_to_dense_cutlass
)
from torch.sparse._semi_structured_ops import (
fallback_dispatcher,
@@ -56,17 +56,18 @@ class SparseSemiStructuredTensor(torch.Tensor):
_FUSE_TRANSPOSE: bool = False
_PROTOTYPE_WARNING_SHOWN: bool = False
+ BACKEND: str
SPARSE_DISPATCH: Dict[Callable, Callable]
packed: Optional[torch.Tensor]
meta: Optional[torch.Tensor]
packed_t: Optional[torch.Tensor]
meta_t: Optional[torch.Tensor]
- threads_masks: Optional[torch.Tensor]
+ compressed_swizzled_bitmask: Optional[torch.Tensor]
fuse_transpose_cusparselt: bool
alg_id_cusparselt: int
- __slots__ = ["packed", "meta", "packed_t", "meta_t", "threads_masks"]
+ __slots__ = ["packed", "meta", "packed_t", "meta_t", "compressed_swizzled_bitmask"]
@staticmethod
def __new__( # noqa: PYI034
@@ -76,7 +77,7 @@ class SparseSemiStructuredTensor(torch.Tensor):
meta: Optional[torch.Tensor],
packed_t: Optional[torch.Tensor],
meta_t: Optional[torch.Tensor],
- threads_masks: Optional[torch.Tensor],
+ compressed_swizzled_bitmask: Optional[torch.Tensor],
fuse_transpose_cusparselt: bool = False,
alg_id_cusparselt: int = 0,
requires_grad: bool = False,
@@ -95,8 +96,8 @@ class SparseSemiStructuredTensor(torch.Tensor):
meta: The metadata of the original dense tensor, if it is stored separately
packed_t: The compressed representation of the transposed original dense tensor
meta_t: The metadata of the transposed original dense tensor, if it is stored separately
- threads_masks: The masks used by the CUTLASS backend to determine which threads should participate in the computation.
- Used for pointwise ops.
+ compressed_swizzled_bitmask: The masks used by the CUTLASS backend to determine which threads should
+ participate in the computation. Used for pointwise ops.
fuse_transpose_cusparselt: When running with cuSPARSELt, we have the option to fuse a transposition
with a matmul, which is useful in the case of 2:4 sparse training.
alg_id_cusparselt: The algorithm id to use when using cuSPARSELT, will have effect on performance
@@ -124,6 +125,9 @@ class SparseSemiStructuredTensor(torch.Tensor):
# But this is useful since it allows users to overload the dispatch table for debugging / testing.
cls._load_dispatch_table()
+ # we can also register the classes with dynamo when the warning is shown.
+ torch._dynamo.allow_in_graph(cls)
+
if packed is not None:
previous_tensor = packed
elif packed_t is not None:
@@ -143,7 +147,7 @@ class SparseSemiStructuredTensor(torch.Tensor):
tensor.meta = meta
tensor.packed_t = packed_t
tensor.meta_t = meta_t
- tensor.threads_masks = threads_masks
+ tensor.compressed_swizzled_bitmask = compressed_swizzled_bitmask
tensor.fuse_transpose_cusparselt = fuse_transpose_cusparselt
tensor.alg_id_cusparselt = alg_id_cusparselt
return tensor
@@ -181,7 +185,7 @@ class SparseSemiStructuredTensor(torch.Tensor):
meta=inner_tensors.get("meta", None),
packed_t=inner_tensors.get("packed_t", None),
meta_t=inner_tensors.get("meta_t", None),
- threads_masks=inner_tensors.get("threads_masks", None),
+ compressed_swizzled_bitmask=inner_tensors.get("compressed_swizzled_bitmask", None),
fuse_transpose_cusparselt=fuse_transpose_cusparselt,
alg_id_cusparselt=alg_id_cusparselt,
requires_grad=requires_grad,
@@ -216,6 +220,7 @@ class SparseSemiStructuredTensor(torch.Tensor):
torch.ops.aten.matmul: semi_sparse_mm,
torch.ops.aten.addmm: semi_sparse_addmm,
torch.ops.aten.linear: semi_sparse_linear,
+ torch.ops.aten._to_copy: fallback_dispatcher,
}
if custom_dispatch_table is not None:
cls.SPARSE_DISPATCH.update(custom_dispatch_table)
@@ -359,13 +364,14 @@ def to_sparse_semi_structured(
"SparseSemiStructuredTensor only support contiguous input tensors. "
)
- sparse_subclass = (
+ # set from _FORCE_CUTLASS flag
+ SPARSE_SUBCLASS = (
torch.sparse.SparseSemiStructuredTensorCUTLASS
if SparseSemiStructuredTensor._FORCE_CUTLASS
else torch.sparse.SparseSemiStructuredTensorCUSPARSELT
)
- return sparse_subclass.from_dense(original_tensor)
+ return SPARSE_SUBCLASS.from_dense(original_tensor)
class SparseSemiStructuredTensorCUTLASS(SparseSemiStructuredTensor):
"""
@@ -378,7 +384,7 @@ class SparseSemiStructuredTensorCUTLASS(SparseSemiStructuredTensor):
When _FORCE_CUTLASS is set, or when cuSPARSELt is not available, this subclass calls into _sparse_semi_structured_(mm|addmm) and
sparse_semi_structured_from_dense for conversion to the compressed format.
"""
-
+ BACKEND = "cutlass"
_DTYPE_SHAPE_CONSTRAINTS = {
torch.int8: _SEMI_STRUCTURED_SPARSE_CONFIG(16, 128, 16, 16),
torch.float16: _SEMI_STRUCTURED_SPARSE_CONFIG(32, 64, 8, 8),
@@ -401,19 +407,71 @@ class SparseSemiStructuredTensorCUTLASS(SparseSemiStructuredTensor):
meta=meta_tensor_cutlass,
packed_t=None,
meta_t=None,
- threads_masks=None,
+ compressed_swizzled_bitmask=None,
requires_grad=original_tensor.requires_grad,
)
def to_dense(self):
assert self.meta is not None and self.packed is not None
- return (
- sparse_semi_structured_to_dense_cutlass(
- self.packed,
- self.meta,
- )
- if self.meta.ndim == 2
- else super().to_dense()
+ return sparse_semi_structured_to_dense_cutlass(
+ self.packed,
+ self.meta,
+ ) if self.meta.ndim == 2 else super().to_dense()
+
+ @classmethod
+ def prune_dense_static_sort(cls, original_tensor : torch.Tensor, algorithm="") -> "SparseSemiStructuredTensor":
+ """
+        This function takes in an unpruned dense tensor and runs a (branchless) static sort across a 4x4 tile.
+
+ It greedily picks the largest values in the tile, upholding the 2:4 sparsity constraint across both rows and columns.
+ The algorithm used to prune the matrix is implemented in `_sparse_semi_structured_tile`.
+
+ Then it creates the packed and meta tensors for the compressed sparse representation of the pruned dense tensor.
+ It also calculates the packed_t and meta_t tensors for the compressed sparse representation of the transposed
+ pruned dense tensor.
+ Since we cannot transpose the compressed representations, we store both for the fw/bw pass respectively.
+
+        Finally, this function also computes a compressed swizzled bitmask that encodes the sparsity pattern.
+ This can be used in the backward pass to mask the gradients.
+
+ [9 1 7 4] [9 0 7 0]
+ [1 2 3 0] [0 2 0 0]
+ [8 3 5 4] -> prune 4x4 tile -> [8 0 0 4] -> pack to CUTLASS semi-structured -> packed
+ [1 2 6 2] [0 0 6 2] -> metadata
+
+ -> pack to transposed CUTLASS -> packed_t
+ semi-structured representation -> metadata_t
+
+ -> compute swizzled bitmask -> compressed_swizzled_bitmask
+
+
+ The equivalent PyTorch code to create the same five outputs from the dense tensor can be found below:
+ ```
+ from torch.sparse import SparseSemiStructuredTensorCUTLASS
+ from torch.sparse._semi_structured_conversions import _sparse_semi_structured_tile, _compute_compressed_swizzled_bitmask
+
+ pruned = _sparse_semi_structured_tile(dense)
+ packed_cutlass, meta_cutlass = sparse_semi_structured_from_dense_cutlass(pruned)
+ packed_t_cutlass, meta_t_cutlass = sparse_semi_structured_from_dense_cutlass(pruned.t().contiguous())
+ bitmask = _compute_compressed_swizzled_bitmask(pruned)
+
+ SparseSemiStructuredTensorCUTLASS(dense.shape, packed_cutlass, meta_cutlass, packed_t_cutlass, meta_t_cutlass, bitmask)
+ ```
+ """
+ # We can either pack to the CUTLASS or cuSPARSELt representation, depending on the use_cutlass flag.
+ (packed, meta, packed_t, meta_t, compressed_swizzled_bitmask) = torch._sparse_semi_structured_tile(
+ original_tensor,
+ algorithm=algorithm,
+ use_cutlass=True)
+
+ return cls(
+ original_tensor.shape,
+ packed=packed,
+ meta=meta,
+ packed_t=packed_t,
+ meta_t=meta_t,
+ compressed_swizzled_bitmask=compressed_swizzled_bitmask,
+ requires_grad=False,
)
def _mm(
@@ -459,7 +517,7 @@ class SparseSemiStructuredTensorCUSPARSELT(SparseSemiStructuredTensor):
cuSPARSELt also supports transposition fusion, which is necessary for performant 2:4 sparse training, as well
as specifying alg_id, a config that affects the performance of the matmul depending on matmul sizes.
"""
-
+ BACKEND = "cusparselt"
_DTYPE_SHAPE_CONSTRAINTS = {
torch.int8: _SEMI_STRUCTURED_SPARSE_CONFIG(32, 32, 16, 16),
torch.float16: _SEMI_STRUCTURED_SPARSE_CONFIG(16, 16, 8, 8),
@@ -476,12 +534,59 @@ class SparseSemiStructuredTensorCUSPARSELT(SparseSemiStructuredTensor):
meta=None,
packed_t=None,
meta_t=None,
- threads_masks=None,
+ compressed_swizzled_bitmask=None,
fuse_transpose_cusparselt=SparseSemiStructuredTensor._FUSE_TRANSPOSE,
alg_id_cusparselt=SparseSemiStructuredTensor._DEFAULT_ALG_ID,
requires_grad=original_tensor.requires_grad,
)
+ @classmethod
+ def prune_dense_static_sort(cls, original_tensor : torch.Tensor, algorithm="") -> "SparseSemiStructuredTensor":
+ """
+        This function does the same thing as described in SparseSemiStructuredTensorCUTLASS, but uses the cuSPARSELt metadata
+ layout and sparse matmul.
+
+ The only functional difference is that cuSPARSELt stores `metadata` and `packed` together into a single tensor.
+
+ [9 1 7 4] [9 0 7 0]
+ [1 2 3 0] [0 2 0 0]
+ [8 3 5 4] -> prune 4x4 tile -> [8 0 0 4] -> pack to cuSPARSELT semi-structured -> packed
+ [1 2 6 2] [0 0 6 2]
+
+ -> pack to transposed cuSPARSELt -> packed_t
+ semi-structured representation
+
+ -> compute swizzled bitmask -> compressed_swizzled_bitmask
+
+
+ The equivalent PyTorch code to create the same three outputs from the dense tensor can be found below:
+ ```
+ from torch.sparse import SparseSemiStructuredTensorCUSPARSELT
+ from torch.sparse._semi_structured_conversions import _sparse_semi_structured_tile, _compute_compressed_swizzled_bitmask
+
+ pruned = _sparse_semi_structured_tile(dense)
+ packed_cusparselt = torch._cslt_compress(pruned)
+ packed_t_cusparselt = torch._cslt_compress(pruned.t().contiguous())
+ bitmask = _compute_compressed_swizzled_bitmask(pruned)
+
+        SparseSemiStructuredTensorCUSPARSELT(dense.shape, packed_cusparselt, None, packed_t_cusparselt, None, bitmask)
+ ```
+ """
+ (packed, meta, packed_t, meta_t, compressed_swizzled_bitmask) = torch._sparse_semi_structured_tile(
+ original_tensor,
+ algorithm=algorithm,
+ use_cutlass=False)
+
+ return cls(
+ original_tensor.shape,
+ packed=packed,
+ meta=meta,
+ packed_t=packed_t,
+ meta_t=meta_t,
+ compressed_swizzled_bitmask=compressed_swizzled_bitmask,
+ requires_grad=False,
+ )
+
def _mm(
self,
B: torch.Tensor, | 2.41.0 |
de9e8237ac620f3ea8925cbd7f17aefc011f06d | Thu, 11 Apr 2024 10:39:48 -0700 | [PATCH 0059/1000] [dynamo] Bug fix for GET_YIELD_FROM_ITER (#122943) | Pull Request resolved: https://github.com/pytorch/pytorch/pull/122943 Approved by: https://github.com/jansel | diff --git a/test/dynamo/test_misc.py b/test/dynamo/test_misc.py
index 79835450be..b3d876290a 100644
--- a/test/dynamo/test_misc.py
+++ b/test/dynamo/test_misc.py
@@ -8736,6 +8736,24 @@ def ___make_guard_fn():
self.assertEqual(eager, compiled)
self.assertEqual(counter.frame_count, 1)
+ def test_yield_from_in_a_loop(self):
+ def gen2():
+ yield 1
+
+ def gen1():
+ for value in range(5):
+ yield from gen2()
+
+ def fn(x):
+ c = 0
+ for i in gen1():
+ c = c + i
+ return x + c
+
+ opt_fn = torch.compile(fn, backend="eager")
+ x = torch.zeros(4)
+ self.assertEqual(fn(x), opt_fn(x))
+
def test_yield_gen_and_from(self):
def populate_and_multiply_sequence(n, multiplier):
# Inline generator
diff --git a/torch/_dynamo/symbolic_convert.py b/torch/_dynamo/symbolic_convert.py
index 41f94e3ee9..2b6a525d16 100644
--- a/torch/_dynamo/symbolic_convert.py
+++ b/torch/_dynamo/symbolic_convert.py
@@ -2434,7 +2434,7 @@ class InliningInstructionTranslator(InstructionTranslatorBase):
suffix = ""
# TODO: mlazos, add support for enabling multiple artifact logs
# with a single alias
- if torch._logging._internal.log_state.is_artifact_enabled("output_code"):
+ if torch._logging._internal.log_state.is_artifact_enabled("bytecode"):
suffix = f"\n{dis.Bytecode(code).dis()}"
if sys.version_info >= (3, 11):
cur_inst = parent.current_instruction
@@ -2655,13 +2655,18 @@ class InliningGeneratorInstructionTranslator(InliningInstructionTranslator):
return
try:
val = tos.next_variable(self)
+
+            # TODO(anijain2305,jansel) - The last pop is because of
+            # YIELD_FROM. If we remove it from there, we don't need to
+            # pop it here.
self.push(val)
- # TODO(voz): Unclear if we need the push None in YIELD_VALUE?
self.YIELD_VALUE(inst)
self.pop()
+
+ # Pop the old iter and push the new iter
+ self.pop()
self.push(tos)
except (StopIteration, exc.UserStopIteration):
- # TODO(jansel): do we need a self.pop() here?
return
def SEND(self, inst):
@@ -2669,6 +2674,29 @@ class InliningGeneratorInstructionTranslator(InliningInstructionTranslator):
val = self.pop()
tos = self.stack[-1]
if isinstance(tos, ListIteratorVariable):
+            # We handle yield in a very different way than CPython does. Instead
+            # of returning to the parent frame on a yield, TorchDynamo
+            # just collects the generated_items and proceeds to the next
+            # instruction in the same frame. From a bytecode tracing standpoint,
+            # this means that the iterator returned from the child function on
+            # `yield from ...` will always be exhausted.
+
+ # Therefore to implement SEND, we have to look at the implementation
+ # when the iterator returns StopIteration. This translates to this code
+ # 3.11 - https://github.com/python/cpython/blob/3.11/Python/ceval.c#L2613-L2618
+ # 3.12 - https://github.com/python/cpython/blob/3.12/Python/bytecodes.c#L863-L865
+ # The implementation is different in 3.11 and 3.12. In 3.12, we rely
+ # on END_SEND to clean up. In 3.11, SEND does the cleanup as well.
+
+ if sys.version_info >= (3, 12):
+ # Do not pop, we will rely on END_SEND to pop the iterator
+ pass
+ else:
+ # Check that the iterator is exhausted. It should be because of
+ # how we implement yields.
+ assert tos.is_exhausted()
+ self.pop()
+
if isinstance(val, ConstantVariable) and val.value is None:
self.push(val)
self.instruction_pointer = self.indexof[inst.target]
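
For context on the delegation semantics the SEND comment above leans on, here is a rough pure-Python sketch (ignoring send()/throw() forwarding and return values) of how `yield from` can be viewed as driving the sub-iterator inline until it raises StopIteration, which is the exhausted-iterator situation the tracer models:

```python
def gen2():
    yield 1
    yield 2


def gen1_with_yield_from():
    for _ in range(3):
        yield from gen2()


def gen1_desugared():
    # Simplified expansion of `yield from gen2()`: just drive the
    # sub-iterator until StopIteration, re-yielding each item.
    for _ in range(3):
        it = iter(gen2())
        while True:
            try:
                item = next(it)
            except StopIteration:
                break
            yield item


assert list(gen1_with_yield_from()) == list(gen1_desugared()) == [1, 2] * 3
```
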
diff --git a/torch/_dynamo/variables/lists.py b/torch/_dynamo/variables/lists.py
index a23ebe05b6..1231eac8ca 100644
--- a/torch/_dynamo/variables/lists.py
+++ b/torch/_dynamo/variables/lists.py
@@ -678,6 +678,9 @@ class ListIteratorVariable(VariableTracker):
]
)
+ def is_exhausted(self):
+ return self.index >= len(self.items)
+
class TupleIteratorVariable(ListIteratorVariable):
pass | 2.41.0 |
06f7d1f2248a36d6ca7c325868eee1b1115668b | Fri, 12 Apr 2024 03:39:34 +0000 | [PATCH 0061/1000] Enable UFMT on `test/jit_hooks`, `test/lazy` and some files (#123807) | Part of: #123062 Ran lintrunner on: - `test/jit_hooks` - `test/lazy` - `test/linear.py` - `test/load_torchscript_model.py` - `test/mkl_verbose.py` - `test/mkldnn_verbose.py` with command: ```bash lintrunner -a --take UFMT --all-files ``` Co-authored-by: Edward Z. Yang <[email protected]> Pull Request resolved: https://github.com/pytorch/pytorch/pull/123807 Approved by: https://github.com/ezyang | diff --git a/.lintrunner.toml b/.lintrunner.toml
index 6a4472073f..7c4e94ed24 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -1162,18 +1162,6 @@ exclude_patterns = [
'test/functorch/test_vmap.py',
'test/functorch/test_vmap_registrations.py',
'test/functorch/xfail_suggester.py',
- 'test/lazy/__init__.py',
- 'test/lazy/test_bindings.py',
- 'test/lazy/test_debug_util.py',
- 'test/lazy/test_extract_compiled_graph.py',
- 'test/lazy/test_meta_kernel.py',
- 'test/lazy/test_reuse_ir.py',
- 'test/lazy/test_step_closures.py',
- 'test/lazy/test_ts_opinfo.py',
- 'test/linear.py',
- 'test/load_torchscript_model.py',
- 'test/mkl_verbose.py',
- 'test/mkldnn_verbose.py',
'test/nn/test_convolution.py',
'test/nn/test_dropout.py',
'test/nn/test_embedding.py',
diff --git a/test/lazy/test_bindings.py b/test/lazy/test_bindings.py
index 57151d4085..39466b33a1 100644
--- a/test/lazy/test_bindings.py
+++ b/test/lazy/test_bindings.py
@@ -2,6 +2,7 @@
import torch._lazy.metrics
+
def test_metrics():
names = torch._lazy.metrics.counter_names()
assert len(names) == 0, f"Expected no counter names, but got {names}"
diff --git a/test/lazy/test_debug_util.py b/test/lazy/test_debug_util.py
index df201d5473..3bb88d866b 100644
--- a/test/lazy/test_debug_util.py
+++ b/test/lazy/test_debug_util.py
@@ -3,11 +3,11 @@
import os
import re
import tempfile
-import torch.nn as nn
import unittest
import torch._lazy
import torch._lazy.ts_backend
+import torch.nn as nn
from torch.testing._internal.common_utils import IS_WINDOWS, run_tests, TestCase
torch._lazy.ts_backend.init()
@@ -21,15 +21,16 @@ class DebugUtilTest(TestCase):
output = model(torch.randn(1, 5).to(device))
torch._lazy.mark_step()
-
def test_get_python_frames(self):
# We only care about the first "Python Stacktrace" part of the saved
# graph. However, we cannot save the whole stack for comparison given
# it depends on a lot of things.
- partial_graph = (r"Python Stacktrace:.*"
- r"mark_step \(.*/_lazy/__init__.py:[0-9]+\).*"
- r"_run_linear \(.*lazy/test_debug_util.py:[0-9]+\).*"
- r"test_get_python_frames \(.*lazy/test_debug_util.py:[0-9]+\)")
+ partial_graph = (
+ r"Python Stacktrace:.*"
+ r"mark_step \(.*/_lazy/__init__.py:[0-9]+\).*"
+ r"_run_linear \(.*lazy/test_debug_util.py:[0-9]+\).*"
+ r"test_get_python_frames \(.*lazy/test_debug_util.py:[0-9]+\)"
+ )
with tempfile.NamedTemporaryFile(mode="r+", encoding="utf-8") as graph_file:
os.environ["LTC_SAVE_TENSORS_FILE"] = graph_file.name
diff --git a/test/lazy/test_extract_compiled_graph.py b/test/lazy/test_extract_compiled_graph.py
index bde68ae4dc..e54c2c1457 100644
--- a/test/lazy/test_extract_compiled_graph.py
+++ b/test/lazy/test_extract_compiled_graph.py
@@ -3,37 +3,44 @@
import unittest
from torch._lazy.ts_backend import init as init_ts_backend
+
init_ts_backend()
-from torch._lazy import config
-from torch._lazy.extract_compiled_graph import extract_compiled_graph
-import torch
-from torch import nn
+import copy
import dis
import inspect
-from torch import fx
import re
from contextlib import contextmanager
-import copy
+
+import torch
+from torch import fx, nn
+from torch._lazy import config
+from torch._lazy.extract_compiled_graph import extract_compiled_graph
+
class ModuleConstScale(nn.Module):
def forward(self, a):
return a * 2
+
class ModuleSub(nn.Module):
def forward(self, a, b):
return a - b
+
class ModuleAddcmul(nn.Module):
"""
     The addcmul function takes an at::Scalar, which results in a special TSData containing a Scalar rather than a Tensor.
"""
+
def forward(self, a, b, c):
return torch.addcmul(a, b, c, value=5)
+
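
For reference, torch.addcmul(input, t1, t2, value=v) computes input + v * t1 * t2, so the forward above is numerically equivalent to the short check below (shapes are illustrative):

```python
import torch

a, b, c = torch.randn(2, 3), torch.randn(2, 3), torch.randn(2, 3)
assert torch.allclose(torch.addcmul(a, b, c, value=5), a + 5 * b * c)
```
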
class ModuleReturnMulti(nn.Module):
def forward(self, a, b):
return (b + 1, a - 1)
+
# The default fx tracer will convert torch.randn to a constant.. We may need
# a custom tracer.
# class ModuleEagerTensor(nn.Module):
@@ -58,21 +65,25 @@ class ModuleReturnMulti(nn.Module):
# def forward(self):
# return torch.tensor((2, 3), dtype=torch.float32)
+
class ModuleReturnDupTensor(nn.Module):
"""
Handle the corner case that the same tensor appears multiple times in the
returned tuple. torchbench like drq will hit this corner case when running
thru torchdynamo..
"""
+
def forward(self, a, b):
c = a + b
return a - b, c, a + 1, c
+
class ModuleInplaceUpdate(nn.Module):
def forward(self, a, b):
a.sub_(b)
return b - 1, b + 1
+
@contextmanager
def force_fallback_ctx_mgr(fallback_op):
oldconfig = config.get_force_fallback()
@@ -82,6 +93,7 @@ def force_fallback_ctx_mgr(fallback_op):
finally:
config.set_force_fallback(oldconfig)
+
@contextmanager
def nop_ctx_mgr():
try:
@@ -89,27 +101,33 @@ def nop_ctx_mgr():
finally:
pass
+
def gen_rand_args(mod):
args = []
for _ in range(len(inspect.signature(mod.forward).parameters)):
args.append(torch.randn(2, 3))
return args
+
def allclose(expected, actual):
def unwrap(cont):
if isinstance(cont, (list, tuple)) and len(cont) == 1:
return cont[0]
return cont
+
expected = unwrap(expected)
actual = unwrap(actual)
if isinstance(expected, torch.Tensor) and isinstance(actual, torch.Tensor):
return torch.allclose(expected, actual)
elif isinstance(expected, (tuple, list)) and isinstance(actual, (tuple, list)):
- return len(expected) == len(actual) and all(torch.allclose(a, b) for a, b in zip(expected, actual))
+ return len(expected) == len(actual) and all(
+ torch.allclose(a, b) for a, b in zip(expected, actual)
+ )
else:
raise RuntimeError("Unexpected types")
+
def verify_reusing_compiled_graph(mod, exception_msg_pattern, ncase=10):
args = gen_rand_args(mod)
out = mod(*args)
@@ -123,13 +141,17 @@ def verify_reusing_compiled_graph(mod, exception_msg_pattern, ncase=10):
raise e # reraise the exception
exception_message = str(e)
if not re.search(exception_msg_pattern, exception_message):
- raise RuntimeError(f"Exception message does not match the required pattern: {exception_message}") from e
+ raise RuntimeError(
+ f"Exception message does not match the required pattern: {exception_message}"
+ ) from e
else:
# We are done for the test case that expects an exception
return
if exception_msg_pattern is not None:
- raise RuntimeError(f"Expect an exception matching pattern {exception_msg_pattern}")
+ raise RuntimeError(
+ f"Expect an exception matching pattern {exception_msg_pattern}"
+ )
print("return value of optimized_mod", optimized_mod(*args))
# check correctness
@@ -148,13 +170,16 @@ def verify_reusing_compiled_graph(mod, exception_msg_pattern, ncase=10):
# make sure arguments match after calling the model forward method to handle inplace
# updates.
if not allclose(rand_args, rand_args_copy):
- print(f"Incorrect updated arguments. expected {rand_args}, actual {rand_args_copy}")
+ print(
+ f"Incorrect updated arguments. expected {rand_args}, actual {rand_args_copy}"
+ )
failed_index.append(i)
continue
if len(failed_index) > 0:
raise RuntimeError(f"Failed {len(failed_index)}/{ncase} cases")
+
def maketest(module_cls, exception_msg_pattern=None, ctxmgr=None):
def wrapper(self):
nonlocal ctxmgr
@@ -165,11 +190,16 @@ def maketest(module_cls, exception_msg_pattern=None, ctxmgr=None):
return wrapper
+
class OptimizeTest(unittest.TestCase):
test_sub = maketest(ModuleSub)
# Same as test_sub but force aten::sub to fallback
     # We expect an exception caught because of LTC fallback.
- test_ltc_fallback = maketest(ModuleSub, exception_msg_pattern="fallback.*aten::sub", ctxmgr=force_fallback_ctx_mgr("aten::sub"))
+ test_ltc_fallback = maketest(
+ ModuleSub,
+ exception_msg_pattern="fallback.*aten::sub",
+ ctxmgr=force_fallback_ctx_mgr("aten::sub"),
+ )
test_const_scale = maketest(ModuleConstScale)
test_addcmul = maketest(ModuleAddcmul)
test_return_multi = maketest(ModuleReturnMulti)
diff --git a/test/lazy/test_meta_kernel.py b/test/lazy/test_meta_kernel.py
index 96fe74d232..516121c547 100644
--- a/test/lazy/test_meta_kernel.py
+++ b/test/lazy/test_meta_kernel.py
@@ -1,16 +1,16 @@
# Owner(s): ["oncall: jit"]
import torch
-
-from torch.testing._internal.common_utils import TestCase
-from torch import float32, float16
import torch._lazy
import torch._lazy.ts_backend
+from torch import float16, float32
+
+from torch.testing._internal.common_utils import TestCase
torch._lazy.ts_backend.init()
-class TestMetaKernel(TestCase):
+class TestMetaKernel(TestCase):
def test_addmm_invalid_dtype(self):
"""Tests that the addmm meta kernel returns the correct output type"""
input = torch.ones(2, 2, dtype=torch.float16).to("lazy")
@@ -35,5 +35,5 @@ class TestMetaKernel(TestCase):
self.assertEqual(out_bias.dtype, torch.float16)
def test_add_invalid_device(self):
- with self.assertRaisesRegex(RuntimeError, '.*not a lazy tensor.*'):
+ with self.assertRaisesRegex(RuntimeError, ".*not a lazy tensor.*"):
_ = torch.tensor([1], device="cpu") + torch.tensor([1], device="lazy")
diff --git a/test/lazy/test_reuse_ir.py b/test/lazy/test_reuse_ir.py
index f7024e9519..70112cd6cc 100644
--- a/test/lazy/test_reuse_ir.py
+++ b/test/lazy/test_reuse_ir.py
@@ -1,20 +1,23 @@
# Owner(s): ["oncall: jit"]
+import os
+import unittest
+
import torch
import torch._lazy
import torch._lazy.config
import torch._lazy.ir_cache
-import torch._lazy.ts_backend
import torch._lazy.metrics as metrics
+import torch._lazy.ts_backend
from torch.testing._internal.common_utils import IS_WINDOWS, run_tests, TestCase
-import os
-import unittest
torch._lazy.ts_backend.init()
torch._lazy.config.set_reuse_ir(True)
+
def get_test_device():
- return 'cuda' if 'LTC_TS_CUDA' in os.environ else 'cpu'
+ return "cuda" if "LTC_TS_CUDA" in os.environ else "cpu"
+
@unittest.skipIf(IS_WINDOWS, "To be fixed")
class TestLazyReuseIr(TestCase):
@@ -24,16 +27,16 @@ class TestLazyReuseIr(TestCase):
y = torch.randn(2, 3, 4, device=device)
z = torch.zeros(2, 3, 4, device=device)
- device = 'lazy'
+ device = "lazy"
x_lazy = x.detach().clone().to(device=device)
y_lazy = y.detach().clone().to(device=device)
z_lazy = z.detach().clone().to(device=device)
for i in range(10):
- z += (x + y)
+ z += x + y
for i in range(10):
- z_lazy += (x_lazy + y_lazy)
+ z_lazy += x_lazy + y_lazy
torch._lazy.mark_step()
torch.testing.assert_close(z.cpu(), z_lazy.cpu())
@@ -47,22 +50,22 @@ class TestLazyReuseIr(TestCase):
y = torch.randn(2, 3, 4, device=device)
z = torch.zeros(2, 3, 4, device=device)
- device = 'lazy'
+ device = "lazy"
x_lazy = x.detach().clone().to(device=device)
y_lazy = y.detach().clone().to(device=device)
z_lazy = z.detach().clone().to(device=device)
for i in range(10):
if i < 5:
- z += (x + y)
+ z += x + y
else:
- z += (x - y)
+ z += x - y
for i in range(10):
if i < 5:
- z_lazy += (x_lazy + y_lazy)
+ z_lazy += x_lazy + y_lazy
else:
- z_lazy += (x_lazy - y_lazy)
+ z_lazy += x_lazy - y_lazy
torch._lazy.mark_step()
torch.testing.assert_close(z.cpu(), z_lazy.cpu())
@@ -77,22 +80,22 @@ class TestLazyReuseIr(TestCase):
y = torch.randn(2, 3, 4, device=device)
z = torch.zeros(2, 3, 4, device=device)
- device = 'lazy'
+ device = "lazy"
x_lazy = x.detach().clone().to(device=device)
y_lazy = y.detach().clone().to(device=device)
z_lazy = z.detach().clone().to(device=device)
for i in range(10):
if i < 5:
- z += (x + y)
+ z += x + y
else:
- z += (x - y)
+ z += x - y
for i in range(10):
if i < 5:
- z_lazy += (x_lazy + y_lazy)
+ z_lazy += x_lazy + y_lazy
else:
- z_lazy += (x_lazy - y_lazy)
+ z_lazy += x_lazy - y_lazy
torch._lazy.mark_step()
torch.testing.assert_close(z.cpu(), z_lazy.cpu())
@@ -110,16 +113,24 @@ class TestLazyReuseIr(TestCase):
for i in range(10):
# BatchNorm2d does extra checks on dimensions which SymInts don't support yet
# so we call `torch.ops.aten.native_batch_norm` to bypass the checks.
- z, _, _ = torch.ops.aten.native_batch_norm(x, weight, bias, None, None, True, 0.1, 1e-5)
- z_legit, _, _ = torch.ops.aten._native_batch_norm_legit(x, weight, bias, True, 0.1, 1e-5)
+ z, _, _ = torch.ops.aten.native_batch_norm(
+ x, weight, bias, None, None, True, 0.1, 1e-5
+ )
+ z_legit, _, _ = torch.ops.aten._native_batch_norm_legit(
+ x, weight, bias, True, 0.1, 1e-5
+ )
device = "lazy"
x_lazy = x.detach().clone().to(device=device)
weight_lazy = weight.detach().clone().to(device=device)
bias_lazy = bias.detach().clone().to(device=device)
for i in range(10):
- z_lazy, _, _ = torch.ops.aten.native_batch_norm(x_lazy, weight_lazy, bias_lazy, None, None, True, 0.1, 1e-5)
- z_legit_lazy, _, _ = torch.ops.aten._native_batch_norm_legit(x_lazy, weight_lazy, bias_lazy, True, 0.1, 1e-5)
+ z_lazy, _, _ = torch.ops.aten.native_batch_norm(
+ x_lazy, weight_lazy, bias_lazy, None, None, True, 0.1, 1e-5
+ )
+ z_legit_lazy, _, _ = torch.ops.aten._native_batch_norm_legit(
+ x_lazy, weight_lazy, bias_lazy, True, 0.1, 1e-5
+ )
torch._lazy.mark_step()
torch.testing.assert_close(z.cpu(), z_lazy.cpu())
@@ -129,5 +140,5 @@ class TestLazyReuseIr(TestCase):
torch._lazy.ir_cache.reset()
-if __name__ == '__main__':
+if __name__ == "__main__":
run_tests()
diff --git a/test/lazy/test_ts_opinfo.py b/test/lazy/test_ts_opinfo.py
index 6088323506..c880c90a81 100644
--- a/test/lazy/test_ts_opinfo.py
+++ b/test/lazy/test_ts_opinfo.py
@@ -1,109 +1,125 @@
# Owner(s): ["oncall: jit"]
-from typing import Sequence
-import torch
import functools
+import itertools
+import os
+import pathlib
+from typing import Sequence
+from unittest import skip
-from torch.testing._internal.common_utils import run_tests, TestCase
-from torch.testing._internal.jit_utils import JitTestCase
-from torch.testing._internal.common_methods_invocations import op_db
-from torch.testing._internal.common_device_type import ops, instantiate_device_type_tests
+import torch
import torch._lazy
import torch._lazy.config
-import torch._lazy.metrics
import torch._lazy.ir_cache
+import torch._lazy.metrics
import torch._lazy.ts_backend
-import itertools
import yaml
-import os
-import pathlib
-from unittest import skip
+from torch.testing._internal.common_device_type import (
+ instantiate_device_type_tests,
+ ops,
+)
+from torch.testing._internal.common_methods_invocations import op_db
+
+from torch.testing._internal.common_utils import run_tests, TestCase
+from torch.testing._internal.jit_utils import JitTestCase
torch._lazy.ts_backend.init()
+
def get_test_device():
- return 'cuda' if 'LTC_TS_CUDA' in os.environ else 'cpu'
+ return "cuda" if "LTC_TS_CUDA" in os.environ else "cpu"
+
def remove_suffixes(l):
return [x.split(".")[0] for x in l]
+
def init_lists():
path_to_script = pathlib.Path(os.path.abspath(os.path.dirname(__file__)))
- TS_NATIVE_FUNCTIONS_PATH = path_to_script.parent.parent / "aten/src/ATen/native/ts_native_functions.yaml"
+ TS_NATIVE_FUNCTIONS_PATH = (
+ path_to_script.parent.parent / "aten/src/ATen/native/ts_native_functions.yaml"
+ )
with open(TS_NATIVE_FUNCTIONS_PATH) as f:
yaml_ts = yaml.load(f, yaml.SafeLoader)
- LAZY_OPS_LIST = set(remove_suffixes(itertools.chain(yaml_ts["full_codegen"], yaml_ts["supported"], yaml_ts["autograd"])))
+ LAZY_OPS_LIST = set(
+ remove_suffixes(
+ itertools.chain(
+ yaml_ts["full_codegen"], yaml_ts["supported"], yaml_ts["autograd"]
+ )
+ )
+ )
HAS_SYMINT_SUFFIX = yaml_ts["symint"]
FALLBACK_LIST = {"clamp"}
SKIP_RUNTIME_ERROR_LIST = {
- 'index_select', # Empty output_sizes is not supported
- 'clone', # is clone decomposed?
-
+ "index_select", # Empty output_sizes is not supported
+ "clone", # is clone decomposed?
# General ASAN Failure due to related to generating bool values.
# https://github.com/pytorch/pytorch/issues/74519
# https://github.com/pytorch/pytorch/issues/63034
- 'nonzero', # ASAN failure (paste: P501906539)
- 'all', # ASAN failure
- 'any', # ASAN failure
- 'logdet', # ASAN failure
+ "nonzero", # ASAN failure (paste: P501906539)
+ "all", # ASAN failure
+ "any", # ASAN failure
+ "logdet", # ASAN failure
}
SKIP_INCORRECT_RESULTS_LIST = {
- 'squeeze', # Value out of range
- 't', # Value out of range
- 'transpose', # Value out of range
- 'bernoulli', # incorrect results
- 'pow', # incorrect results
- 'addcdiv', # incorrect results (on CI not locally?)
+ "squeeze", # Value out of range
+ "t", # Value out of range
+ "transpose", # Value out of range
+ "bernoulli", # incorrect results
+ "pow", # incorrect results
+ "addcdiv", # incorrect results (on CI not locally?)
}
# The following ops all show up directly in ts_native_functions.yaml,
# but run functionalized versions of the composite kernels in core.
# This means that we don't expect the ops to show directly in the LTC metrics.
FUNCTIONAL_DECOMPOSE_LIST = {
- 'diag_embed',
- 'block_diag',
- 'new_empty_strided',
- 'narrow_copy',
- 'pixel_shuffle',
- 'pixel_unshuffle',
- 'select_backward',
- '_trilinear',
- 'linalg_inv_ex',
- 'linalg_pinv.atol_rtol_tensor',
- 'logsumexp',
+ "diag_embed",
+ "block_diag",
+ "new_empty_strided",
+ "narrow_copy",
+ "pixel_shuffle",
+ "pixel_unshuffle",
+ "select_backward",
+ "_trilinear",
+ "linalg_inv_ex",
+ "linalg_pinv.atol_rtol_tensor",
+ "logsumexp",
}
# For some ops, we don't support all variants. Here we use formatted_name
# to uniquely identify the variant.
- SKIP_VARIANT_LIST = {
- 'norm_nuc',
- 'min_reduction_with_dim'
- }
-
- return (LAZY_OPS_LIST,
- FALLBACK_LIST,
- SKIP_RUNTIME_ERROR_LIST,
- SKIP_INCORRECT_RESULTS_LIST,
- FUNCTIONAL_DECOMPOSE_LIST,
- HAS_SYMINT_SUFFIX,
- SKIP_VARIANT_LIST)
-
-(LAZY_OPS_LIST,
- FALLBACK_LIST,
- SKIP_RUNTIME_ERROR_LIST,
- SKIP_INCORRECT_RESULTS_LIST,
- FUNCTIONAL_DECOMPOSE_LIST,
- HAS_SYMINT_SUFFIX,
- SKIP_VARIANT_LIST) = init_lists()
+ SKIP_VARIANT_LIST = {"norm_nuc", "min_reduction_with_dim"}
+
+ return (
+ LAZY_OPS_LIST,
+ FALLBACK_LIST,
+ SKIP_RUNTIME_ERROR_LIST,
+ SKIP_INCORRECT_RESULTS_LIST,
+ FUNCTIONAL_DECOMPOSE_LIST,
+ HAS_SYMINT_SUFFIX,
+ SKIP_VARIANT_LIST,
+ )
+
+
+(
+ LAZY_OPS_LIST,
+ FALLBACK_LIST,
+ SKIP_RUNTIME_ERROR_LIST,
+ SKIP_INCORRECT_RESULTS_LIST,
+ FUNCTIONAL_DECOMPOSE_LIST,
+ HAS_SYMINT_SUFFIX,
+ SKIP_VARIANT_LIST,
+) = init_lists()
torch.manual_seed(42)
+
def clone_move(t):
- dev = 'lazy'
+ dev = "lazy"
copy_t = t.detach().clone().requires_grad_(True).to(device=dev)
return copy_t
-class TestLazyTensor(JitTestCase):
-
+class TestLazyTensor(JitTestCase):
@skip("Disable until autograd supports symints")
def testConvolutionBackward(self):
test_device = get_test_device()
@@ -118,12 +134,15 @@ class TestLazyTensor(JitTestCase):
# run eager
conv_out = torch.nn.functional.conv2d(inp, weight, bias)
- (inp_grad, weight_grad, bias_grad) = torch.autograd.grad([conv_out], [inp, weight, bias], [grad])
+ (inp_grad, weight_grad, bias_grad) = torch.autograd.grad(
+ [conv_out], [inp, weight, bias], [grad]
+ )
# run lazy
conv_copy_out = torch.nn.functional.conv2d(inp_copy, weight_copy, bias_copy)
(inp_copy_grad, weight_copy_grad, bias_copy_grad) = torch.autograd.grad(
- [conv_copy_out], [inp_copy, weight_copy, bias_copy], [grad_copy])
+ [conv_copy_out], [inp_copy, weight_copy, bias_copy], [grad_copy]
+ )
# check numerics
torch.testing.assert_close(bias_copy_grad.cpu(), bias_grad.cpu())
@@ -148,7 +167,6 @@ class TestLazyTensor(JitTestCase):
y.add_(1)
return x
-
out_ref = foo(inp, mark_step=False)
out = foo(inp_lazy, mark_step=True)
# out will have some pending mutations, which will be synced by the .cpu() call.
@@ -157,7 +175,7 @@ class TestLazyTensor(JitTestCase):
def test_tensor_ctr(self):
test_device = get_test_device()
inp = torch.tensor([[1, 2, 3, 4, 5]], device=test_device)
- inp_lazy = torch.tensor([[1, 2, 3, 4, 5]], device='lazy')
+ inp_lazy = torch.tensor([[1, 2, 3, 4, 5]], device="lazy")
def foo(x):
# Calling a view op to ensure that functionalization wrapping occurs.
@@ -169,19 +187,23 @@ class TestLazyTensor(JitTestCase):
class TestLazyOpInfo(TestCase):
-
- @ops([op for op in op_db
- if op.name in LAZY_OPS_LIST
- and op.name not in SKIP_RUNTIME_ERROR_LIST
- and op.name not in FUNCTIONAL_DECOMPOSE_LIST
- and op.formatted_name not in SKIP_VARIANT_LIST
- ], allowed_dtypes=(torch.float,))
+ @ops(
+ [
+ op
+ for op in op_db
+ if op.name in LAZY_OPS_LIST
+ and op.name not in SKIP_RUNTIME_ERROR_LIST
+ and op.name not in FUNCTIONAL_DECOMPOSE_LIST
+ and op.formatted_name not in SKIP_VARIANT_LIST
+ ],
+ allowed_dtypes=(torch.float,),
+ )
def test_dispatched_to_lazy(self, device, dtype, op):
def get_name(op):
l = [op.name]
- if op.variant_test_name != '':
+ if op.variant_test_name != "":
l.append(op.variant_test_name)
- return '.'.join(l)
+ return ".".join(l)
global HAS_SYMINT_SUFFIX, FALLBACK_LIST
samples = op.sample_inputs("lazy", dtype, requires_grad=False)
@@ -197,20 +219,31 @@ class TestLazyOpInfo(TestCase):
torch._lazy.wait_device_ops()
prefix = "aten" if op.name in FALLBACK_LIST else "lazy"
symint_suffix = "_symint" if op.name in HAS_SYMINT_SUFFIX else ""
- found = f"{prefix}::{op.name}{symint_suffix}" in remove_suffixes(torch._lazy.metrics.counter_names())
+ found = f"{prefix}::{op.name}{symint_suffix}" in remove_suffixes(
+ torch._lazy.metrics.counter_names()
+ )
# check aliases
if not found:
for alias in op.aliases:
- alias_found = f"{prefix}::{alias.name}{symint_suffix}" in remove_suffixes(torch._lazy.metrics.counter_names())
+ alias_found = (
+ f"{prefix}::{alias.name}{symint_suffix}"
+ in remove_suffixes(torch._lazy.metrics.counter_names())
+ )
found = found or alias_found
if found:
break
self.assertTrue(found)
-
- @ops([op for op in op_db if op.name in LAZY_OPS_LIST and op.name not in SKIP_RUNTIME_ERROR_LIST | SKIP_INCORRECT_RESULTS_LIST], allowed_dtypes=(torch.float,)) # noqa: B950
+ @ops(
+ [
+ op
+ for op in op_db
+ if op.name in LAZY_OPS_LIST
+ and op.name not in SKIP_RUNTIME_ERROR_LIST | SKIP_INCORRECT_RESULTS_LIST
+ ],
+ allowed_dtypes=(torch.float,),
+ ) # noqa: B950
def test_correctness(self, device, dtype, op):
-
test_device = get_test_device()
def clone_to_device(input, dev):
@@ -224,7 +257,9 @@ class TestLazyOpInfo(TestCase):
a, b = t
self.assertEqual(type(a), type(b))
if isinstance(a, torch.Tensor):
- self.assertTrue(torch.allclose(clone_to_device(a, test_device), b, atol=1e-4))
+ self.assertTrue(
+ torch.allclose(clone_to_device(a, test_device), b, atol=1e-4)
+ )
if isinstance(a, Sequence):
map(assert_allclose_rec, zip(a, b))
@@ -244,7 +279,15 @@ class TestLazyOpInfo(TestCase):
torch._lazy.mark_step()
assert_allclose_rec((r_actual, r_exp))
- @ops([op for op in op_db if op.name in LAZY_OPS_LIST and op.name not in SKIP_RUNTIME_ERROR_LIST | SKIP_INCORRECT_RESULTS_LIST], allowed_dtypes=(torch.float,)) # noqa: B950
+ @ops(
+ [
+ op
+ for op in op_db
+ if op.name in LAZY_OPS_LIST
+ and op.name not in SKIP_RUNTIME_ERROR_LIST | SKIP_INCORRECT_RESULTS_LIST
+ ],
+ allowed_dtypes=(torch.float,),
+ ) # noqa: B950
def test_correctness_with_reusing_ir(self, device, dtype, op):
torch._lazy.config.set_reuse_ir(True)
test_device = get_test_device()
@@ -260,7 +303,9 @@ class TestLazyOpInfo(TestCase):
a, b = t
self.assertEqual(type(a), type(b))
if isinstance(a, torch.Tensor):
- self.assertTrue(torch.allclose(clone_to_device(a, test_device), b, atol=1e-4))
+ self.assertTrue(
+ torch.allclose(clone_to_device(a, test_device), b, atol=1e-4)
+ )
if isinstance(a, Sequence):
map(assert_allclose_rec, zip(a, b))
@@ -284,7 +329,6 @@ class TestLazyOpInfo(TestCase):
torch._lazy.config.set_reuse_ir(False)
-
# TODO: after we move to master, add Lazy as a new Device here:
# https://github.com/pytorch/pytorch/blob/master/torch/testing/_internal/common_device_type.py#L532
instantiate_device_type_tests(TestLazyOpInfo, globals(), only_for="cpu")
@@ -306,7 +350,9 @@ class TestLazyDynamicOps(TestCase):
def test_nonzero_dynamic(self):
# Test that nonzero gives upper bounds sizes when symbolic shape mode is enabled
test_device = get_test_device()
- x1 = torch.tensor([[0, 1.0, 2.0], [3.0, 0, 0]], device=test_device, requires_grad=True)
+ x1 = torch.tensor(
+ [[0, 1.0, 2.0], [3.0, 0, 0]], device=test_device, requires_grad=True
+ )
x1_lazy = clone_move(x1)
x2_lazy = torch.nonzero(x1_lazy)
@@ -328,5 +374,6 @@ class TestLazyDynamicOps(TestCase):
self.assertEqual(out_cpu.shape, out_lazy.shape)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
run_tests()
diff --git a/test/linear.py b/test/linear.py
index dbf636303f..b473d447d9 100644
--- a/test/linear.py
+++ b/test/linear.py
@@ -1,4 +1,6 @@
import torch
+
+
class LinearMod(torch.nn.Linear):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@@ -6,4 +8,5 @@ class LinearMod(torch.nn.Linear):
def forward(self, input):
return torch._C._nn.linear(input, self.weight, self.bias)
+
print(torch.jit.trace(LinearMod(20, 20), torch.rand([20, 20])).graph)
diff --git a/test/load_torchscript_model.py b/test/load_torchscript_model.py
index 19b3e3d31d..d04fae8076 100644
--- a/test/load_torchscript_model.py
+++ b/test/load_torchscript_model.py
@@ -1,7 +1,8 @@
import sys
+
import torch
-if __name__ == '__main__':
+if __name__ == "__main__":
script_mod = torch.jit.load(sys.argv[1])
mod = torch.load(sys.argv[1] + ".orig")
print(script_mod)
diff --git a/test/mkl_verbose.py b/test/mkl_verbose.py
index 879168f866..5c3530fb34 100644
--- a/test/mkl_verbose.py
+++ b/test/mkl_verbose.py
@@ -1,13 +1,16 @@
import argparse
+
import torch
+
def run_model(level):
m = torch.nn.Linear(20, 30)
input = torch.randn(128, 20)
with torch.backends.mkl.verbose(level):
m(input)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--verbose-level", default=0, type=int)
args = parser.parse_args()
diff --git a/test/mkldnn_verbose.py b/test/mkldnn_verbose.py
index 60fe87bd23..a2feb29ba3 100644
--- a/test/mkldnn_verbose.py
+++ b/test/mkldnn_verbose.py
@@ -1,6 +1,8 @@
import argparse
+
import torch
+
class Module(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -10,13 +12,15 @@ class Module(torch.nn.Module):
y = self.conv(x)
return y
+
def run_model(level):
m = Module().eval()
d = torch.rand(1, 1, 112, 112)
with torch.backends.mkldnn.verbose(level):
m(d)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--verbose-level", default=0, type=int)
args = parser.parse_args() | 2.41.0 |
b895ace1d36726e64781774f53b3d3098206116 | Thu, 11 Apr 2024 23:44:51 +0000 | [PATCH 0063/1000] Only run backward part of COW test if results are strided (#123870) | Fixes #123792

Pull Request resolved: https://github.com/pytorch/pytorch/pull/123870
Approved by: https://github.com/ezyang | diff --git a/test/test_ops.py b/test/test_ops.py
index fc2e92e3c1..34462961df 100644
--- a/test/test_ops.py
+++ b/test/test_ops.py
@@ -1668,42 +1668,46 @@ class TestCompositeCompliance(TestCase):
leaf_results = pytree.tree_leaves(results_raw)
results = [r for r in leaf_results if isinstance(r, torch.Tensor) and r.requires_grad]
- output_grads_raw = [
- torch.ones(r.shape, device=r.device, dtype=r.dtype) for r in results]
- output_grads_copy = []
- output_grads = []
-
- # Convert output grads to COW tensors and make copies
- for output_grad in output_grads_raw:
- output_grads_copy.append(output_grad.clone().detach())
- output_grads.append(torch._lazy_clone(output_grad))
-
- input_grads = torch.autograd.grad(
- results,
- leaf_tensors,
- output_grads,
- allow_unused=True,
- retain_graph=True)
-
- # Check that COW inputs remain COW after the backward op is executed
- for idx, arg in enumerate(args):
- check_cow_input(
- arg,
- args_copy[idx],
- idx,
- backward_or_forward='backward',
- supports_cow_input_no_materialize=op.supports_cow_input_no_materialize_backward,
- allow_list=op.allow_cow_input_materialize_backward)
-
- # Check that COW inputs remain COW after the backward op is executed
- for idx, output_grad in enumerate(output_grads):
- check_cow_input(
- output_grad,
- output_grads_copy[idx],
- f'output grad {idx}',
- backward_or_forward='backward',
- supports_cow_input_no_materialize=op.supports_cow_input_no_materialize_backward,
- allow_list=op.allow_cow_input_materialize_backward)
+ all_results_strided = all(is_strided_tensor(result) for result in results)
+
+ # Only test backward if the results are strided tensors
+ if all_results_strided:
+ output_grads_raw = [
+ torch.ones(r.shape, device=r.device, dtype=r.dtype) for r in results]
+ output_grads_copy = []
+ output_grads = []
+
+ # Convert output grads to COW tensors and make copies
+ for output_grad in output_grads_raw:
+ output_grads_copy.append(output_grad.clone().detach())
+ output_grads.append(torch._lazy_clone(output_grad))
+
+ input_grads = torch.autograd.grad(
+ results,
+ leaf_tensors,
+ output_grads,
+ allow_unused=True,
+ retain_graph=True)
+
+ # Check that COW inputs remain COW after the backward op is executed
+ for idx, arg in enumerate(args):
+ check_cow_input(
+ arg,
+ args_copy[idx],
+ idx,
+ backward_or_forward='backward',
+ supports_cow_input_no_materialize=op.supports_cow_input_no_materialize_backward,
+ allow_list=op.allow_cow_input_materialize_backward)
+
+ # Check that COW inputs remain COW after the backward op is executed
+ for idx, output_grad in enumerate(output_grads):
+ check_cow_input(
+ output_grad,
+ output_grads_copy[idx],
+ f'output grad {idx}',
+ backward_or_forward='backward',
+ supports_cow_input_no_materialize=op.supports_cow_input_no_materialize_backward,
+ allow_list=op.allow_cow_input_materialize_backward)
@ops(op_db, allowed_dtypes=(torch.float,)) | 2.41.0 |
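For context, the copy-on-write behavior that the checks above exercise can be sketched as follows; this is an illustrative snippet (using the same private `torch._lazy_clone` helper as the test), not part of the patch:

```python
import torch

a = torch.randn(4)
b = torch._lazy_clone(a)  # lazy (copy-on-write) clone: shares storage with `a` until written to
assert torch.equal(a, b)

b.add_(1.0)               # the first write materializes `b`; `a` keeps its original values
assert not torch.equal(a, b)
```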
669334175bb2155316e7a74685b6278e127ecb4 | Fri, 12 Apr 2024 06:29:27 +0000 | [PATCH 0064/1000] Revert "Add Matmul recipe into x86_inductor_quantizer (#122776)" | This reverts commit e8e9261b906f69b397e4027362be801f98a68d62. Reverted https://github.com/pytorch/pytorch/pull/122776 on behalf of https://github.com/DanilBaibak due to Break internal build ([comment](https://github.com/pytorch/pytorch/pull/122776#issuecomment-2051073373)) | diff --git a/test/quantization/pt2e/test_x86inductor_quantizer.py b/test/quantization/pt2e/test_x86inductor_quantizer.py
index 4af5a30ddf..c9df319bfd 100644
--- a/test/quantization/pt2e/test_x86inductor_quantizer.py
+++ b/test/quantization/pt2e/test_x86inductor_quantizer.py
@@ -289,42 +289,21 @@ class TestHelperModules:
return tmp + self.bn2(self.conv2(tmp))
class SelfAttnLikeModule(torch.nn.Module):
- def __init__(
- self,
- input_dim,
- transpose_for_score=False,
- num_attention_heads=None,
- attention_head_size=None,
- ) -> None:
+ def __init__(self, input_dim) -> None:
super().__init__()
self.input_dim = input_dim
self.q_proj = nn.Linear(input_dim, input_dim, bias=False)
self.k_proj = nn.Linear(input_dim, input_dim, bias=False)
self.v_proj = nn.Linear(input_dim, input_dim, bias=False)
self.softmax = nn.Softmax(dim=-1)
- self.transpose_for_score = transpose_for_score
- if self.transpose_for_score:
- assert num_attention_heads is not None
- assert attention_head_size is not None
- self.num_attention_heads = num_attention_heads
- self.attention_head_size = attention_head_size
-
- def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
- new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
- x = x.view(new_x_shape)
- return x.permute(0, 2, 1, 3)
def forward(self, x):
q = self.q_proj(x)
k = self.k_proj(x)
v = self.v_proj(x)
- if self.transpose_for_score:
- q = self.transpose_for_scores(q)
- k = self.transpose_for_scores(k)
- v = self.transpose_for_scores(v)
- scores = torch.matmul(q, k.transpose(-1, -2)) / (self.input_dim ** 0.5)
+ scores = torch.bmm(q, k.transpose(1, 2)) / (self.input_dim ** 0.5)
attention = self.softmax(scores)
- weighted = torch.matmul(attention, v)
+ weighted = torch.bmm(attention, v)
return weighted
class X86InductorQuantTestCase(QuantizationTestCase):
@@ -1469,68 +1448,3 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase):
node_occurrence,
node_list,
)
-
- @skipIfNoX86
- def test_attention_block(self):
- """
- Test pattern of Attention like Block with X86InductorQuantizer.
- """
- for annotate_matmul in [False, True]:
- with override_quantized_engine("x86"), torch.no_grad():
- m = TestHelperModules.SelfAttnLikeModule(
- input_dim=64 * 16,
- transpose_for_score=True,
- num_attention_heads=16,
- attention_head_size=64,
- ).eval()
- example_inputs = (torch.randn(2, 384, 1024),)
-
- m(*example_inputs)
-
- quantizer = X86InductorQuantizer().set_global(
- xiq.get_default_x86_inductor_quantization_config()
- )
-
- if annotate_matmul:
- quantizer.set_function_type_qconfig(torch.matmul, quantizer.get_global_quantization_config())
-
- node_occurrence = {
- torch.ops.quantized_decomposed.quantize_per_tensor.default: 5 if annotate_matmul else 1,
- torch.ops.quantized_decomposed.dequantize_per_tensor.default: 7 if annotate_matmul else 3,
- # quantize_per_channel for weights are const propagated
- torch.ops.quantized_decomposed.quantize_per_channel.default: 0,
- torch.ops.quantized_decomposed.dequantize_per_channel.default: 3,
- }
- if annotate_matmul:
- node_list = [
- torch.ops.quantized_decomposed.quantize_per_tensor.default,
- torch.ops.quantized_decomposed.dequantize_per_tensor.default,
- torch.ops.quantized_decomposed.dequantize_per_channel.default,
- torch.ops.aten.linear.default,
- torch.ops.aten.view.default,
- torch.ops.aten.permute.default,
- torch.ops.quantized_decomposed.quantize_per_tensor.default,
- torch.ops.quantized_decomposed.dequantize_per_tensor.default,
- torch.ops.aten.matmul.default,
- torch.ops.aten.div.Tensor,
- torch.ops.aten.softmax.int,
- ]
- else:
- node_list = [
- torch.ops.quantized_decomposed.quantize_per_tensor.default,
- torch.ops.quantized_decomposed.dequantize_per_tensor.default,
- torch.ops.quantized_decomposed.dequantize_per_channel.default,
- torch.ops.aten.linear.default,
- torch.ops.aten.view.default,
- torch.ops.aten.permute.default,
- torch.ops.aten.matmul.default,
- torch.ops.aten.div.Tensor,
- torch.ops.aten.softmax.int,
- ]
- self._test_quantizer(
- m,
- example_inputs,
- quantizer,
- node_occurrence,
- node_list,
- )
diff --git a/torch/ao/quantization/quantizer/x86_inductor_quantizer.py b/torch/ao/quantization/quantizer/x86_inductor_quantizer.py
index 226d722357..8889cf2df0 100644
--- a/torch/ao/quantization/quantizer/x86_inductor_quantizer.py
+++ b/torch/ao/quantization/quantizer/x86_inductor_quantizer.py
@@ -82,9 +82,7 @@ default_quantizable_ops = propagation_quantizable_ops | {
# A superset of default_quantizable_ops includes operators support the int8 data type
# but not enabled by default recipe of X86InductorQuantizer.
-quantizable_ops = default_quantizable_ops | {
- torch.ops.aten.matmul.default,
-}
+quantizable_ops = default_quantizable_ops
QUANT_ANNOTATION_KEY = "quantization_annotation"
@@ -112,12 +110,6 @@ def _map_module_function_to_aten_operator_type():
],
torch.ops.aten.flatten.using_ints,
),
- (
- [
- torch.matmul,
- ],
- torch.ops.aten.matmul.default,
- ),
)
for map_item in map_list:
module_function_to_aten_operator.update(dict.fromkeys(map_item[0], map_item[1])) # type: ignore[call-overload]
@@ -318,14 +310,6 @@ class X86InductorQuantizer(Quantizer):
self.global_config = quantization_config
return self
- def get_global_quantization_config(self):
- if not isinstance(self.global_config, QuantizationConfig):
- warnings.warn(
- "The global_config for X86InductorQuantizer is currently invalid. \
- Please ensure that you use set_global to establish the global quantization configuration."
- )
- return self.global_config
-
def set_function_type_qconfig(
self,
function_type: Callable,
@@ -515,7 +499,6 @@ class X86InductorQuantizer(Quantizer):
# Step1: Recipe of fusion patterns like conv/linear.
self._annotate_conv2d_fusion_pattern(model)
self._annotate_linear_fusion_pattern(model)
- self._annotate_matmul(model)
# Step2: Recipe to propagate annotation for patterns beside conv/linear.
# Go through all the nodes from start to end.
@@ -769,24 +752,6 @@ class X86InductorQuantizer(Quantizer):
self._annotate_linear_unary(model, config)
self._annotate_linear(model, config)
- def _annotate_matmul(self, model: torch.fx.GraphModule):
- if config := self._get_aten_operator_qconfig(torch.ops.aten.matmul.default):
- for node in model.graph.nodes:
- if node.target == torch.ops.aten.matmul.default and not _is_annotated(
- [node]
- ):
- input_qspec_map = {}
- matmul_node = node
- for input_node in matmul_node.args:
- input_qspec_map[input_node] = get_input_act_qspec(config)
- matmul_node.meta[
- QUANT_ANNOTATION_KEY
- ] = _X86InductorQuantizationAnnotation(
- input_qspec_map=input_qspec_map,
- _annotated=True,
- _is_output_of_quantized_pattern=True,
- )
-
def _annotate_conv2d_binary_unary(
self, gm: torch.fx.GraphModule, quantization_config: QuantizationConfig
) -> None: | 2.41.0 |
881d567f402f6bf16c68803a1bf4bf5c5e1673f | Fri, 12 Apr 2024 07:23:57 +0000 | [PATCH 0065/1000] Revert "[inductor] Write generated files from parent process (#123409)" | This reverts commit 79c565b24e6c305c09c8c908e27f4023f41dd567. Reverted https://github.com/pytorch/pytorch/pull/123409 on behalf of https://github.com/DanilBaibak due to Needs to be reverted because it blocks reverting of the broken PR. ([comment](https://github.com/pytorch/pytorch/pull/123409#issuecomment-2051166617)) | diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index 4e84838504..98cf75fc23 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -59,7 +59,12 @@ from torch._dynamo.device_interface import (
from torch._dynamo.utils import counters, dynamo_timed
from torch._inductor import config, exc, metrics
from torch._inductor.codegen.cuda import cuda_env
-from torch._inductor.utils import cache_dir, clear_on_fresh_inductor_cache, is_linux
+from torch._inductor.utils import (
+ cache_dir,
+ clear_on_fresh_inductor_cache,
+ developer_warning,
+ is_linux,
+)
from torch._subclasses.fake_tensor import (
extract_tensor_metadata,
FakeTensor,
@@ -2016,7 +2021,7 @@ def custom_op_wrapper(op: str, *args):
@clear_on_fresh_inductor_cache
class CppCodeCache:
- cache: Dict[str, Callable[[], Union[CDLL, ModuleType]]] = {}
+ cache: Dict[str, Union[CDLL, ModuleType]] = {}
cache_clear = staticmethod(cache.clear)
cpp_compile_command_flags: Dict[str, Any] = {}
@@ -2027,17 +2032,13 @@ class CppCodeCache:
@classmethod
def _load_library(cls, path: str, key: str) -> Union[CDLL, ModuleType]:
try:
- result = cls._load_library_inner(path, key)
- result.key = key # type: ignore[union-attr]
- return result
+ return cls._load_library_inner(path, key)
except (ImportError, OSError) as e:
if "gomp" in str(e) and os.path.exists("/usr/lib64/libgomp.so.1"):
# hacky workaround for fbcode/buck
global _libgomp
_libgomp = cdll.LoadLibrary("/usr/lib64/libgomp.so.1")
- result = cls._load_library_inner(path, key)
- result.key = key # type: ignore[union-attr]
- return result
+ return cls._load_library_inner(path, key)
if "failed to map segment from shared object" in str(e):
raise OSError(
f"{e}. The most common reason this may occur is if the {tempfile.gettempdir()} folder "
@@ -2048,68 +2049,42 @@ class CppCodeCache:
raise
@classmethod
- def load_async(cls, source_code: str, cuda=False, submit_fn=None):
- compile_command = {
- **cls.cpp_compile_command_flags,
- "cuda": cuda,
- "vec_isa": pick_vec_isa(),
- }
- cpp_command = repr(cpp_compile_command("i", "o", **compile_command))
+ def load(cls, source_code: str, cuda: bool = False) -> Union[CDLL, ModuleType]:
+ cls.cpp_compile_command_flags.update({"cuda": cuda})
+ picked_vec_isa = pick_vec_isa()
+ cpp_command = repr(
+ cpp_compile_command(
+ "i", "o", vec_isa=picked_vec_isa, **cls.cpp_compile_command_flags
+ )
+ )
key, input_path = write(source_code, "cpp", extra=cpp_command)
-
if key not in cls.cache:
from filelock import FileLock
- lock_path = os.path.join(get_lock_dir(), key + ".lock")
- output_path = input_path[:-3] + "so"
- future: Optional[Future[Any]] = None
- lib = None
- worker_fn = functools.partial(
- _worker_compile_cpp,
- lock_path,
- input_path,
- output_path,
- cpp_compile_command(
- input=input_path, output=output_path, **compile_command
- ),
- )
-
- def load_fn():
- nonlocal lib
- if lib is None:
- if future is not None:
- future.result()
- worker_fn()
- lib = cls._load_library(output_path, key)
- assert lib is not None
- return lib
-
- if submit_fn is not None:
- with FileLock(lock_path, timeout=LOCK_TIMEOUT):
- if not os.path.exists(output_path):
- future = submit_fn(worker_fn)
-
- cls.cache[key] = load_fn
+ lock_dir = get_lock_dir()
+ lock = FileLock(os.path.join(lock_dir, key + ".lock"), timeout=LOCK_TIMEOUT)
+ with lock:
+ output_path = input_path[:-3] + "so"
+ if not os.path.exists(output_path):
+ cmd = shlex.split(
+ cpp_compile_command(
+ input=input_path,
+ output=output_path,
+ vec_isa=picked_vec_isa,
+ **cls.cpp_compile_command_flags,
+ )
+ )
+ compile_file(input_path, output_path, cmd)
+ cls.cache[key] = cls._load_library(output_path, key)
+ cls.cache[key].key = key # type: ignore[union-attr]
return cls.cache[key]
- @classmethod
- def load(cls, source_code: str, cuda: bool = False):
- return cls.load_async(source_code, cuda)()
-
-
-def _worker_compile_cpp(lock_path, input_path, output_path, cmd):
- from filelock import FileLock
-
- with FileLock(lock_path, timeout=LOCK_TIMEOUT):
- if not os.path.exists(output_path):
- compile_file(input_path, output_path, shlex.split(cmd))
-
# Customized Python binding for cpp kernels
@clear_on_fresh_inductor_cache
class CppPythonBindingsCodeCache(CppCodeCache):
- cache: Dict[str, Callable[[], Union[CDLL, ModuleType]]] = {}
+ cache: Dict[str, Union[CDLL, ModuleType]] = {}
cache_clear = staticmethod(cache.clear)
cpp_compile_command_flags = {
# kernels have no dependency on libtorch
@@ -2201,13 +2176,12 @@ class CppPythonBindingsCodeCache(CppCodeCache):
return module
@classmethod
- def load_pybinding_async(
+ def load_pybinding(
cls,
argtypes: List[str],
source_code: str,
cuda: bool = False,
num_outputs: int = -1,
- submit_fn=None,
) -> Any:
"""
Wrap a C++ function in fast Python bindings.
@@ -2235,26 +2209,14 @@ class CppPythonBindingsCodeCache(CppCodeCache):
cls.entry_function,
cls.entry_function,
)
- get_result = cls.load_async(source_code + suffix, cuda, submit_fn=submit_fn)
- result = None
-
- def future():
- nonlocal result
- if result is None:
- result = get_result()
- assert isinstance(result, ModuleType)
- return getattr(result, cls.entry_function)
-
- return future
-
- @classmethod
- def load_pybinding(cls, *args, **kwargs) -> Any:
- return cls.load_pybinding_async(*args, **kwargs)()
+ result = cls.load(source_code + suffix, cuda)
+ assert isinstance(result, ModuleType)
+ return getattr(result, cls.entry_function)
@clear_on_fresh_inductor_cache
class CppWrapperCodeCache(CppPythonBindingsCodeCache):
- cache: Dict[str, Callable[[], Union[CDLL, ModuleType]]] = {}
+ cache: Dict[str, Union[CDLL, ModuleType]] = {}
cache_clear = staticmethod(cache.clear)
cpp_compile_command_flags = {
"include_pytorch": not config.abi_compatible,
@@ -2315,10 +2277,6 @@ class CppWrapperCodeCache(CppPythonBindingsCodeCache):
)
-def _reload_python_module_in_subproc(key, path):
- return PyCodeCache.load_by_key_path(key, path)
-
-
@clear_on_fresh_inductor_cache
class PyCodeCache:
cache: Dict[str, ModuleType] = dict()
@@ -2372,11 +2330,6 @@ class PyCodeCache:
for k, v in attrs.items():
setattr(mod, k, v)
- if not (linemap or attrs):
- mod._reload_in_subproc = functools.partial( # type: ignore[attr-defined]
- _reload_python_module_in_subproc, key, path
- )
-
return cls.cache[key]
@classmethod
@@ -2408,25 +2361,11 @@ class PyCodeCache:
return parse_stack_trace(entry)
-def _reload_triton_kernel_in_subproc(reload_module, kernel_name):
- return TritonCodeCache._mod_to_kernel(reload_module(), kernel_name)
-
-
class TritonCodeCache:
@classmethod
def load(cls, kernel_name: str, source_code: str) -> ModuleType:
mod = PyCodeCache.load(source_code)
- return cls._mod_to_kernel(mod, kernel_name)
-
- @classmethod
- def _mod_to_kernel(cls, mod, kernel_name):
- kernel = getattr(mod, kernel_name)
- kernel._reload_in_subproc = functools.partial(
- _reload_triton_kernel_in_subproc,
- mod._reload_in_subproc,
- kernel_name,
- )
- return kernel
+ return getattr(mod, kernel_name)
def _cuda_compiler() -> Optional[str]:
@@ -2713,7 +2652,6 @@ def caching_device_properties():
device_interface.Worker.get_device_properties()
[email protected]_cache(None)
def _set_triton_ptxas_path() -> None:
if os.environ.get("TRITON_PTXAS_PATH") is not None:
return
@@ -2728,50 +2666,54 @@ def _set_triton_ptxas_path() -> None:
warnings.warn(f"{ptxas_path} exists but is not an executable")
-def _worker_compile_triton(
- load_kernel: Callable[[], Any],
+def _worker_compile(
+ kernel_name: str,
+ source_code: str,
cc: int,
device: torch.device,
device_interface: Type[DeviceInterface],
-):
- _set_triton_ptxas_path()
+) -> None:
device_interface.Worker.set_device(device.index)
- kernel = load_kernel()
+ kernel = TritonCodeCache.load(kernel_name, source_code)
kernel.precompile(warm_cache_only_with_cc=cc)
-class CodeCacheFuture:
- def result(self):
- raise NotImplementedError()
+def _load_kernel(kernel_name: str, source_code: str) -> ModuleType:
+ _set_triton_ptxas_path()
+ kernel = TritonCodeCache.load(kernel_name, source_code)
+ kernel.precompile()
+ return kernel
-class TritonFuture(CodeCacheFuture):
+class TritonFuture:
kernel: ModuleType
def __init__(
self,
- kernel: Any,
- future: Optional[Future[Any]],
+ kernel_name: str,
+ source_code: str,
+ future: Future[Any],
) -> None:
- self.kernel = kernel
+ self.kernel_name = kernel_name
+ self.source_code = source_code
self.future = future
# @dynamo_utils.dynamo_timed
def result(self) -> ModuleType:
- if self.future is not None:
- # If the worker failed this will throw an exception.
- self.future.result()
- self.future = None
- self.kernel.precompile()
- return self.kernel
-
-
-class LambdaFuture(CodeCacheFuture):
- def __init__(self, result_fn):
- self.result_fn = result_fn
-
- def result(self):
- return self.result_fn()
+ t0 = time()
+ if hasattr(self, "kernel"):
+ return self.kernel
+ # If the worker failed this will throw an exception.
+ self.future.result()
+ kernel = self.kernel = _load_kernel(self.kernel_name, self.source_code)
+ latency = time() - t0
+ if latency > 50:
+ developer_warning(
+ f"Detected long compilation time of {latency} seconds for kernel name {self.kernel_name}"
+ )
+ developer_warning(self.source_code)
+ del self.kernel_name, self.source_code, self.future
+ return kernel
# If this process dies abnormally (e.g. segfault)
@@ -2805,21 +2747,10 @@ _pool_set: Set[ProcessPoolExecutor] = set()
def shutdown_compile_workers() -> None:
"""Shut down all outstanding compile-worker pools."""
+ global _pool_set
for pool in _pool_set:
pool.shutdown()
- after_fork()
-
-
-def after_fork():
- """Reset pools to initial state without shutting them down"""
_pool_set.clear()
- AsyncCompile.process_pool.cache_clear()
-
-
-try:
- os.register_at_fork(after_in_child=after_fork)
-except AttributeError:
- pass # register_at_fork does not exists on windows
class AsyncCompile:
@@ -2894,26 +2825,21 @@ class AsyncCompile:
return task()
return cls.pool().submit(task)
- def triton(self, kernel_name: str, source_code: str, device_str: str = "cuda"):
+ def triton(
+ self, kernel_name: str, source_code: str, device_str: str = "cuda"
+ ) -> Union[TritonFuture, ModuleType]:
_compile_start()
- _set_triton_ptxas_path()
- kernel = TritonCodeCache.load(kernel_name, source_code)
if config.compile_threads > 1:
device_interface = get_interface_for_device(device_str)
device = torch.device(device_str, device_interface.current_device())
cc = device_interface.get_compute_capability(device)
future = self.process_pool().submit(
- _worker_compile_triton,
- kernel._reload_in_subproc,
- cc,
- device,
- device_interface,
+ _worker_compile, kernel_name, source_code, cc, device, device_interface
)
- return TritonFuture(kernel, future)
+ return TritonFuture(kernel_name, source_code, future)
else:
- kernel.precompile()
- return kernel
+ return _load_kernel(kernel_name, source_code)
def multi_kernel(self, *args, **kwargs) -> Any:
from torch._inductor.codegen.multi_kernel import MultiKernelCall
@@ -2921,21 +2847,18 @@ class AsyncCompile:
# no need to call this in parallel since the sub-kernels are already parallel tasks
return MultiKernelCall(*args, **kwargs)
- def cpp(self, source_code: str):
- if config.compile_threads <= 1:
+ def cpp(self, source_code: str) -> ModuleType:
+ def task():
return CppCodeCache.load(source_code).kernel
- else:
- get_result = CppCodeCache.load_async(source_code, submit_fn=self.submit)
- return LambdaFuture(lambda: get_result().kernel)
- def cpp_pybinding(self, argtypes: List[str], source_code: str):
- if config.compile_threads <= 1:
- return CppPythonBindingsCodeCache.load_pybinding(argtypes, source_code)
- else:
- get_result = CppPythonBindingsCodeCache.load_pybinding_async(
- argtypes, source_code, submit_fn=self.submit
+ return self.submit(task)
+
+ def cpp_pybinding(self, argtypes: List[str], source_code: str) -> ModuleType:
+ return self.submit(
+ functools.partial(
+ CppPythonBindingsCodeCache.load_pybinding, argtypes, source_code
)
- return LambdaFuture(get_result)
+ )
def cuda(self, source_code, dst_file_ext):
def task():
@@ -2948,7 +2871,7 @@ class AsyncCompile:
[
value
for key, value in scope.items()
- if isinstance(value, (Future, CodeCacheFuture))
+ if isinstance(value, (Future, TritonFuture))
]
)
pbar = tqdm(
@@ -2961,18 +2884,18 @@ class AsyncCompile:
for key, result in scope.items():
if config.verbose_progress and not isinstance(pbar, _Faketqdm):
pbar.set_postfix_str(key)
- if isinstance(result, (Future, CodeCacheFuture)):
+ if isinstance(result, (Future, TritonFuture)):
scope[key] = result.result()
pbar.update(1)
_compile_end()
-if (
- os.environ.get("TORCH_TNT_IN_USE", "0") == "1"
- or os.environ.get("TORCH_WARM_POOL", "1") != "1"
-):
- pass
+if os.environ.get("TORCH_TNT_IN_USE", "0") == "1":
+ # When TorchTNT is used, calling warm_pool() here will cause the
+ # compile workers created not being able to be shut down inside
+ # shutdown_compile_workers(). This may cause significant QPS drop.
+ log.info("Do not call AsyncCompile.warm_pool() because TorchTNT is in use.")
elif sys.version_info >= (3, 12):
log.info("AsyncCompile.warm_pool() is broken on 3.12+.")
else: | 2.41.0 |
994d993c05fdd93510dbaba4dcbfad4e4f20a1b | Fri, 12 Apr 2024 07:26:50 +0000 | [PATCH 0066/1000] Revert "[inductor] Fix fresh_inductor_cache() (#122661)" | This reverts commit cda383e7bcdac029a6d5508d63c0355a40bb0d32. Reverted https://github.com/pytorch/pytorch/pull/122661 on behalf of https://github.com/DanilBaibak due to Break internal build ([comment](https://github.com/pytorch/pytorch/pull/122661#issuecomment-2051171028)) | diff --git a/test/inductor/test_codecache.py b/test/inductor/test_codecache.py
index 04ab69debc..89380bee53 100644
--- a/test/inductor/test_codecache.py
+++ b/test/inductor/test_codecache.py
@@ -14,12 +14,10 @@ from torch._inductor.codecache import (
CUDACodeCache,
FxGraphCachePickler,
FxGraphHashDetails,
- PyCodeCache,
TensorMetadata,
TensorMetadataAndValues,
)
from torch._inductor.test_case import run_tests, TestCase
-from torch._inductor.utils import cache_dir, fresh_inductor_cache
from torch.testing._internal.common_cuda import SM80OrLater
from torch.testing._internal.common_device_type import largeTensorTest
from torch.testing._internal.common_utils import (
@@ -555,28 +553,5 @@ class TestFxGraphCacheHashing(TestCase):
assert "-DNDEBUG" in cmd_parts, cmd_parts
-class TestUtils(TestCase):
- def test_fresh_inductor_cache(self):
- def fn(x, y):
- return x + y
-
- a = torch.rand(10)
- b = torch.rand(10)
-
- with fresh_inductor_cache():
- self.assertEqual(len(PyCodeCache.cache.keys()), 0)
- res1 = torch.compile(fn)(a, b)
- cache_dir1 = cache_dir()
-
- torch._dynamo.reset()
- with fresh_inductor_cache():
- self.assertEqual(len(PyCodeCache.cache.keys()), 0)
- res2 = torch.compile(fn)(a, b)
- cache_dir2 = cache_dir()
-
- self.assertEqual(res1, res2)
- self.assertNotEqual(cache_dir1, cache_dir2)
-
-
if __name__ == "__main__":
run_tests()
diff --git a/test/inductor/test_max_autotune.py b/test/inductor/test_max_autotune.py
index beee4f38fd..7486af13d1 100644
--- a/test/inductor/test_max_autotune.py
+++ b/test/inductor/test_max_autotune.py
@@ -280,17 +280,17 @@ class TestMaxAutotune(TestCase):
os.environ.pop("TRITON_CACHE_MANAGER", None)
with config.patch({"max_autotune": True}):
for _ in range(4):
- with fresh_inductor_cache():
- torch.compile(mm, dynamic=dynamic)(a, b)
+ torch.compile(mm, dynamic=dynamic)(a, b)
reset()
+ torch._inductor.codecache.PyCodeCache.clear()
self.assertEqual(num_get, 3)
self.assertEqual(num_put, 1)
num_get = 0
num_put = 0
for _ in range(4):
- with fresh_inductor_cache():
- torch.compile(f, dynamic=dynamic)(x, y)
+ torch.compile(f, dynamic=dynamic)(x, y)
reset()
+ torch._inductor.codecache.PyCodeCache.clear()
self.assertEqual(num_get, 3)
self.assertEqual(num_put, 1)
diff --git a/test/inductor/test_multi_kernel.py b/test/inductor/test_multi_kernel.py
index 808802ebfd..316ef6bae9 100644
--- a/test/inductor/test_multi_kernel.py
+++ b/test/inductor/test_multi_kernel.py
@@ -60,7 +60,7 @@ def make_cpp_wrapper_test(orig_test, **extra_args):
# the kernel with cpp_wrapper enabled.
from torch._inductor import codecache
- codecache.PyCodeCache.cache_clear()
+ codecache.PyCodeCache.clear()
return orig_test(self, **extra_args)
return fn
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index 98cf75fc23..cf8f8f6e53 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -59,12 +59,7 @@ from torch._dynamo.device_interface import (
from torch._dynamo.utils import counters, dynamo_timed
from torch._inductor import config, exc, metrics
from torch._inductor.codegen.cuda import cuda_env
-from torch._inductor.utils import (
- cache_dir,
- clear_on_fresh_inductor_cache,
- developer_warning,
- is_linux,
-)
+from torch._inductor.utils import cache_dir, developer_warning, is_linux
from torch._subclasses.fake_tensor import (
extract_tensor_metadata,
FakeTensor,
@@ -188,7 +183,6 @@ class CacheBase:
return system
@staticmethod
- @clear_on_fresh_inductor_cache
@functools.lru_cache(None)
def get_local_cache_path() -> Path:
return Path(os.path.join(cache_dir(), "cache", CacheBase.get_system()["hash"]))
@@ -208,21 +202,22 @@ class CacheBase:
self.system = CacheBase.get_system()
+ self.local_cache_path = CacheBase.get_local_cache_path()
+ self.global_cache_path = CacheBase.get_global_cache_path()
+
def get_local_cache(self) -> Dict[str, Any]:
- local_cache_path = self.get_local_cache_path()
- if not local_cache_path.is_file():
+ if not self.local_cache_path.is_file():
return {}
- with open(local_cache_path) as local_cache_fp:
+ with open(self.local_cache_path) as local_cache_fp:
local_cache = json.load(local_cache_fp)
return local_cache["cache"]
def update_local_cache(self, local_cache: Dict[str, Any]) -> None:
- local_cache_path = self.get_local_cache_path()
- if not os.path.exists(local_cache_path.parent):
- os.makedirs(local_cache_path.parent, exist_ok=True)
+ if not os.path.exists(self.local_cache_path.parent):
+ os.makedirs(self.local_cache_path.parent, exist_ok=True)
write_atomic(
- str(local_cache_path),
+ str(self.local_cache_path),
json.dumps({"system": self.system, "cache": local_cache}, indent=4),
)
@@ -255,10 +250,9 @@ class LocalCache(CacheBase):
class PersistentCache(CacheBase):
@functools.lru_cache(None)
def get_global_cache(self):
- global_cache_path = self.get_global_cache_path()
- if global_cache_path is None or not global_cache_path.is_file():
+ if self.global_cache_path is None or not self.global_cache_path.is_file():
return {}
- with open(global_cache_path) as global_cache_fp:
+ with open(self.global_cache_path) as global_cache_fp:
global_cache = json.load(global_cache_fp)
return global_cache["cache"]
@@ -1619,10 +1613,9 @@ def split_aot_inductor_output_path(path: str) -> Tuple[str, str]:
return path, ""
-@clear_on_fresh_inductor_cache
class CudaKernelParamCache:
cache: Dict[str, Dict[str, str]] = dict()
- cache_clear = staticmethod(cache.clear)
+ clear = staticmethod(cache.clear)
@classmethod
def set(cls, key: str, params: Dict[str, str], cubin: str) -> None:
@@ -1906,7 +1899,6 @@ class AotCodeCompiler:
# - valid_vec_isa_list()
# - VecISA.__bool__() <-- takes out a lock
# - compile_file() <-- imports cpp_prefix_path from cpp, which causes us to try to take out the same lock.
-@clear_on_fresh_inductor_cache
@functools.lru_cache
def cpp_prefix_path() -> str:
path = Path(__file__).parent / "codegen/cpp_prefix.h"
@@ -2019,10 +2011,9 @@ def custom_op_wrapper(op: str, *args):
return torch._C._aoti.unsafe_alloc_void_ptr_from_tensor(result)
-@clear_on_fresh_inductor_cache
class CppCodeCache:
cache: Dict[str, Union[CDLL, ModuleType]] = {}
- cache_clear = staticmethod(cache.clear)
+ clear = staticmethod(cache.clear)
cpp_compile_command_flags: Dict[str, Any] = {}
@staticmethod
@@ -2082,10 +2073,9 @@ class CppCodeCache:
# Customized Python binding for cpp kernels
-@clear_on_fresh_inductor_cache
class CppPythonBindingsCodeCache(CppCodeCache):
cache: Dict[str, Union[CDLL, ModuleType]] = {}
- cache_clear = staticmethod(cache.clear)
+ clear = staticmethod(cache.clear)
cpp_compile_command_flags = {
# kernels have no dependency on libtorch
"include_pytorch": False,
@@ -2214,10 +2204,9 @@ class CppPythonBindingsCodeCache(CppCodeCache):
return getattr(result, cls.entry_function)
-@clear_on_fresh_inductor_cache
class CppWrapperCodeCache(CppPythonBindingsCodeCache):
cache: Dict[str, Union[CDLL, ModuleType]] = {}
- cache_clear = staticmethod(cache.clear)
+ clear = staticmethod(cache.clear)
cpp_compile_command_flags = {
"include_pytorch": not config.abi_compatible,
"shared": True,
@@ -2277,11 +2266,10 @@ class CppWrapperCodeCache(CppPythonBindingsCodeCache):
)
-@clear_on_fresh_inductor_cache
class PyCodeCache:
cache: Dict[str, ModuleType] = dict()
linemaps: Dict[str, List[Tuple[Any, ...]]] = dict()
- cache_clear = staticmethod(cache.clear)
+ clear = staticmethod(cache.clear)
@classmethod
def write(cls, source_code: str, extra: str = "") -> Tuple[str, str]:
@@ -2561,7 +2549,6 @@ class DLLWrapper:
self.close()
-@clear_on_fresh_inductor_cache
class CUDACodeCache:
@dataclasses.dataclass
class CacheEntry:
@@ -2569,7 +2556,7 @@ class CUDACodeCache:
output_path: str
cache: Dict[str, CacheEntry] = dict()
- cache_clear = staticmethod(cache.clear)
+ clear = staticmethod(cache.clear)
_SOURCE_CODE_SUFFIX = "cu"
@classmethod
diff --git a/torch/_inductor/test_case.py b/torch/_inductor/test_case.py
index 0412d4eea5..546524d900 100644
--- a/torch/_inductor/test_case.py
+++ b/torch/_inductor/test_case.py
@@ -1,5 +1,6 @@
import contextlib
-import os
+import tempfile
+import unittest
from torch._dynamo.test_case import (
run_tests as dynamo_run_tests,
@@ -7,7 +8,6 @@ from torch._dynamo.test_case import (
)
from torch._inductor import config
-from torch._inductor.utils import fresh_inductor_cache
def run_tests(needs=()):
@@ -20,13 +20,34 @@ class TestCase(DynamoTestCase):
the cache directory for each test.
"""
+ _stack: contextlib.ExitStack
+
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+ cls._stack = contextlib.ExitStack()
+ cls._stack.enter_context(config.patch({"fx_graph_cache": True}))
+
+ @classmethod
+ def tearDownClass(cls):
+ super().tearDownClass()
+ cls._stack.close()
+
def setUp(self):
super().setUp()
- self._inductor_test_stack = contextlib.ExitStack()
- self._inductor_test_stack.enter_context(config.patch({"fx_graph_cache": True}))
- if os.environ.get("INDUCTOR_TEST_DISABLE_FRESH_CACHE") != "1":
- self._inductor_test_stack.enter_context(fresh_inductor_cache())
+
+ # For all tests, mock the tmp directory populated by the inductor
+ # FxGraphCache, both for test isolation and to avoid filling disk.
+ self._inductor_cache_tmp_dir = tempfile.TemporaryDirectory()
+ self._inductor_cache_get_tmp_dir_patch = unittest.mock.patch(
+ "torch._inductor.codecache.FxGraphCache._get_tmp_dir"
+ )
+ mock_get_dir = self._inductor_cache_get_tmp_dir_patch.start()
+ mock_get_dir.return_value = self._inductor_cache_tmp_dir.name
def tearDown(self):
super().tearDown()
- self._inductor_test_stack.close()
+
+ # Clean up the FxGraphCache tmp dir.
+ self._inductor_cache_get_tmp_dir_patch.stop()
+ self._inductor_cache_tmp_dir.cleanup()
diff --git a/torch/_inductor/utils.py b/torch/_inductor/utils.py
index 132cb1e211..5f85704c99 100644
--- a/torch/_inductor/utils.py
+++ b/torch/_inductor/utils.py
@@ -734,22 +734,6 @@ else:
)
-_registered_caches: List[Any] = []
-
-
-def clear_on_fresh_inductor_cache(obj: Any):
- """
- Use this decorator to register any caches that should be cache_clear'd
- with fresh_inductor_cache().
- """
- if not hasattr(obj, "cache_clear") or not callable(obj.cache_clear):
- raise AttributeError(f"{obj} does not have a cache_clear method")
-
- _registered_caches.append(obj)
- return obj
-
-
-@clear_on_fresh_inductor_cache
@functools.lru_cache(None)
def cache_dir() -> str:
cache_dir = os.environ.get("TORCHINDUCTOR_CACHE_DIR")
@@ -771,9 +755,6 @@ def fresh_inductor_cache(cache_entries=None):
Optionally, pass a dict as 'cache_entries' to get a list of filenames and sizes
generated with this cache instance.
"""
- for obj in _registered_caches:
- obj.cache_clear()
-
with tempfile.TemporaryDirectory() as inductor_cache_dir:
with mock.patch.dict(
os.environ, {"TORCHINDUCTOR_CACHE_DIR": inductor_cache_dir} | 2.41.0 |
ff53e169f77518f5037f0d6020c610837f2fa94 | Fri, 12 Apr 2024 07:50:28 +0000 | [PATCH 0067/1000] add option to turn on return_tuple in _SplitterBase (#123868) | Summary: as title. split the oss change from D55871896 into this separate diff

Test Plan: deploy

Differential Revision: D56032268

Pull Request resolved: https://github.com/pytorch/pytorch/pull/123868
Approved by: https://github.com/ZhengkaiZ, https://github.com/DanilBaibak | diff --git a/torch/fx/passes/splitter_base.py b/torch/fx/passes/splitter_base.py
index 3a493f4af3..d0c146eb86 100644
--- a/torch/fx/passes/splitter_base.py
+++ b/torch/fx/passes/splitter_base.py
@@ -306,6 +306,7 @@ class _SplitterBase:
operator_support: OperatorSupportBase,
settings: _SplitterSettingBase,
non_acc_submodule_name: str = "_run_on_cpu_",
+ return_tuple: bool = False,
):
"""
Preprocesses graph before splitting:
@@ -336,6 +337,7 @@ class _SplitterBase:
self.non_acc_submodule_name = non_acc_submodule_name
self._node_submodule_map: Dict[str, str] = {}
+ self._return_tuple = return_tuple
# ===============================================================
# Helpers for ctor and initial state
@@ -846,7 +848,7 @@ class _SplitterBase:
self._node_submodule_map[node.name] = tag
def split(self, remove_tag: bool = False) -> torch.fx.GraphModule:
- split_module = split_by_tags(self.module, self.tags)
+ split_module = split_by_tags(self.module, self.tags, return_tuple=self._return_tuple)
if remove_tag:
for node in self.module.graph.nodes:
if hasattr(node, "tag"): | 2.41.0 |
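To make the new flag concrete, a minimal sketch of a splitter that opts into tuple outputs might look like this; the subclass name and the default `OperatorSupport()` / `_SplitterSettingBase()` arguments are illustrative assumptions, not part of the patch:

```python
import torch
from torch.fx.passes.operator_support import OperatorSupport
from torch.fx.passes.splitter_base import _SplitterBase, _SplitterSettingBase


class TupleReturningSplitter(_SplitterBase):
    """Hypothetical splitter whose split() always returns a plain tuple of outputs."""

    def __init__(self, module: torch.fx.GraphModule, sample_input):
        super().__init__(
            module,
            sample_input,
            OperatorSupport(),       # assumption: default operator support
            _SplitterSettingBase(),  # assumption: default splitter settings
            return_tuple=True,       # the flag added in this patch
        )
```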
dfeec9cdc246a6a003dff4b6ba0a5ceb60613f1 | Fri, 12 Apr 2024 08:56:06 +0000 | [PATCH 0070/1000] Add a mode to avoid clone() in DDPSink (#122927) | DDPSink clones the outputs of DDP to avoid in-place modification of loss (see https://github.com/pytorch/pytorch/issues/61982). However, when outputs are really large (2-3GB) this adds a lot of overhead for peak memory. As a result, adding a mode to avoid this clone in cases where users are not modifying loss in-place. Pull Request resolved: https://github.com/pytorch/pytorch/pull/122927 Approved by: https://github.com/fegin, https://github.com/rohan-varma | diff --git a/torch/nn/parallel/distributed.py b/torch/nn/parallel/distributed.py
index 56415aa84d..3db95fe14a 100644
--- a/torch/nn/parallel/distributed.py
+++ b/torch/nn/parallel/distributed.py
@@ -242,9 +242,11 @@ class _DDPSink(Function):
# None and are not filled with zeros.
ctx.set_materialize_grads(False)
ctx.ddp_weakref = ddp_weakref
- ret = tuple(
- inp.clone() if isinstance(inp, torch.Tensor) else inp for inp in inputs
- )
+ ret = inputs
+ if ddp_weakref()._ddp_sink_clone:
+ ret = tuple(
+ inp.clone() if isinstance(inp, torch.Tensor) else inp for inp in inputs
+ )
return ret
@staticmethod
@@ -901,6 +903,9 @@ class DistributedDataParallel(Module, Joinable):
if self._use_python_reducer:
self._register_accum_grad_hook()
+ # Whether or not DDPSink performs a clone.
+ self._ddp_sink_clone = True
+
def _register_accum_grad_hook(self):
import torch.distributed._functional_collectives as fcol
@@ -2361,3 +2366,16 @@ class DistributedDataParallel(Module, Joinable):
if not _rank_not_in_group(new_process_group):
self.process_group = new_process_group
self.reducer._update_process_group(new_process_group)
+
+ def _set_ddp_sink_clone(self, val: bool):
+ """
+ Sets whether or not DDPSink should clone the output tensors or not.
+ The default is True since if the loss is modified in place we run
+ into the view is modified in-place error.
+
+ Although, cloning the tensors can add significant memory and
+ performance hit if the number and size of tensors are large. As
+ a result, this can be set to False if you are not modifying the
+ loss in place.
+ """
+ self._ddp_sink_clone = val
diff --git a/torch/testing/_internal/distributed/distributed_test.py b/torch/testing/_internal/distributed/distributed_test.py
index 4e5b915bbd..9e1e0b4361 100644
--- a/torch/testing/_internal/distributed/distributed_test.py
+++ b/torch/testing/_internal/distributed/distributed_test.py
@@ -26,6 +26,7 @@ import torch.nn as nn
import torch.nn.functional as F
from torch._utils_internal import TEST_MASTER_ADDR as MASTER_ADDR
from torch._utils_internal import TEST_MASTER_PORT as MASTER_PORT
+from torch.utils._python_dispatch import TorchDispatchMode
from torch.autograd import DeviceType
from torch.cuda.amp import GradScaler, autocast
@@ -10059,5 +10060,43 @@ class DistributedTest:
for p1, p2 in zip(ddp.parameters(), ddp_static.parameters()):
self.assertEqual(p1.grad, p2.grad)
+ @skip_if_lt_x_gpu(2)
+ @require_world_size(2)
+ @skip_but_pass_in_sandcastle_if(
+ BACKEND not in DistTestCases.backend_feature["ddp"],
+ f"The {BACKEND} backend does not support DistributedDataParallel",
+ )
+ def test_ddp_sink_noclone(self):
+ "Tests that we can configure DDP to avoid clone"
+
+ class OpPatcher(TorchDispatchMode):
+ def __torch_dispatch__(self, func, types, args=(), kwargs=None):
+ func_packet = func._overloadpacket
+ if func_packet == torch.ops.aten.clone:
+ raise RuntimeError("clone encountered!")
+ kwargs = kwargs if kwargs else {}
+ return func(*args, **kwargs)
+
+ class MyModel(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.fc = torch.nn.Linear(10, 10)
+
+ def forward(self, input):
+ return self.fc(input)
+
+ model = MyModel().cuda(self.rank)
+ ddp = torch.nn.parallel.DistributedDataParallel(
+ model,
+ device_ids=[self.rank],
+ find_unused_parameters=True,
+ )
+ ddp._set_ddp_sink_clone(False)
+ input = torch.rand(10, 10).cuda(self.rank)
+
+ with OpPatcher() as patcher:
+ ddp(input).sum().backward()
+
+
instantiate_parametrized_tests(DistributedTest._DistTestBase) | 2.41.0 |
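A rough usage sketch for the new toggle (not part of the patch): a job whose loss is never modified in place could opt out of the DDPSink clone as below; the single-process gloo setup is only for illustration.

```python
import os
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP

# Illustrative single-process setup; real jobs would be launched via torchrun.
os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29500")
dist.init_process_group("gloo", rank=0, world_size=1)

model = torch.nn.Linear(10, 10)
ddp = DDP(model, find_unused_parameters=True)

# Skip the DDPSink clone: safe only because the loss below is never modified in place.
ddp._set_ddp_sink_clone(False)

loss = ddp(torch.rand(10, 10)).sum()
loss.backward()

dist.destroy_process_group()
```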
274d57037837c29ecdd572dc7ea430267ecf9c2 | Wed, 10 Apr 2024 21:20:22 -0700 | [PATCH 0071/1000] [compiled autograd][dynamo] Make compiled graph take in boxed inputs (#122353) | ### Context In today's Dynamo, we lift all tensors encountered during tracing to be individual graph inputs, even when they were in a container. And [Dynamo generates](https://github.com/pytorch/pytorch/blob/fdc281f2587f9a5a935de1f1368e7ad7ed0f9828/torch/_dynamo/codegen.py#L371) the runtime function's signature using the graph's graphargs. This means that the generated function will have each grapharg as an argument, which is problematic if we want to free the inputs in inductor codegen. See [python function arguments are kept alive for the duration of the function call](https://github.com/pytorch/pytorch/pull/83137#issuecomment-1211320670). ```python # original code def forward(inputs): a, b, c, d, e = inputs inputs.clear() out = a out += b del b # frees memory out += c del c # frees memory out += d del d # frees memory out += e del e # frees memory return out # compiled code: def forward(a, b, c, d, e): # b, c, d, e can't be freed before end of function ``` This isn't a concern when compiling forward because a, b, c, d, e are all from user code, and should be kept alive. But when compiling backwards, a, b, c, d, e may be intermediate results i.e. activations, that we DO want to clear ASAP to remain on par with eager peak memory. ### Solution We have encountered similar memory problems in AOTAutograd before, where we adopted the boxed calling convention (wrapping to-be-freed objects in a list), adding list clearing to inductor codegen, and being careful about holding references to elements in the input list. We need to do something similar, but for inputs from the user program (compiled autograd fx graph in this case). This PR support lists as graphargs/placeholder nodes. When tracing a list of tensors, we create a node for it, and pre-emptively initialize variable trackers for its elements before they are used in the user program. Subsequent uses of those variables will find hits in the lookup table `input_source_to_var`. With the inputs as a list in the graph args, our compiled code can free inputs just like in the eager case. ```python def forward(inputs): # a, b, c, d, e can be freed within the function now ``` Currently, AOT/Inductor flattens list input via [flatten_graph_inputs wrapper](https://github.com/pytorch/pytorch/blob/597f479643f82859307ece38971f1c8e7d657c80/torch/_inductor/compile_fx.py#L1454-L1478), which is why this PR's CI can be green. Additional changes are needed to its runtime wrapper, done in the next PR. The next step is to ensure that we are careful in forwarding the list to inductor codegen without holding additional references. Pull Request resolved: https://github.com/pytorch/pytorch/pull/122353 Approved by: https://github.com/jansel ghstack dependencies: #123630, #123674 | diff --git a/test/dynamo/test_backward_higher_order_ops.py b/test/dynamo/test_backward_higher_order_ops.py
index 0b68f15328..db34287583 100644
--- a/test/dynamo/test_backward_higher_order_ops.py
+++ b/test/dynamo/test_backward_higher_order_ops.py
@@ -126,12 +126,14 @@ class _multiply_invoke(torch.nn.Module):
actual,
"""\
class GraphModule(torch.nn.Module):
- def forward(self, s0 : torch.SymInt, L_inputs_0_ : torch.Tensor):
- l_inputs_0_ = L_inputs_0_
+ def forward(self, L_inputs_ : list):
+ l_inputs_ = L_inputs_
- new_grad = torch.clone(l_inputs_0_)
+ getitem = l_inputs_[0]; l_inputs_ = None
- result = l_inputs_0_ * l_inputs_0_; l_inputs_0_ = None
+ new_grad = torch.clone(getitem)
+
+ result = getitem * getitem; getitem = None
new_grad_1 = torch.clone(result); result = None
return (new_grad, new_grad_1)
@@ -190,12 +192,14 @@ class GraphModule(torch.nn.Module):
actual,
"""\
class GraphModule(torch.nn.Module):
- def forward(self, s0 : torch.SymInt, L_inputs_0_ : torch.Tensor):
- l_inputs_0_ = L_inputs_0_
+ def forward(self, L_inputs_ : list):
+ l_inputs_ = L_inputs_
+
+ getitem = l_inputs_[0]; l_inputs_ = None
- new_grad = torch.clone(l_inputs_0_)
+ new_grad = torch.clone(getitem)
- result = l_inputs_0_ * l_inputs_0_; l_inputs_0_ = None
+ result = getitem * getitem; getitem = None
new_grad_1 = torch.clone(result); result = None
return (new_grad, new_grad_1)
diff --git a/test/inductor/test_compiled_autograd.py b/test/inductor/test_compiled_autograd.py
index 46864361e7..7acc225272 100644
--- a/test/inductor/test_compiled_autograd.py
+++ b/test/inductor/test_compiled_autograd.py
@@ -208,6 +208,55 @@ main()
self.check_output_and_recompiles(fn)
+ def test_dynamo_boxed(self):
+ def get_placeholders(gm_):
+ placeholders = []
+ for node in gm_.graph.nodes:
+ if node.op == "placeholder":
+ placeholders.append(node)
+ return placeholders
+
+ def eager_with_check(gm, is_bwd):
+ def inner_compiler(gm_, example_inputs_):
+ placeholders = get_placeholders(gm_)
+ if is_bwd:
+ # should be boxed inputs
+ assert len(placeholders) == 1
+ pass
+ else:
+ assert len(placeholders) > 1
+
+ return gm_
+
+ return torch.compile(gm, backend=inner_compiler)
+
+ fwd_compiler_fn = functools.partial(eager_with_check, is_bwd=False)
+ bwd_compiler_fn = functools.partial(eager_with_check, is_bwd=True)
+
+ def fn(inputs):
+ args_0, args_1, args_2 = inputs
+ out = torch.mm(args_0, args_1)
+ out = torch.mm(out, args_2)
+ loss = out.sum()
+ with compiled_autograd.enable(bwd_compiler_fn):
+ loss.backward()
+ yield args_0.grad
+ yield args_1.grad
+ yield args_2.grad
+
+ inputs = [
+ torch.randn([1, 2], requires_grad=True),
+ torch.randn([2, 3], requires_grad=True),
+ torch.randn([3, 4], requires_grad=True),
+ ]
+
+ compiled_fn = eager_with_check(fn, is_bwd=False)
+ grads = list(compiled_fn(inputs))
+ self.assertEqual(len(grads), 3)
+ self.assertNotEqual(grads[0], None)
+ self.assertNotEqual(grads[1], None)
+ self.assertNotEqual(grads[2], None)
+
def test_implicit_add(self):
def fn():
y = torch.randn(1, 4, requires_grad=True)
diff --git a/test/inductor/test_distributed_patterns.py b/test/inductor/test_distributed_patterns.py
index 4a65323131..d822ad0af6 100644
--- a/test/inductor/test_distributed_patterns.py
+++ b/test/inductor/test_distributed_patterns.py
@@ -267,7 +267,7 @@ class DistributedPatternTests(TestCase):
self.assertEqual(fw_cnt.frame_count, 1)
self.assertEqual(fw_cnt.op_count, 5)
self.assertEqual(bw_cnt.frame_count, 2) # grad=None and grad!=None
- self.assertEqual(bw_cnt.op_count, 39)
+ self.assertEqual(bw_cnt.op_count, 48)
def test_module_backward_hooks_aot(self):
m1, inp1 = init_module_bw_hooks(True)
diff --git a/torch/_dynamo/compiled_autograd.py b/torch/_dynamo/compiled_autograd.py
index 62c1e94116..18385171fa 100644
--- a/torch/_dynamo/compiled_autograd.py
+++ b/torch/_dynamo/compiled_autograd.py
@@ -5,7 +5,7 @@ from typing import List, Optional
import torch
from torch._dynamo.external_utils import call_backward, call_hook
from torch._dynamo.source import GetItemSource, LocalSource
-from torch._dynamo.utils import counters, lazy_format_graph_code
+from torch._dynamo.utils import counters, lazy_format_graph_code, set_locals_to_steal
from torch._logging import getArtifactLogger, trace_structured
from torch._prims_common import clone_preserve_strides
from torch._subclasses import FakeTensorMode
@@ -199,6 +199,7 @@ class AutogradCompilerInstance:
graph = GraphModule(
self.fx_tracer.root, self.fx_tracer.graph, "CompiledAutograd"
)
+ set_locals_to_steal(graph, ["inputs"])
compiled_autograd_log.info(
"%s", lazy_format_graph_code("Compiled autograd graph", graph)
)
diff --git a/torch/_dynamo/utils.py b/torch/_dynamo/utils.py
index ab43664f5c..c60a308672 100644
--- a/torch/_dynamo/utils.py
+++ b/torch/_dynamo/utils.py
@@ -2651,3 +2651,13 @@ def flatten_graph_inputs(gm: torch.fx.GraphModule, inputs, compile_gm):
return compiled_fn(*pytree.arg_tree_leaves(*args))
return wrapper
+
+
+def get_locals_to_steal(maybe_gm):
+ if not isinstance(maybe_gm, torch.fx.GraphModule) or not hasattr(maybe_gm, "meta"):
+ return []
+ return maybe_gm.meta.get("locals_to_steal", [])
+
+
+def set_locals_to_steal(gm, locals_to_steal):
+ gm.meta["locals_to_steal"] = locals_to_steal
diff --git a/torch/_dynamo/variables/builder.py b/torch/_dynamo/variables/builder.py
index 1c6cf687ff..8d1626fbec 100644
--- a/torch/_dynamo/variables/builder.py
+++ b/torch/_dynamo/variables/builder.py
@@ -71,6 +71,7 @@ from ..utils import (
clone_input,
common_constant_types,
get_fake_value,
+ get_locals_to_steal,
get_static_address_type,
is_function_or_wrapper,
is_namedtuple,
@@ -876,6 +877,51 @@ class VariableBuilder:
for i, item in enumerate(value)
]
+ maybe_gm = self.tx.output.local_scope.get("self")
+ if isinstance(
+ self.source, LocalSource
+ ) and self.source.local_name in get_locals_to_steal(maybe_gm):
+ # The input tensor list to dynamo from compiled autograd may contain activations
+ # which are freed as they are used in inductor. Dynamo's default behavior is to
+ # lift all tensors to the graph inputs, but this will cause dynamo to hold an
+ # extra reference to the activation tensors and increase peak memory usage.
+ # To allow freeing ASAP, we keep the list as graph argument to the dynamo output
+ # graph, and unpack it locally.
+ # e.g. instead of `def forward(self, L_inputs_0_, L_inputs_1_, ...):`, we have
+ # `def forward(self, L_inputs_):`
+ source = self.source
+ assert isinstance(value, list)
+ tensor_list_proxy = self.tx.output.root_tracer.create_graph_input(
+ re.sub(r"[^a-zA-Z0-9]+", "_", self.name), type(value), source=source
+ )
+
+ list_variable = wrap_fx_proxy_cls(
+ target_cls=TensorVariable,
+ tx=self.tx,
+ proxy=tensor_list_proxy,
+ example_value=value,
+ subclass_type=None,
+ source=source,
+ )
+
+ guards = []
+ for i, tensor_variable in enumerate(list_variable.items):
+ source_i = GetItemSource(base=source, index=i, index_is_slice=False)
+ # access unpacked tensor from this list instead of from a lifted arg
+ self.tx.output.input_source_to_var[source_i] = tensor_variable
+
+ guard = functools.partial(
+ GuardBuilder.TENSOR_MATCH, value=TensorWeakRef(value[i])
+ )
+ guards.append(source_i.make_guard(guard))
+
+ install_guard(*guards, skip=1)
+
+ grapharg = GraphArg(
+ source, value, is_unspecialized=False, fake_tensor=None, is_tensor=False
+ )
+ tensor_list_proxy.node.meta["grapharg"] = grapharg
+
result = BaseListVariable.cls_for_instance(value)(
output, mutable_local=MutableLocal()
) | 2.41.0 |
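The boxed calling convention described in the commit message above can be illustrated with a small, self-contained sketch; the function names and tensor sizes below are invented for illustration and are not part of the PR.

```python
import torch

def unboxed(a, b, c):
    # Each argument is kept alive by the caller's frame for the whole call,
    # so none of the storages can be reclaimed before the function returns.
    return a + b + c

def boxed(inputs):
    # Boxed convention: take the references out of the list, clear the list,
    # and drop each tensor as soon as it has been consumed.
    a, b, c = inputs
    inputs.clear()
    out = a
    del a
    out = out + b
    del b  # b's storage can be freed here (if nothing else references it)
    out = out + c
    del c  # likewise for c
    return out

activations = [torch.randn(1024, 1024) for _ in range(3)]
out = boxed(activations)
assert len(activations) == 0  # the callee emptied the list it was handed
```

The same idea is what lets the compiled backward graph release activations as it consumes them instead of pinning them until the call returns.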
40b451e910c943e86badda7372eecfa628b8417 | Thu, 11 Apr 2024 22:47:38 -0700 | [PATCH 0072/1000] [compiled autograd][dynamo] Codegen aliases to keep grad mutated tensors alive (#123359) | The current codegen is problematic if __compiled_fn_0 clears the inputs list, since we need it for assignment afterwards ```python def forward(inputs): __compiled_fn_0 = ... # The actual function needs to be provided graph_out_0 = __compiled_fn_0(inputs) # clears inputs temp_list = [] temp_list.append(graph_out_0[0]) inputs[4].grad = graph_out_0[1] # inputs is empty, index error inputs[7].grad = graph_out_0[2] inputs[8].grad = graph_out_0[3] inputs[9].grad = graph_out_0[3] del graph_out_0 return temp_list ``` With this fix, we use aliases to keep the tensors alive ```python def forward(inputs): __compiled_fn_0 = ... # The actual function needs to be provided inputs_ref_1 = inputs[9] inputs_ref_2 = inputs[4] inputs_ref_3 = inputs[8] inputs_ref_4 = inputs[7] graph_out_0 = __compiled_fn_0(inputs) temp_list = [] temp_list.append(graph_out_0[0]) inputs_ref_2.grad = graph_out_0[1] inputs_ref_4.grad = graph_out_0[2] inputs_ref_3.grad = graph_out_0[3] inputs_ref_1.grad = graph_out_0[3] del graph_out_0 return temp_list ``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/123359 Approved by: https://github.com/jansel ghstack dependencies: #123630, #123674, #122353 | diff --git a/test/inductor/test_compiled_autograd.py b/test/inductor/test_compiled_autograd.py
index 7acc225272..da4f7a972a 100644
--- a/test/inductor/test_compiled_autograd.py
+++ b/test/inductor/test_compiled_autograd.py
@@ -257,6 +257,57 @@ main()
self.assertNotEqual(grads[1], None)
self.assertNotEqual(grads[2], None)
+ def test_inputs_aliasing_bytecode(self):
+ # Freeze compiled autograd graph
+ compiler = torch._dynamo.compiled_autograd.AutogradCompilerInstance(compiler_fn)
+ param = torch.ones(100)
+ activ = torch.ones(100) * 2
+ inputs = [param, activ]
+ proxies, _ = compiler.begin_capture(inputs=inputs, sizes=[])
+ param_proxy, activ_proxy = proxies
+ buf = activ_proxy * 2
+ torch.ops.inductor.accumulate_grad_.default(param_proxy, buf)
+ compiled_fn = compiler.end_capture(buf)
+
+ def bytecode_hook(code, out_code):
+ import dis
+ import sys
+
+ if sys.version_info < (3, 11):
+ call_op = "CALL_FUNCTION"
+ else:
+ call_op = "CALL"
+
+ insts = list(dis.get_instructions(out_code))
+ call_graph_idx = next(
+ i for i, inst in enumerate(insts) if inst.opname == call_op
+ )
+ # pre-graph should alias: inputs_ref_0 = inputs[0]
+ matches = [
+ inst
+ for inst in insts[:call_graph_idx]
+ if inst.opname == "STORE_FAST" and inst.argval == "inputs_ref_0"
+ ]
+ self.assertTrue(len(matches) == 1)
+ # post-graph should access inputs_ref_0 instead of inputs
+ matches = [
+ inst for inst in insts[call_graph_idx:] if inst.argval == "inputs"
+ ]
+ self.assertTrue(len(matches) == 0)
+ matches = [
+ inst
+ for inst in insts[call_graph_idx:]
+ if inst.opname == "LOAD_FAST" and inst.argval == "inputs_ref_0"
+ ]
+ self.assertTrue(len(matches) == 1)
+
+ torch._dynamo.reset()
+ handle = torch._dynamo.convert_frame.register_bytecode_hook(bytecode_hook)
+ try:
+ compiled_fn(inputs=[param, activ], sizes=(), hooks=())
+ finally:
+ handle.remove()
+
def test_implicit_add(self):
def fn():
y = torch.randn(1, 4, requires_grad=True)
diff --git a/torch/_dynamo/output_graph.py b/torch/_dynamo/output_graph.py
index 2c9e20267d..af768761a5 100644
--- a/torch/_dynamo/output_graph.py
+++ b/torch/_dynamo/output_graph.py
@@ -51,11 +51,12 @@ from .exc import (
)
from .guards import GuardBuilder, install_guard
from .mutation_guard import is_dynamic_nn_module
-from .side_effects import SideEffects
+from .side_effects import AttributeMutationExisting, SideEffects
from .source import (
AttrSource,
BackwardStateSource,
ConstantSource,
+ GetItemSource,
GlobalStateSource,
is_constant_source,
is_from_local_source,
@@ -74,6 +75,7 @@ from .utils import (
counters,
dynamo_timed,
get_instruction_source_311,
+ get_locals_to_steal,
get_static_address_type,
graph_break_reasons,
increment_op_count,
@@ -850,6 +852,69 @@ class OutputGraph:
raise AssertionError("unreachable")
+ def get_attr_mutations_on_stolen_lists(
+ self,
+ ) -> Dict[str, List[AttributeMutationExisting]]:
+ attr_mutations_on_stolen_lists: Dict[str, List[AttributeMutationExisting]] = {}
+ maybe_gm = self.local_scope.get("self")
+ stolen_list_names = get_locals_to_steal(maybe_gm)
+ for attr_mutation in self.side_effects.store_attr_mutations.keys():
+ if (
+ not isinstance(attr_mutation, AttributeMutationExisting)
+ or not isinstance(attr_mutation.source, GetItemSource)
+ or not isinstance(attr_mutation.source.base, LocalSource)
+ ):
+ continue
+
+ list_name = attr_mutation.source.base.local_name
+ if list_name not in stolen_list_names:
+ continue
+
+ # mutation is of type `stolen_list[i].attr_name`, so we need to keep stolen_list[i] alive
+ if list_name not in attr_mutations_on_stolen_lists:
+ attr_mutations_on_stolen_lists[list_name] = []
+ attr_mutations_on_stolen_lists[list_name].append(attr_mutation)
+ return attr_mutations_on_stolen_lists
+
+ def handle_mutations_on_stolen_list_inputs(self):
+ # When mutations happen on inputs list elements, those elements must be kept alive after the function call.
+ # If the input list is stolen, we perform the mutation on aliases.
+ alias_insts = []
+ attr_mutations_on_stolen_lists = self.get_attr_mutations_on_stolen_lists()
+ for arg in self.graphargs:
+ if not (
+ isinstance(arg._example, list)
+ and isinstance(arg.source, LocalSource)
+ and arg.source.local_name in attr_mutations_on_stolen_lists
+ ):
+ continue
+
+ # arg is a list that will be cleared by the compiled function
+ list_name = arg.source.local_name
+ assert list_name in self.code_options["co_varnames"]
+ for mutation in attr_mutations_on_stolen_lists[list_name]:
+ assert mutation.source is not None
+ assert isinstance(mutation.source, GetItemSource)
+ list_idx = mutation.source.index
+ alias_name = self.new_var(
+ f"{list_name}_ref"
+ ) # self.new_var already adds unique id suffix
+
+ # bytecode of `alias_name = list_name[list_idx]`
+ alias_insts.extend(
+ [
+ create_instruction("LOAD_FAST", argval=list_name),
+ create_instruction("LOAD_CONST", argval=list_idx),
+ create_instruction("BINARY_SUBSCR"),
+ create_instruction("STORE_FAST", argval=alias_name),
+ ]
+ )
+
+ # perform mutation on alias, handled by suffix codegen
+ mutation.source = LocalSource(alias_name)
+
+ return alias_insts
+
def compile_subgraph(
self, tx, partial_convert=False, reason: Optional[GraphCompileReason] = None
):
@@ -890,6 +955,7 @@ class OutputGraph:
self.pregraph_bytecode and self.export
), "export does not support pregraph_bytecode"
prefix_insts.extend(self.pregraph_bytecode)
+ prefix_insts.extend(self.handle_mutations_on_stolen_list_inputs())
def append_prefix_insts():
self.add_output_instructions(prefix_insts) | 2.41.0 |
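A rough Python analogue of the bytecode change above, with invented names (`compiled_fn`, `runtime_wrapper`): taking an alias of `inputs[i]` before the compiled call clears the list keeps the tensor reachable for the `.grad` assignment afterwards.

```python
import torch

def compiled_fn(inputs):
    # stand-in for __compiled_fn_0: consumes the boxed inputs and clears them
    grad = inputs[0].detach() * 2.0
    inputs.clear()
    return (grad,)

def runtime_wrapper(inputs):
    inputs_ref_0 = inputs[0]       # alias taken before the call, as in the new prologue
    (grad,) = compiled_fn(inputs)  # inputs is empty from here on
    inputs_ref_0.grad = grad       # inputs[0].grad would raise IndexError now
    return grad

param = torch.ones(4, requires_grad=True)
runtime_wrapper([param])
assert torch.equal(param.grad, torch.full((4,), 2.0))
```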
fc3aa5f8168414b9bfbcf59df0e33450d96c424 | Thu, 11 Apr 2024 22:47:39 -0700 | [PATCH 0073/1000] [compiled autograd][aot] Trim runtime refs for list inputs from dynamo (#122535) | Pull Request resolved: https://github.com/pytorch/pytorch/pull/122535 Approved by: https://github.com/bdhirsh ghstack dependencies: #123630, #123674, #122353, #123359 | diff --git a/test/inductor/test_compiled_autograd.py b/test/inductor/test_compiled_autograd.py
index da4f7a972a..7992ce8c40 100644
--- a/test/inductor/test_compiled_autograd.py
+++ b/test/inductor/test_compiled_autograd.py
@@ -1191,6 +1191,43 @@ TORCH_LIBRARY(test_autograd_cpp_node_data_dependent, m) {
self.check_output_and_recompiles(fn, 3)
+ @unittest.skipIf(not HAS_CUDA, "requires cuda")
+ def test_free_activation_memory(self):
+ self.assertTrue(torch.cuda.memory_allocated() == 0)
+
+ # Use an op to check that the memory is freed by the time the op is executed
+ def assertion_impl(to_clone):
+ mem_allocated = torch.cuda.memory_allocated()
+ self.assertTrue(
+ mem_allocated < 4000000, "activations should have been freed"
+ )
+ return to_clone.clone()
+
+ with torch.library._scoped_library("test_compiled_autograd", "FRAGMENT") as lib:
+ lib.define(
+ "assertion_op(Tensor x) -> Tensor", tags=(torch.Tag.pt2_compliant_tag,)
+ )
+ lib.impl("assertion_op", assertion_impl, "CPU")
+ lib.impl("assertion_op", lambda x: x.clone(), "Meta")
+
+ # Create a graph that allows inputs stealing
+ def forward(activations):
+ add = activations[0] + 1
+ out = add.cpu()
+ cloned_out = torch.ops.test_compiled_autograd.assertion_op(out)
+ return (cloned_out,)
+
+ gm = torch.fx.symbolic_trace(forward)
+ torch._dynamo.utils.set_locals_to_steal(gm, ["activations"])
+ compiled_fn = torch.compile(gm)
+
+ # allocate at least 4,000,000 bytes (1,000,000 * 4 bytes)
+ activations = [torch.ones(1000000, dtype=torch.float32, device="cuda")]
+ self.assertTrue(torch.cuda.memory_allocated() > 4000000)
+
+ out = compiled_fn(activations)
+ self.assertTrue(len(activations) == 0)
+
def load_test_module(name):
testdir = Path(__file__).absolute().parent.parent
@@ -1362,6 +1399,7 @@ known_failing_tests = {
"test_save_for_backward_inputs_are_namedtuple", # torch._dynamo.exc.Unsupported: 'skip function
"test_autograd_function_backed_op", # RuntimeError: compiled_args not implemented
"test_setitem", # AssertionError: Tensor-likes are not close!
+ "test_grad_nonleaf_register_hook", # IndexError: list index out of range (NB: x.grad = y where both x and y are input tensors)
}
if not HAS_CUDA:
diff --git a/torch/_dynamo/utils.py b/torch/_dynamo/utils.py
index c60a308672..153a1c251e 100644
--- a/torch/_dynamo/utils.py
+++ b/torch/_dynamo/utils.py
@@ -2627,6 +2627,17 @@ def nn_module_proxy(mod):
return proxy
+class GmWrapper(torch.nn.Module):
+ def __init__(self, gm, spec):
+ super().__init__()
+ self.gm = gm
+ self.spec = spec
+
+ def forward(self, *args):
+ args: List[Any] = list(args)
+ return self.gm(*pytree.tree_unflatten(args, self.spec))
+
+
def flatten_graph_inputs(gm: torch.fx.GraphModule, inputs, compile_gm):
"""
Mutate inputs so that they are flat and wrap gm such that it
@@ -2634,21 +2645,24 @@ def flatten_graph_inputs(gm: torch.fx.GraphModule, inputs, compile_gm):
bumpy inputs.
"""
inputs, spec = pytree.tree_flatten(inputs)
+ compiled_fn = compile_gm(GmWrapper(gm, spec), inputs)
- class GmWrapper(torch.nn.Module):
- def __init__(self):
- super().__init__()
- self.gm = gm
-
- def forward(self, *args):
- args: List[Any] = list(args)
- return self.gm(*pytree.tree_unflatten(args, spec))
-
- compiled_fn = compile_gm(GmWrapper(), inputs)
+ idx_to_steal = [
+ i
+ for i, node in enumerate(gm.graph.nodes)
+ if node.op == "placeholder" and node.meta.get("steal_arg", False)
+ ]
def wrapper(*args):
# note this doesn't check the spec, assuming it is the same
- return compiled_fn(*pytree.arg_tree_leaves(*args))
+ flat_args = pytree.arg_tree_leaves(*args)
+
+ # flat_args is a new list, so we need to clear references from the old list
+ for i in idx_to_steal:
+ args[i].clear()
+
+ # this call is boxed to avoid increasing refcount until we reach aot_module_simplified forward
+ return compiled_fn(flat_args)
return wrapper
diff --git a/torch/_dynamo/variables/builder.py b/torch/_dynamo/variables/builder.py
index 8d1626fbec..f94c464094 100644
--- a/torch/_dynamo/variables/builder.py
+++ b/torch/_dynamo/variables/builder.py
@@ -894,6 +894,7 @@ class VariableBuilder:
tensor_list_proxy = self.tx.output.root_tracer.create_graph_input(
re.sub(r"[^a-zA-Z0-9]+", "_", self.name), type(value), source=source
)
+ tensor_list_proxy.node.meta["steal_arg"] = True
list_variable = wrap_fx_proxy_cls(
target_cls=TensorVariable,
diff --git a/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py b/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py
index e2291ea6fc..4bbed04e7e 100644
--- a/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py
+++ b/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py
@@ -170,7 +170,6 @@ def aot_dispatch_base(
fakified_out = None
return out
- # args is a list because compiled_fw is boxed_call
if fw_metadata.is_rng_op_functionalized:
# Add the seed and offset to args
seed, offset = CUDARngStateHelper.get_torch_state_as_tuple()
diff --git a/torch/_functorch/_aot_autograd/runtime_wrappers.py b/torch/_functorch/_aot_autograd/runtime_wrappers.py
index 8ef179bf45..eaecf438ec 100644
--- a/torch/_functorch/_aot_autograd/runtime_wrappers.py
+++ b/torch/_functorch/_aot_autograd/runtime_wrappers.py
@@ -99,8 +99,9 @@ def create_runtime_wrapper(
assert num_tokens == 0
elif num_tokens > 0:
# Pass in effect tokens (See Note [Side-Effectful Tokens in AOTAutograd])
- # NOTE: this keeps an extra reference to the old args until the end of this function
+ old_args = args
args = [[None] * num_tokens, *args]
+ old_args.clear()
# stash a ref to each input tensor we plan to use after the compiled function
orig_inputs = {i: args[i] for i in epilogue_args_idx}
diff --git a/torch/_functorch/aot_autograd.py b/torch/_functorch/aot_autograd.py
index 818cfd0e84..44fa0248ad 100644
--- a/torch/_functorch/aot_autograd.py
+++ b/torch/_functorch/aot_autograd.py
@@ -954,10 +954,29 @@ def aot_module_simplified(
aot_config,
)
+ if isinstance(mod, torch._dynamo.utils.GmWrapper):
+ # This function is called by the flatten_graph_inputs wrapper, which boxes
+ # the inputs so that they can be freed before the end of this scope.
+ # For overhead reasons, this is not the default wrapper, see comment:
+ # https://github.com/pytorch/pytorch/pull/122535/files#r1560096481
+ def boxed_forward(runtime_args: List[Any]):
+ flat_args = []
+ flat_args.extend(params_flat)
+ flat_args.extend(runtime_args)
+ runtime_args.clear()
+ return compiled_fn(flat_args)
+
+ # Just for convenience
+ boxed_forward.zero_grad = mod.zero_grad
+ boxed_forward.named_parameters = mod.named_parameters
+ boxed_forward.named_buffers = mod.named_buffers
+ return boxed_forward
+
# TODO: There is something deeply wrong here; compiled_fn running with
# the boxed calling convention, but aot_module_simplified somehow
# historically returned a function that was not the boxed calling
# convention. This should get fixed...
+ # NB: GraphModule/nn.Module rely on the non-boxed calling convention here
def forward(*runtime_args: Tuple[Any]):
full_args = []
full_args.extend(params_flat) | 2.41.0 |
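The wrapper chain touched here (flatten_graph_inputs, GmWrapper, boxed_forward) is about not holding extra references on the way down to the compiled callable. A minimal sketch of that hand-off, with `run_compiled` standing in for the real compiled function (it is not a PyTorch API):

```python
import torch

def run_compiled(flat_args):
    # boxed callee: owns flat_args and clears it once the tensors are consumed
    out = flat_args[0].sum() + flat_args[1].sum()
    flat_args.clear()
    return out

def wrapper(runtime_args):
    flat_args = list(runtime_args)  # copy the references into a fresh list
    runtime_args.clear()            # "steal" them: drop the caller's copies
    return run_compiled(flat_args)  # only flat_args keeps the tensors alive now

activations = [torch.ones(8), torch.ones(8)]
out = wrapper(activations)
assert len(activations) == 0 and out.item() == 16.0
```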
0eb162730e76132a5e29adbc16f8721ef125d68 | Fri, 12 Apr 2024 13:14:59 +0000 | [PATCH 0074/1000] Revert "Switch quantized_decomposed over to new custom ops API (#123454)" | This reverts commit 638729c0cdf3ce4274f4d68f8e46e5a1cd36cbe8. Reverted https://github.com/pytorch/pytorch/pull/123454 on behalf of https://github.com/DanilBaibak due to Break internal build ([comment](https://github.com/pytorch/pytorch/pull/123454#issuecomment-2051738976)) | diff --git a/torch/_custom_op/impl.py b/torch/_custom_op/impl.py
index 6f25e2b9af..fefd7cedf9 100644
--- a/torch/_custom_op/impl.py
+++ b/torch/_custom_op/impl.py
@@ -882,11 +882,6 @@ SUPPORTED_RETURN_TYPES = {
def parse_return(annotation, error_fn):
- if annotation == inspect.Signature.empty:
- error_fn(
- "There was no return annotation. Please add one."
- )
-
if annotation is None:
return "()"
diff --git a/torch/ao/quantization/fx/_decomposed.py b/torch/ao/quantization/fx/_decomposed.py
index 67f7b3f509..18dd61c37c 100644
--- a/torch/ao/quantization/fx/_decomposed.py
+++ b/torch/ao/quantization/fx/_decomposed.py
@@ -4,11 +4,11 @@ from typing import Optional, Tuple
import torch
from torch._refs import _unsqueeze_multiple
from torch.ao.quantization.utils import determine_qparams, validate_qmin_qmax
-from torch.library import custom_op, Library, impl
+from torch.library import impl, Library
# Note: decomposed means decomposed quantized tensor, using decomposed so that the
# name is not too long
-ns = "quantized_decomposed"
+quantized_decomposed_lib = Library("quantized_decomposed", "DEF")
_DTYPE_TO_QVALUE_BOUNDS = {
torch.uint8: (0, 255),
@@ -31,8 +31,11 @@ def _quant_min_max_bounds_check(quant_min, quant_max, dtype):
"quant_max out of bound for dtype, " \
f"quant_max_upper_bound: {quant_max_upper_bound} quant_max: {quant_max}"
+quantized_decomposed_lib.define(
+ "quantize_per_tensor(Tensor input, float scale, int zero_point, "
+ "int quant_min, int quant_max, ScalarType dtype) -> Tensor")
-@custom_op(f"{ns}::quantize_per_tensor", mutates_args=())
+@impl(quantized_decomposed_lib, "quantize_per_tensor", "CompositeExplicitAutograd")
def quantize_per_tensor(
input: torch.Tensor,
scale: float,
@@ -64,8 +67,8 @@ def quantize_per_tensor(
inv_scale = 1.0 / scale
return torch.clamp(torch.round(input * inv_scale) + zero_point, quant_min, quant_max).to(dtype)
-@quantize_per_tensor.register_fake
-def _(
+@impl(quantized_decomposed_lib, "quantize_per_tensor", "Meta")
+def quantize_per_tensor_meta(
input: torch.Tensor,
scale: float,
zero_point: int,
@@ -78,7 +81,11 @@ def _(
assert input.dtype == torch.float32, f"Expecting input to have dtype torch.float32, but got dtype: {input.dtype}"
return torch.empty_like(input, dtype=dtype)
-@custom_op(f"{ns}::quantize_per_tensor.tensor", mutates_args=())
+quantized_decomposed_lib.define(
+ "quantize_per_tensor.tensor(Tensor input, Tensor scale, Tensor zero_point, "
+ "int quant_min, int quant_max, ScalarType dtype) -> Tensor")
+
+@impl(quantized_decomposed_lib, "quantize_per_tensor.tensor", "CompositeExplicitAutograd")
def quantize_per_tensor_tensor(
input: torch.Tensor,
scale: torch.Tensor,
@@ -96,7 +103,7 @@ def quantize_per_tensor_tensor(
assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}"
return quantize_per_tensor(input, scale.item(), zero_point.item(), quant_min, quant_max, dtype)
-@quantize_per_tensor_tensor.register_fake
+@impl(quantized_decomposed_lib, "quantize_per_tensor.tensor", "Meta")
def quantize_per_tensor_tensor_meta(
input: torch.Tensor,
scale: torch.Tensor,
@@ -113,7 +120,11 @@ def quantize_per_tensor_tensor_meta(
return torch.empty_like(input, dtype=dtype)
# TODO: remove other variants and keep this one
-@custom_op(f"{ns}::quantize_per_tensor.tensor2", mutates_args=())
+quantized_decomposed_lib.define(
+ "quantize_per_tensor.tensor2(Tensor input, Tensor scale, Tensor zero_point, "
+ "Tensor quant_min, Tensor quant_max, ScalarType dtype) -> Tensor")
+
+@impl(quantized_decomposed_lib, "quantize_per_tensor.tensor2", "CompositeExplicitAutograd")
def quantize_per_tensor_tensor2(
input: torch.Tensor,
scale: torch.Tensor,
@@ -131,8 +142,8 @@ def quantize_per_tensor_tensor2(
assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}"
return quantize_per_tensor(input, scale.item(), zero_point.item(), quant_min.item(), quant_max.item(), dtype)
-@quantize_per_tensor_tensor2.register_fake
-def _(
+@impl(quantized_decomposed_lib, "quantize_per_tensor.tensor2", "Meta")
+def quantize_per_tensor_tensor2_meta(
input: torch.Tensor,
scale: torch.Tensor,
zero_point: torch.Tensor,
@@ -146,7 +157,11 @@ def _(
# the signature as metadata for the input Tensor, this might be useful for pattern
# matching in the future
# We will revisit this later if we found there are no use cases for it
-@custom_op(f"{ns}::dequantize_per_tensor", mutates_args=())
+quantized_decomposed_lib.define(
+ "dequantize_per_tensor(Tensor input, float scale, int zero_point, "
+ "int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None) -> Tensor")
+
+@impl(quantized_decomposed_lib, "dequantize_per_tensor", "CompositeExplicitAutograd")
def dequantize_per_tensor(
input: torch.Tensor,
scale: float,
@@ -194,7 +209,7 @@ def dequantize_per_tensor(
else:
raise ValueError(f"Unsupported dtype in dequantize_per_tensor: {dtype}")
-@dequantize_per_tensor.register_fake
+@impl(quantized_decomposed_lib, "dequantize_per_tensor", "Meta")
def dequantize_per_tensor_meta(
input: torch.Tensor,
scale: torch.Tensor,
@@ -209,7 +224,11 @@ def dequantize_per_tensor_meta(
out_dtype = torch.float32
return torch.empty_like(input, dtype=out_dtype)
-@custom_op(f"{ns}::dequantize_per_tensor.tensor", mutates_args=())
+quantized_decomposed_lib.define(
+ "dequantize_per_tensor.tensor(Tensor input, Tensor scale, Tensor zero_point, "
+ "int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None) -> Tensor")
+
+@impl(quantized_decomposed_lib, "dequantize_per_tensor.tensor", "CompositeExplicitAutograd")
def dequantize_per_tensor_tensor(
input: torch.Tensor,
scale: torch.Tensor,
@@ -229,8 +248,8 @@ def dequantize_per_tensor_tensor(
assert scale.numel() == 1, f"Expecting scale tensor to be one element, but received : {scale.numel()}"
return dequantize_per_tensor(input, scale.item(), zero_point.item(), quant_min, quant_max, dtype, out_dtype=out_dtype)
-@dequantize_per_tensor_tensor.register_fake
-def dequantize_per_tensor_tensor_fake(
+@impl(quantized_decomposed_lib, "dequantize_per_tensor.tensor", "Meta")
+def dequantize_per_tensor_tensor_meta(
input: torch.Tensor,
scale: torch.Tensor,
zero_point: torch.Tensor,
@@ -251,7 +270,11 @@ def dequantize_per_tensor_tensor_fake(
raise ValueError(f"Unsupported dtype in dequantize_per_tensor: {dtype}")
# TODO: remove other variants and keep this one
-@custom_op(f"{ns}::dequantize_per_tensor.tensor2", mutates_args=())
+quantized_decomposed_lib.define(
+ "dequantize_per_tensor.tensor2(Tensor input, Tensor scale, Tensor zero_point, "
+ "Tensor quant_min, Tensor quant_max, ScalarType dtype, *, ScalarType? out_dtype=None) -> Tensor")
+
+@impl(quantized_decomposed_lib, "dequantize_per_tensor.tensor2", "CompositeExplicitAutograd")
def dequantize_per_tensor_tensor2(
input: torch.Tensor,
scale: torch.Tensor,
@@ -272,8 +295,8 @@ def dequantize_per_tensor_tensor2(
return dequantize_per_tensor(
input, scale.item(), zero_point.item(), quant_min.item(), quant_max.item(), dtype, out_dtype=out_dtype)
-@dequantize_per_tensor_tensor2.register_fake
-def _(
+@impl(quantized_decomposed_lib, "dequantize_per_tensor.tensor2", "Meta")
+def dequantize_per_tensor_tensor2_meta(
input,
scale,
zero_point,
@@ -283,9 +306,13 @@ def _(
*,
out_dtype: Optional[torch.dtype] = None
) -> torch.Tensor:
- return dequantize_per_tensor_tensor_fake(input, scale, zero_point, quant_min, quant_max, dtype, out_dtype=out_dtype)
+ return dequantize_per_tensor_tensor_meta(input, scale, zero_point, quant_min, quant_max, dtype, out_dtype=out_dtype)
+
+quantized_decomposed_lib.define(
+ "choose_qparams.tensor(Tensor input, int quant_min, int quant_max, "
+ "float eps, ScalarType dtype) -> (Tensor, Tensor)")
-@custom_op(f"{ns}::choose_qparams.tensor", mutates_args=())
+@impl(quantized_decomposed_lib, "choose_qparams.tensor", "CompositeExplicitAutograd")
def choose_qparams_tensor(
input: torch.Tensor,
qmin: int,
@@ -320,7 +347,11 @@ def choose_qparams_tensor(
return determine_qparams(
min_val, max_val, qmin, qmax, dtype, torch.Tensor([eps]), has_customized_qrange=False)
-@custom_op(f"{ns}::choose_qparams_symmetric.tensor", mutates_args=())
+quantized_decomposed_lib.define(
+ "choose_qparams_symmetric.tensor(Tensor input, int quant_min, int quant_max, "
+ "float eps, ScalarType dtype) -> (Tensor, Tensor)")
+
+@impl(quantized_decomposed_lib, "choose_qparams_symmetric.tensor", "CompositeExplicitAutograd")
def choose_qparams_symmetric_tensor(
input: torch.Tensor,
qmin: int,
@@ -362,8 +393,8 @@ def choose_qparams_symmetric_tensor(
qscheme=torch.per_tensor_symmetric
)
-@choose_qparams_tensor.register_fake
-def _(
+@impl(quantized_decomposed_lib, "choose_qparams.tensor", "Meta")
+def choose_qparams_tensor_meta(
input: torch.Tensor,
quant_min: int,
quant_max: int,
@@ -379,8 +410,8 @@ def _(
{quant_min} max: {quant_max}"
return torch.empty(1, dtype=torch.double, device=input.device), torch.empty(1, dtype=torch.int64, device=input.device)
-@choose_qparams_symmetric_tensor.register_fake
-def _(
+@impl(quantized_decomposed_lib, "choose_qparams_symmetric.tensor", "Meta")
+def choose_qparams_symmetric_tensor_meta(
input: torch.Tensor,
quant_min: int,
quant_max: int,
@@ -397,7 +428,11 @@ def _permute_to_axis_zero(x, axis):
y = x.permute(tuple(new_axis_list))
return y, new_axis_list
-@custom_op(f"{ns}::quantize_per_channel", mutates_args=())
+quantized_decomposed_lib.define(
+ "quantize_per_channel(Tensor input, Tensor scales, Tensor zero_points, int axis, "
+ "int quant_min, int quant_max, ScalarType dtype) -> Tensor")
+
+@impl(quantized_decomposed_lib, "quantize_per_channel", "CompositeExplicitAutograd")
def quantize_per_channel(
input: torch.Tensor,
scales: torch.Tensor,
@@ -442,7 +477,7 @@ def quantize_per_channel(
out = res.permute(tuple(permute_axis_list))
return out.to(dtype)
-@quantize_per_channel.register_fake
+@impl(quantized_decomposed_lib, "quantize_per_channel", "Meta")
def quantize_per_channel_meta(
input: torch.Tensor,
scales: torch.Tensor,
@@ -463,7 +498,11 @@ def quantize_per_channel_meta(
# the signature as metadata for the input Tensor, this might be useful for pattern
# matching in the future
# We will revisit this later if we found there are no use cases for it
-@custom_op(f"{ns}::dequantize_per_channel", mutates_args=())
+quantized_decomposed_lib.define(
+ "dequantize_per_channel(Tensor input, Tensor scales, Tensor? zero_points, int axis, "
+ "int quant_min, int quant_max, ScalarType dtype, *, ScalarType? out_dtype=None) -> Tensor")
+
+@impl(quantized_decomposed_lib, "dequantize_per_channel", "CompositeExplicitAutograd")
def dequantize_per_channel(
input: torch.Tensor,
scales: torch.Tensor,
@@ -521,8 +560,8 @@ def dequantize_per_channel(
out = res.permute(tuple(permute_axis_list))
return out
-@dequantize_per_channel.register_fake
-def _(
+@impl(quantized_decomposed_lib, "dequantize_per_channel", "Meta")
+def dequantize_per_channel_meta(
input: torch.Tensor,
scales: torch.Tensor,
zero_points: Optional[torch.Tensor],
@@ -541,7 +580,16 @@ def _(
return torch.empty_like(input, dtype=out_dtype)
-@custom_op(f"{ns}::choose_qparams_per_token", mutates_args=())
+quantized_decomposed_lib.define(
+ "choose_qparams_per_token(Tensor input, ScalarType dtype) -> (Tensor, Tensor)"
+)
+
+
+@impl(
+ quantized_decomposed_lib,
+ "choose_qparams_per_token",
+ "CompositeExplicitAutograd",
+)
def choose_qparams_per_token(
input: torch.Tensor,
dtype: torch.dtype,
@@ -575,8 +623,12 @@ def choose_qparams_per_token(
return scales, zero_points
-@choose_qparams_per_token.register_fake
-def _(
+@impl(
+ quantized_decomposed_lib,
+ "choose_qparams_per_token",
+ "Meta",
+)
+def choose_qparams_per_token_meta(
input: torch.Tensor,
dtype: torch.dtype,
) -> Tuple[torch.Tensor, torch.Tensor]:
@@ -587,7 +639,16 @@ def _(
# TODO: move this to https://github.com/pytorch/pytorch/blob/main/torch/ao/quantization/fx/_decomposed.py
-@custom_op(f"{ns}::choose_qparams_per_token_asymmetric", mutates_args=())
+quantized_decomposed_lib.define(
+ "choose_qparams_per_token_asymmetric(Tensor input, ScalarType dtype) -> (Tensor, Tensor)"
+)
+
+
+@impl(
+ quantized_decomposed_lib,
+ "choose_qparams_per_token_asymmetric",
+ "CompositeExplicitAutograd",
+)
def choose_qparams_per_token_asymmetric(
input: torch.Tensor,
dtype: torch.dtype,
@@ -630,8 +691,12 @@ def choose_qparams_per_token_asymmetric(
return scale.to(torch.float32), zero_point.to(torch.float32)
-@choose_qparams_per_token_asymmetric.register_fake
-def _(
+@impl(
+ quantized_decomposed_lib,
+ "choose_qparams_per_token_asymmetric",
+ "Meta",
+)
+def choose_qparams_per_token_asymmetric_meta(
input: torch.Tensor,
dtype: torch.dtype,
) -> Tuple[torch.Tensor, torch.Tensor]:
@@ -651,7 +716,13 @@ def _per_token_quant_qparam_dim_check(input, scales, zero_points):
), f"num_tokens: {num_tokens} zero_points: {zero_points.size()}"
-@custom_op(f"{ns}::quantize_per_token", mutates_args=())
+quantized_decomposed_lib.define(
+ "quantize_per_token(Tensor input, Tensor scales, Tensor zero_points, "
+ "int quant_min, int quant_max, ScalarType dtype) -> Tensor"
+)
+
+
+@impl(quantized_decomposed_lib, "quantize_per_token", "CompositeExplicitAutograd")
def quantize_per_token(
input: torch.Tensor,
scales: torch.Tensor,
@@ -659,7 +730,7 @@ def quantize_per_token(
quant_min: int,
quant_max: int,
dtype: torch.dtype,
-) -> torch.Tensor:
+):
"""Per token quantization for the Tensor using the quantization parameters to map
from floating point to quantized values. This means for a N dimension Tensor
(M1, M2, ...Mn, N), we calculate scales/zero_points for each N elements and quantize
@@ -686,8 +757,8 @@ def quantize_per_token(
return input
-@quantize_per_token.register_fake
-def _(
+@impl(quantized_decomposed_lib, "quantize_per_token", "Meta")
+def quantize_per_token_meta(
input: torch.Tensor,
scales: torch.Tensor,
zero_points: torch.Tensor,
@@ -699,7 +770,13 @@ def _(
return torch.empty_like(input, dtype=dtype)
-@custom_op(f"{ns}::dequantize_per_token", mutates_args=())
+quantized_decomposed_lib.define(
+ "dequantize_per_token(Tensor input, Tensor scales, Tensor zero_points, "
+ "int quant_min, int quant_max, ScalarType dtype, ScalarType output_dtype) -> Tensor"
+)
+
+
+@impl(quantized_decomposed_lib, "dequantize_per_token", "CompositeExplicitAutograd")
def dequantize_per_token(
input: torch.Tensor,
scales: torch.Tensor,
@@ -707,8 +784,8 @@ def dequantize_per_token(
quant_min: int,
quant_max: int,
dtype: torch.dtype,
- output_dtype: torch.dtype,
-) -> torch.Tensor:
+ output_dtype: torch.dtype = torch.float32,
+):
"""Per token dequantization for the Tensor using the quantization parameters to map
from floating point to quantized values. This means for a N dimension Tensor
(M1, M2, ...Mn, N), we calculate scales/zero_points for each N elements and quantize
@@ -732,8 +809,8 @@ def dequantize_per_token(
return input
-@dequantize_per_token.register_fake
-def _(
+@impl(quantized_decomposed_lib, "dequantize_per_token", "Meta")
+def dequantize_per_token_meta(
input: torch.Tensor,
scales: torch.Tensor,
zero_points: torch.Tensor,
@@ -747,7 +824,16 @@ def _(
return torch.empty_like(input, dtype=output_dtype)
-@custom_op(f"{ns}::quantize_per_channel_group", mutates_args=())
+quantized_decomposed_lib.define(
+ "quantize_per_channel_group(Tensor input, Tensor scales, Tensor zero_points, int quant_min, "
+ "int quant_max, ScalarType dtype, int group_size) -> Tensor"
+)
+
+
+# TODO: dtype is ignored for now
+@impl(
+ quantized_decomposed_lib, "quantize_per_channel_group", "CompositeExplicitAutograd"
+)
def quantize_per_channel_group(
input: torch.Tensor,
scales: torch.Tensor,
@@ -755,8 +841,8 @@ def quantize_per_channel_group(
quant_min: int,
quant_max: int,
dtype: torch.dtype,
- group_size: int,
-) -> torch.Tensor:
+ group_size=128,
+):
assert group_size > 1
# needed for GPTQ single column quantize
if group_size > input.shape[-1] and scales.shape[-1] == 1:
@@ -784,16 +870,16 @@ def quantize_per_channel_group(
return input_int8
-@quantize_per_channel_group.register_fake
-def _(
+@impl(quantized_decomposed_lib, "quantize_per_channel_group", "Meta")
+def quantize_per_channel_group_meta(
input: torch.Tensor,
scales: torch.Tensor,
zero_points: torch.Tensor,
quant_min: int,
quant_max: int,
dtype: torch.dtype,
- group_size,
-) -> torch.Tensor:
+ group_size=128,
+):
"""Groupwise quantization within each channel for an 2-d Tensor using the quantization parameters
to map from floating point to quantized values. This means for each row of a 2-d Tensor
(M, N), we calculate scales/zero_points for each `group_size` elements
@@ -822,7 +908,17 @@ def _(
return torch.empty_like(input, dtype=dtype)
-@custom_op(f"{ns}::dequantize_per_channel_group", mutates_args=())
+quantized_decomposed_lib.define(
+ "dequantize_per_channel_group(Tensor input, Tensor scales, Tensor? zero_points, int quant_min, "
+ "int quant_max, ScalarType dtype, int group_size, ScalarType output_dtype) -> Tensor"
+)
+
+
+@impl(
+ quantized_decomposed_lib,
+ "dequantize_per_channel_group",
+ "CompositeExplicitAutograd",
+)
def dequantize_per_channel_group(
w_int8: torch.Tensor,
scales: torch.Tensor,
@@ -830,9 +926,9 @@ def dequantize_per_channel_group(
quant_min: int,
quant_max: int,
dtype: torch.dtype,
- group_size: int,
- output_dtype: torch.dtype,
-) -> torch.Tensor:
+ group_size: int = 128,
+ output_dtype: torch.dtype = torch.float32,
+):
"""Groupwise dequantization within each channel for an 2-d Tensor using the quantization parameters
to map from floating point to quantized values. This means for each row of a 2-d Tensor
(M, N), we calculate scales/zero_points for each `group_size` elements
@@ -869,10 +965,6 @@ def dequantize_per_channel_group(
return w_dq
-quantized_decomposed_lib = Library(ns, "DEF")
-
-# TODO: Migrate this to the new torch.library.custom_ops API. This requires a refactor
-# of the autograd.Function. We leave this work to the future.
quantized_decomposed_lib.define(
"fake_quant_per_channel(Tensor input, Tensor scales, Tensor zero_points, int axis, "
"int quant_min, int quant_max) -> Tensor") | 2.41.0 |
9f7ef33c42ebc49bce0274da97726ff17c5ca15 | Thu, 11 Apr 2024 12:39:27 -0700 | [PATCH 0075/1000] AOTAutograd: add config to error when overlapping input checks would cause slow compile / runtimes (#123455) | We should eventually make the non-overlapping checks faster when dynamic shapes are enabled, but this is pretty difficult to do. So for now this PR adds a config that lets us fail fast when this situation happens, instead of causing compile times to secretly come to a crawl. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123455 Approved by: https://github.com/ezyang | diff --git a/test/dynamo/test_repros.py b/test/dynamo/test_repros.py
index 2811ea81ff..f98c84cf54 100644
--- a/test/dynamo/test_repros.py
+++ b/test/dynamo/test_repros.py
@@ -4271,6 +4271,30 @@ class ReproTests(torch._dynamo.test_case.TestCase):
opt_fn(lengths)
self.assertEqual(cnt.frame_count, 1)
+ def test_overlapping_inputs_with_dynamic_shapes_error(self):
+ @torch.compile(backend="aot_eager")
+ def fn(a, b, c, d, e, f):
+ a.mul_(2)
+ b.mul_(2)
+ c.mul_(2)
+ d.mul_(2)
+ e.mul_(2)
+ f.mul_(2)
+
+ base = torch.ones(2, 20)
+ a = base[:, 0:2]
+ b = base[:, 2:4]
+ c = base[:, 4:6]
+ d = base[:, 6:8]
+ e = base[:, 8:10]
+ f = base[:, 10:12]
+ f2 = base[:, 10:14]
+ out = fn(a, b, c, d, e, f)
+ with self.assertRaisesRegex(
+ AssertionError, "is being compiled with dynamic shapes"
+ ):
+ out2 = fn(a, b, c, d, e, f2)
+
def test_user_ctor_ctx_manager_custom_init(self):
class UserCtxManager:
def __init__(self, x):
diff --git a/torch/_functorch/_aot_autograd/input_output_analysis.py b/torch/_functorch/_aot_autograd/input_output_analysis.py
index 90d440f067..6f9f8c3916 100644
--- a/torch/_functorch/_aot_autograd/input_output_analysis.py
+++ b/torch/_functorch/_aot_autograd/input_output_analysis.py
@@ -17,6 +17,7 @@ import torch.utils._pytree as pytree
from torch import Tensor
from torch._subclasses.functional_tensor import FunctionalTensor
from torch.fx.experimental.symbolic_shapes import is_concrete_int
+from .. import config
from .collect_metadata_analysis import coerce_tangent
from .schemas import (
BackwardSignature,
@@ -335,11 +336,60 @@ def _tensors_definitely_do_not_overlap(x, y):
def compute_overlapping_inputs(fwd_inputs, aliased_input_indices):
+ max_aliased_inps_w_dyn_shapes = (
+ config._max_aliased_inputs_with_dynamic_shapes_enabled
+ )
+ definitely_error_on_dyn_shapes = False
+ # If the JK is false / not set, we will fall back to obeying the config above
+ # If it is true, we will always error when there are aliased + mutated inps with dynamic shapes
+ if torch._inductor.config.is_fbcode():
+ definitely_error_on_dyn_shapes = torch._utils_internal.justknobs_check(
+ "pytorch/dynamo:disable_aliased_inputs_with_mutation_and_dyn_shapes"
+ )
+
actual_aliased_indices = set()
- for j in range(len(aliased_input_indices)):
+ num_aliases = len(aliased_input_indices)
+ # > 2 check because num_aliases==1 means no aliasing
+ if num_aliases >= 2 and (
+ definitely_error_on_dyn_shapes or num_aliases > max_aliased_inps_w_dyn_shapes
+ ):
+ dynamic_shape_indices = set()
+ for j in range(num_aliases):
+ j_ = aliased_input_indices[j]
+ curr_inp = fwd_inputs[j_]
+ if any(
+ isinstance(x, torch.SymInt)
+ for x in itertools.chain(
+ curr_inp.shape, curr_inp.stride(), [curr_inp.storage_offset()]
+ )
+ ):
+ dynamic_shape_indices.add(j_)
+ assert (
+ len(dynamic_shape_indices) == 0
+ ), f"""\
+Encountered a graph where:
+- {num_aliases} graph inputs all share the same storage (input indices: {str(aliased_input_indices)})
+- at least one of these aliased inputs was mutated
+- at least one of these inputs is being compiled with dynamic shapes (indices: {str(dynamic_shape_indices)})
+
+Current limit: {str(max_aliased_inps_w_dyn_shapes)}
+Killswitch enabled: {str(definitely_error_on_dyn_shapes)}
+
+The most common way to run into this situation is when your model parameters are allocated as one giant buffer
+and are all mutated by the optimizer, and some of your parameters end up getting compiled with dynamic shapes.
+
+You can avoid this problem by marking your parameters so they explicitly do not participate in dynamic shapes,
+by marking each dim of your parameter static:
+
+torch._dynamo.mark_static(param, 0) # (1, 2, ... for every dimension on the parameter).
+
+If you are running into this issue in a situation where your parameters are static but some other inputs
+are aliased and mutated, and they should be dynamic, please file an issue.
+"""
+ for j in range(num_aliases):
for i in range(j):
- i_ = aliased_input_indices[i]
j_ = aliased_input_indices[j]
+ i_ = aliased_input_indices[i]
if not _tensors_definitely_do_not_overlap(fwd_inputs[i_], fwd_inputs[j_]):
actual_aliased_indices.add(i_)
actual_aliased_indices.add(j_)
diff --git a/torch/_functorch/config.py b/torch/_functorch/config.py
index 1adabccd0a..c3f34fa273 100644
--- a/torch/_functorch/config.py
+++ b/torch/_functorch/config.py
@@ -26,6 +26,16 @@ debug_assert = False
debug_partitioner = os.environ.get("AOT_PARTITIONER_DEBUG", False)
+# Today, if you are in a situation where there is "false aliasing"
+# (e.g. you have a bunch of model parameters that all alias the same underlying buffer),
+# our checks for this situation are very slow if these inputs have dynamic shapes.
+# This config is set to ensure that there aren't too many aliased inputs in this situation,
+# so that we error loudly instead of compiling forever.
+# Eventually, we should make these checks faster.
+# For now, however, you can simply turn off dynamic shapes by marking your inputs static
+# when you run into this situation.
+_max_aliased_inputs_with_dynamic_shapes_enabled = 5
+
static_weight_shapes = True
# Applies CSE to the graph before partitioning | 2.41.0 |
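A small sketch of the workaround named in the new error message: marking every dimension of the parameters static so the aliased, mutated inputs never participate in dynamic shapes. The tiny module here is just for illustration.

```python
import torch

model = torch.nn.Linear(16, 16)

# Opt each parameter out of dynamic shapes, one dimension at a time,
# as suggested by the error message above.
for p in model.parameters():
    for dim in range(p.dim()):
        torch._dynamo.mark_static(p, dim)

compiled = torch.compile(model, backend="aot_eager")
print(compiled(torch.randn(4, 16)).shape)  # torch.Size([4, 16])
```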
120dbbf81f394bf7ecd0ea19da8729b2fcece65 | Fri, 12 Apr 2024 13:26:07 +0000 | [PATCH 0076/1000] Revert "[sparse] Add fast semi-structured spasification kernels (#122350)" | This reverts commit aaec97a40364bb6ccfd968f28d309cfff8748d20. Reverted https://github.com/pytorch/pytorch/pull/122350 on behalf of https://github.com/DanilBaibak due to Break internal build ([comment](https://github.com/pytorch/pytorch/pull/122350#issuecomment-2051757450)) | diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index 04cd21b27d..1953abbd31 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -3342,18 +3342,6 @@
dispatch:
CUDA: _cslt_sparse_mm_search
| - dispatch: |
cdde98df4dd85841b138e3a18a2bb9c065cf83b | Fri, 12 Apr 2024 06:52:50 +0000 | [PATCH 0077/1000] Intel GPU oneDNN upstreaming for library compilation (#117098) | # Motivation As proposed in https://github.com/pytorch/pytorch/issues/114848 and https://github.com/pytorch/pytorch/issues/114723, the oneDNN library is an important component of the Intel GPU software ecosystem. This PR is intended to enable oneDNN compilation for Intel GPU; it is the first step toward enabling operators such as `at::baddmm`. With this PR, a static library `libdnnl.a` for GPU is compiled into the directory `/build/xpu_mkldnn_proj-prefix`. It can be further linked into `libtorch_xpu.so` in the future. The compilation depends on the `USE_XPU` build option and runtime checks such as SYCL, which are defined in https://github.com/pytorch/pytorch/pull/116019 for runtime support. Once #116019 is merged, this compilation can be triggered. The modification is independent of the oneDNN CPU compilation, hence no changes are introduced to the CPU CMake files (e.g. FindMKLDNN.cmake). Co-authored-by: xiaolil1 <[email protected]> Co-authored-by: lei,zhenyuan <[email protected]> Pull Request resolved: https://github.com/pytorch/pytorch/pull/117098 Approved by: https://github.com/EikanWang, https://github.com/jgong5, https://github.com/atalman | diff --git a/cmake/Modules/FindMKLDNN.cmake b/cmake/Modules/FindMKLDNN.cmake
index d9ca7dfd54..f6a19812c8 100644
--- a/cmake/Modules/FindMKLDNN.cmake
+++ b/cmake/Modules/FindMKLDNN.cmake
@@ -18,6 +18,49 @@ IF(NOT MKLDNN_FOUND)
SET(IDEEP_ROOT "${PROJECT_SOURCE_DIR}/third_party/ideep")
SET(MKLDNN_ROOT "${PROJECT_SOURCE_DIR}/third_party/ideep/mkl-dnn")
+
+ if(USE_XPU) # Build oneDNN GPU library
+ if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+ set(DNNL_HOST_COMPILER "g++")
+ # g++ is soft linked to /usr/bin/cxx, oneDNN would not treat it as an absolute path
+ else()
+      message(FATAL_ERROR "oneDNN library currently only supports GNU g++ compiler for XPU backend")
+ endif()
+
+ set(DNNL_MAKE_COMMAND "cmake" "--build" ".")
+ ExternalProject_Add(xpu_mkldnn_proj
+ SOURCE_DIR ${MKLDNN_ROOT}
+ PREFIX ${XPU_MKLDNN_DIR_PREFIX}
+ BUILD_IN_SOURCE 0
+ CMAKE_ARGS -DCMAKE_C_COMPILER=icx
+ -DCMAKE_CXX_COMPILER=icpx
+ -DCMAKE_CXX_COMPILER_ID=IntelLLVM
+ -DDNNL_GPU_RUNTIME=SYCL
+ -DDNNL_CPU_RUNTIME=THREADPOOL
+ -DDNNL_BUILD_TESTS=OFF
+ -DDNNL_BUILD_EXAMPLES=OFF
+ -DONEDNN_BUILD_GRAPH=OFF
+ -DDNNL_LIBRARY_TYPE=STATIC
+ -DDNNL_DPCPP_HOST_COMPILER=${DNNL_HOST_COMPILER} # Use global cxx compiler as host compiler
+ -G ${CMAKE_GENERATOR} # Align Generator to Torch
+ BUILD_COMMAND ${DNNL_MAKE_COMMAND}
+ BUILD_BYPRODUCTS "xpu_mkldnn_proj-prefix/src/xpu_mkldnn_proj-build/src/libdnnl.a"
+ INSTALL_COMMAND ""
+ )
+
+ ExternalProject_Get_Property(xpu_mkldnn_proj BINARY_DIR)
+ set(__XPU_MKLDNN_BUILD_DIR ${BINARY_DIR})
+ set(XPU_MKLDNN_LIBRARIES ${__XPU_MKLDNN_BUILD_DIR}/src/libdnnl.a)
+ set(XPU_MKLDNN_INCLUDE ${__XPU_MKLDNN_BUILD_DIR}/include)
+ # This target would be further linked to libtorch_xpu.so.
+ # The libtorch_xpu.so would contain Conv&GEMM operators that depend on
+ # oneDNN primitive implementations inside libdnnl.a.
+ add_library(xpu_mkldnn INTERFACE)
+ add_dependencies(xpu_mkldnn xpu_mkldnn_proj)
+ target_link_libraries(xpu_mkldnn INTERFACE ${__XPU_MKLDNN_BUILD_DIR}/src/libdnnl.a)
+ target_include_directories(xpu_mkldnn INTERFACE ${XPU_MKLDNN_INCLUDE})
+ endif()
+
IF(NOT APPLE AND NOT WIN32 AND NOT BUILD_LITE_INTERPRETER)
MESSAGE("-- Will build oneDNN Graph")
SET(LLGA_ROOT "${PROJECT_SOURCE_DIR}/third_party/ideep/mkl-dnn") | 2.41.0 |
1613a0803f7cde7956f039bc80f94253b0843f9 | Fri, 12 Apr 2024 14:28:19 +0000 | [PATCH 0080/1000] [Profiler][PrivateUse1] Profiler support PrivateUse1 key (#120556) | Summary: 1. Package public headers of kineto if USE_KINETO so that they can be used by PrivateUse1 users. 2. Add the PrivateUse1 key to ActivityType. 3. Support the PrivateUse1 key in the functions deviceTypeFromActivity and _supported_activities. 4. Fix some bugs when processing profiler results. Co-authored-by: albanD <[email protected]> Co-authored-by: Aaron Shi <[email protected]> Pull Request resolved: https://github.com/pytorch/pytorch/pull/120556 Approved by: https://github.com/aaronenyeshi | diff --git a/setup.py b/setup.py
index 06ab04b80f..ce8f16df77 100644
--- a/setup.py
+++ b/setup.py
@@ -1386,6 +1386,12 @@ def main():
"include/tensorpipe/transport/uv/*.h",
]
)
+ if get_cmake_cache_vars()["USE_KINETO"]:
+ torch_package_data.extend(
+ [
+ "include/kineto/*.h",
+ ]
+ )
torchgen_package_data = [
# Recursive glob doesn't work in setup.py,
# https://github.com/pypa/setuptools/issues/1806
diff --git a/torch/autograd/profiler.py b/torch/autograd/profiler.py
index ba020fb3cb..35bf9f645d 100644
--- a/torch/autograd/profiler.py
+++ b/torch/autograd/profiler.py
@@ -270,7 +270,6 @@ class profile:
self.profiler_kind = ProfilerState.KINETO_PRIVATEUSE1_FALLBACK
else:
self.kineto_activities.add(ProfilerActivity.PrivateUse1)
- self.profiler_kind = ProfilerState.KINETO_PRIVATEUSE1
assert (
len(self.kineto_activities) > 0
@@ -317,6 +316,10 @@ class profile:
return
if self.use_cuda:
torch.cuda.synchronize()
+ elif self.use_device and hasattr(torch, self.use_device):
+ privateuse1_module = getattr(torch, self.use_device)
+ if hasattr(privateuse1_module, "synchronize"):
+ privateuse1_module.synchronize()
t0 = perf_counter_ns()
self.kineto_results = _disable_profiler()
@@ -542,7 +545,10 @@ class profile:
and fe.id in device_corr_map
):
for f_evt in device_corr_map[fe.id]:
- if f_evt.device_type == DeviceType.CUDA:
+ if (
+ f_evt.device_type == DeviceType.CUDA
+ or f_evt.device_type == DeviceType.PrivateUse1
+ ):
fe.append_kernel(
f_evt.name,
f_evt.device_index,
diff --git a/torch/autograd/profiler_util.py b/torch/autograd/profiler_util.py
index 71322704d9..bbe40f032f 100644
--- a/torch/autograd/profiler_util.py
+++ b/torch/autograd/profiler_util.py
@@ -598,7 +598,7 @@ class FunctionEvent(FormattedTimesMixin):
[child.privateuse1_time_total for child in self.cpu_children]
)
else:
- assert self.device_type == DeviceType.CUDA
+ assert self.device_type == DeviceType.PrivateUse1
return self.privateuse1_time_total
@property
@@ -867,7 +867,10 @@ def _build_table(
event.self_privateuse1_memory_usage > 0 for event in events
)
use_device = events[0].use_device
- if not use_device and (has_privateuse1_mem or has_privateuse1_time):
+    # Running the profiler on a PrivateUse1 device without enabling
+    # ProfilerActivity.PrivateUse1 can still record privateuse1 memory usage.
+    # So when use_device is None we only need to check has_privateuse1_time.
+ if not use_device and has_privateuse1_time:
raise RuntimeError(
"use_device is None, but there is private device performance data."
)
@@ -951,7 +954,7 @@ def _build_table(
"Self CUDA Mem",
]
)
- if has_privateuse1_mem:
+ if use_device and has_privateuse1_mem:
privateuse1 = use_device.upper()
headers.extend(
[
@@ -1132,7 +1135,7 @@ def _build_table(
_format_memory(evt.self_cuda_memory_usage),
]
)
- if has_privateuse1_mem:
+ if use_device and has_privateuse1_mem:
row_values.extend(
[
# PrivateUse1 Mem Total
diff --git a/torch/csrc/autograd/init.cpp b/torch/csrc/autograd/init.cpp
index 2bea7c4cda..3dd4cc97da 100644
--- a/torch/csrc/autograd/init.cpp
+++ b/torch/csrc/autograd/init.cpp
@@ -332,6 +332,9 @@ PyObject* THPAutograd_initExtension(PyObject* _unused, PyObject* unused) {
if (at::hasMTIA()) {
activities.insert(torch::profiler::impl::ActivityType::MTIA);
}
+ if (c10::get_privateuse1_backend() != "privateuseone") {
+ activities.insert(torch::profiler::impl::ActivityType::PrivateUse1);
+ }
#endif
return activities;
});
diff --git a/torch/csrc/autograd/profiler_kineto.cpp b/torch/csrc/autograd/profiler_kineto.cpp
index e30aba2d84..0c73c8b7a7 100644
--- a/torch/csrc/autograd/profiler_kineto.cpp
+++ b/torch/csrc/autograd/profiler_kineto.cpp
@@ -555,7 +555,9 @@ void prepareProfiler(
config.state == ProfilerState::KINETO_PRIVATEUSE1_FALLBACK,
"Supported only in Kineto profiler");
torch::profiler::impl::kineto::prepareTrace(
- /*cpuOnly=*/!(at::hasCUDA() || at::hasXPU() || at::hasMTIA()),
+ /*cpuOnly=*/!(
+ at::hasCUDA() || at::hasXPU() || at::hasMTIA() ||
+ c10::get_privateuse1_backend() != "privateuseone"),
activities,
config.experimental_config);
diff --git a/torch/csrc/profiler/kineto_shim.cpp b/torch/csrc/profiler/kineto_shim.cpp
index 85f91bf8b2..9e0386c4c7 100644
--- a/torch/csrc/profiler/kineto_shim.cpp
+++ b/torch/csrc/profiler/kineto_shim.cpp
@@ -25,6 +25,8 @@ const std::set<libkineto::ActivityType> kCpuTypes{
libkineto::ActivityType::CUDA_RUNTIME,
libkineto::ActivityType::CUDA_DRIVER,
libkineto::ActivityType::PYTHON_FUNCTION,
+ libkineto::ActivityType::PRIVATEUSE1_RUNTIME,
+ libkineto::ActivityType::PRIVATEUSE1_DRIVER,
};
const std::set<libkineto::ActivityType> kCudaTypes = {
@@ -47,6 +49,15 @@ const std::set<libkineto::ActivityType> kMtiaTypes = {
libkineto::ActivityType::MTIA_CCP_EVENTS,
libkineto::ActivityType::MTIA_RUNTIME,
};
+const std::set<libkineto::ActivityType> kPrivateUse1Types = {
+ libkineto::ActivityType::GPU_MEMCPY,
+ libkineto::ActivityType::GPU_MEMSET,
+ libkineto::ActivityType::GPU_USER_ANNOTATION,
+ libkineto::ActivityType::CONCURRENT_KERNEL,
+ // PRIVATEUSE1_RUNTIME appears in both kCpuTypes and kPrivateUse1Types.
+ libkineto::ActivityType::PRIVATEUSE1_RUNTIME,
+ libkineto::ActivityType::PRIVATEUSE1_DRIVER,
+};
} // namespace
#endif // USE_KINETO
@@ -248,6 +259,9 @@ void prepareTrace(
if (collectivesProfilerExists()) {
k_activities.insert(libkineto::ActivityType::COLLECTIVE_COMM);
}
+ if (activities.count(torch::autograd::profiler::ActivityType::PrivateUse1)) {
+ k_activities.insert(kPrivateUse1Types.begin(), kPrivateUse1Types.end());
+ }
ExperimentalConfigWrapper configWrap(config);
@@ -336,8 +350,18 @@ c10::DeviceType deviceTypeFromActivity(libkineto::ActivityType activity_type) {
case libkineto::ActivityType::GPU_USER_ANNOTATION:
case libkineto::ActivityType::CUDA_PROFILER_RANGE:
// TODO: T151322015
- case libkineto::ActivityType::MTIA_CCP_EVENTS:
- return c10::DeviceType::CUDA;
+ case libkineto::ActivityType::MTIA_CCP_EVENTS: {
+ // The PrivateUse1 Kineto backend reuses the ActivityTypes above.
+ // If a PrivateUse1 backend is enabled, this should return
+ // c10::DeviceType::PrivateUse1.
+ c10::DeviceType device_type = []() {
+ if (c10::get_privateuse1_backend() != "privateuseone") {
+ return c10::DeviceType::PrivateUse1;
+ }
+ return c10::DeviceType::CUDA;
+ }();
+ return device_type;
+ }
case libkineto::ActivityType::CPU_OP:
case libkineto::ActivityType::USER_ANNOTATION:
case libkineto::ActivityType::EXTERNAL_CORRELATION:
@@ -347,6 +371,8 @@ c10::DeviceType deviceTypeFromActivity(libkineto::ActivityType activity_type) {
case libkineto::ActivityType::MTIA_RUNTIME:
case libkineto::ActivityType::PYTHON_FUNCTION:
case libkineto::ActivityType::CUDA_DRIVER:
+ case libkineto::ActivityType::PRIVATEUSE1_RUNTIME:
+ case libkineto::ActivityType::PRIVATEUSE1_DRIVER:
return c10::DeviceType::CPU;
default: {
TORCH_WARN(
diff --git a/torch/csrc/profiler/orchestration/observer.h b/torch/csrc/profiler/orchestration/observer.h
index da675e0f3d..4230851607 100644
--- a/torch/csrc/profiler/orchestration/observer.h
+++ b/torch/csrc/profiler/orchestration/observer.h
@@ -17,6 +17,7 @@ enum class C10_API_ENUM ActivityType {
XPU, // XPU kernels, runtime
CUDA, // CUDA kernels, runtime
MTIA, // MTIA kernels, runtime
+ PrivateUse1, // PrivateUse1 kernels, runtime
NUM_KINETO_ACTIVITIES, // must be the last one
};
diff --git a/torch/csrc/profiler/python/init.cpp b/torch/csrc/profiler/python/init.cpp
index 49caf22184..dc1c4580a1 100644
--- a/torch/csrc/profiler/python/init.cpp
+++ b/torch/csrc/profiler/python/init.cpp
@@ -325,7 +325,8 @@ void initPythonBindings(PyObject* module) {
.value("CPU", ActivityType::CPU)
.value("XPU", ActivityType::XPU)
.value("MTIA", ActivityType::MTIA)
- .value("CUDA", ActivityType::CUDA);
+ .value("CUDA", ActivityType::CUDA)
+ .value("PrivateUse1", ActivityType::PrivateUse1);
py::class_<ExperimentalConfig>(m, "_ExperimentalConfig")
.def(
diff --git a/torch/profiler/profiler.py b/torch/profiler/profiler.py
index fc7a61bf45..dd44678126 100644
--- a/torch/profiler/profiler.py
+++ b/torch/profiler/profiler.py
@@ -143,7 +143,9 @@ class _KinetoProfile:
use_cuda=(ProfilerActivity.CUDA in self.activities),
use_cpu=(ProfilerActivity.CPU in self.activities),
use_mtia=(ProfilerActivity.MTIA in self.activities),
- use_device=None,
+ use_device=self.use_device
+ if (ProfilerActivity.PrivateUse1 in self.activities)
+ else None,
record_shapes=self.record_shapes,
with_flops=self.with_flops,
profile_memory=self.profile_memory, | 2.41.0 |
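The diff above exposes a new `ProfilerActivity.PrivateUse1` value and routes the corresponding Kineto activity types through the profiler. A minimal usage sketch follows; it assumes an out-of-tree device backend has already been registered (for example via torch.utils.rename_privateuse1_backend) and that its Kineto plugin reports the PRIVATEUSE1 runtime/driver activities, so the backend name "npu" below is purely illustrative.

import torch
from torch.profiler import profile, ProfilerActivity

# Assumed setup (hypothetical backend name):
# torch.utils.rename_privateuse1_backend("npu")

def run_workload():
    # Placeholder for kernels launched on the custom device.
    torch.ones(4, 4).sum()

with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.PrivateUse1]) as prof:
    run_workload()

# Device-side events reported by the PrivateUse1 backend now appear alongside CPU events.
print(prof.key_averages().table(sort_by="self_cpu_time_total", row_limit=10))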
024c0c2ef8d50e319ba5a9542dd648a35f7ed41 | Fri, 12 Apr 2024 15:36:47 +0000 | [PATCH 0082/1000] Convert MKL symbols from global to local (#122284) | PyTorch is statically linked to MKL, but the MKL symbols are globally visible, which may cause symbol conflicts. Such an error has been observed when a different version of MKL is dynamically linked by other components: `libtorch_cpu.so` was invoked incorrectly when an MKL descriptor was freed. Pull Request resolved: https://github.com/pytorch/pytorch/pull/122284 Approved by: https://github.com/EikanWang, https://github.com/cyyever, https://github.com/ezyang | diff --git a/cmake/public/mkl.cmake b/cmake/public/mkl.cmake
index 68bf1b9dc9..2f6d1fd905 100644
--- a/cmake/public/mkl.cmake
+++ b/cmake/public/mkl.cmake
@@ -21,3 +21,20 @@ endforeach()
set_property(
TARGET caffe2::mkl PROPERTY INTERFACE_LINK_DIRECTORIES
${MKL_ROOT}/lib ${MKL_ROOT}/lib/intel64 ${MKL_ROOT}/lib/intel64_win ${MKL_ROOT}/lib/win-x64)
+
+if(UNIX)
+ if(USE_STATIC_MKL)
+ foreach(MKL_LIB_PATH IN LISTS MKL_LIBRARIES)
+ if(NOT EXISTS "${MKL_LIB_PATH}")
+ continue()
+ endif()
+
+ get_filename_component(MKL_LIB_NAME "${MKL_LIB_PATH}" NAME)
+
+ # Match archive libraries starting with "libmkl_"
+ if(MKL_LIB_NAME MATCHES "^libmkl_" AND MKL_LIB_NAME MATCHES ".a$")
+ target_link_options(caffe2::mkl INTERFACE "-Wl,--exclude-libs,${MKL_LIB_NAME}")
+ endif()
+ endforeach()
+ endif()
+endif() | 2.41.0 |
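A quick way to sanity-check the effect of `--exclude-libs` from Python is to probe `libtorch_cpu.so`'s dynamic symbol table for a well-known MKL entry point. The sketch below is only illustrative: the library path is Linux-specific and `mkl_get_version` is assumed to be a representative MKL symbol for the build in question.

import ctypes
import os
import torch

lib_path = os.path.join(os.path.dirname(torch.__file__), "lib", "libtorch_cpu.so")
lib = ctypes.CDLL(lib_path)

# With MKL statically linked and its symbols localized, this lookup should
# fail; before this patch the symbol was exported globally.
print(hasattr(lib, "mkl_get_version"))  # expected: False after this change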
cb3301f807e2d58d719bbd976b68a13999f1c70 | Fri, 12 Apr 2024 15:38:41 +0000 | [PATCH 0083/1000] [ROCm] Add cast to kFloat in amax calculation (#123872) | A necessary cast to kFloat was missed in the previous amax PR. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123872 Approved by: https://github.com/drisspg | diff --git a/aten/src/ATen/native/cuda/Blas.cpp b/aten/src/ATen/native/cuda/Blas.cpp
index 15a05985fd..be8aa363a9 100644
--- a/aten/src/ATen/native/cuda/Blas.cpp
+++ b/aten/src/ATen/native/cuda/Blas.cpp
@@ -912,7 +912,7 @@ _scaled_mm_out_cuda(const Tensor& mat1, const Tensor& mat2,
#if defined(USE_ROCM) && ROCM_VERSION >= 60000
// rocm's hipblaslt does not yet support amax, so calculate separately
- amax = at::max(at::abs(out));
+ amax = at::max(at::abs(out.to(kFloat)));
#endif
return {out, amax}; | 2.41.0 |
51582949bd45bbe892d936f58ff4cafa1fd6204 | Fri, 12 Apr 2024 15:44:56 +0000 | [PATCH 0084/1000] [export] Enforce final classes in serialization. (#123861) | Summary: as the title says, these are private APIs and are not meant to be used across repos. Test Plan: CI Differential Revision: D56027954 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123861 Approved by: https://github.com/tugsbayasgalan | diff --git a/torch/_export/serde/serialize.py b/torch/_export/serde/serialize.py
index 9a24862271..18acbe1cae 100644
--- a/torch/_export/serde/serialize.py
+++ b/torch/_export/serde/serialize.py
@@ -1,5 +1,6 @@
import base64
import copy
+import copyreg
import dataclasses
import heapq
import inspect
@@ -8,9 +9,8 @@ import json
import logging
import math
import operator
-import typing
-import copyreg
import re
+import typing
from contextlib import contextmanager
from dataclasses import dataclass, field
@@ -20,6 +20,7 @@ from typing import (
Callable,
cast,
Dict,
+ final,
Iterator,
List,
Optional,
@@ -55,9 +56,9 @@ from .schema import ( # type: ignore[attr-defined]
InputSpec,
InputToBufferSpec,
InputToCustomObjSpec,
+ InputTokenSpec,
InputToParameterSpec,
InputToTensorConstantSpec,
- InputTokenSpec,
Layout,
LossOutputSpec,
MemoryFormat,
@@ -379,7 +380,16 @@ class GraphState:
custom_obj_values: Dict[str, CustomObjArgument] = field(default_factory=dict)
-class GraphModuleSerializer:
+class Final(type):
+ def __new__(metacls, name, bases, classdict):
+ for b in bases:
+ if isinstance(b, Final):
+ raise TypeError(f"type '{b.__name__}' is not an acceptable base type")
+ return type.__new__(metacls, name, bases, dict(classdict))
+
+
+@final
+class GraphModuleSerializer(metaclass=Final):
def __init__(
self,
graph_signature: ep.ExportGraphSignature,
@@ -1230,7 +1240,8 @@ class GraphModuleSerializer:
)
-class ExportedProgramSerializer:
+@final
+class ExportedProgramSerializer(metaclass=Final):
def __init__(self, opset_version: Optional[Dict[str, int]] = None):
self.opset_version: Dict[str, int] = {}
if opset_version:
@@ -1285,7 +1296,8 @@ class ExportedProgramSerializer:
)
-class GraphModuleDeserializer:
+@final
+class GraphModuleDeserializer(metaclass=Final):
@dataclasses.dataclass
class Result:
graph_module: torch.fx.GraphModule
@@ -2055,7 +2067,8 @@ class GraphModuleDeserializer:
]
-class ExportedProgramDeserializer:
+@final
+class ExportedProgramDeserializer(metaclass=Final):
def __init__(self, expected_opset_version: Optional[Dict[str, int]] = None):
self.expected_opset_version: Dict[str, int] = {}
if expected_opset_version: | 2.41.0 |
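With the `Final` metaclass above, attempting to subclass the (de)serializer classes now fails at class-definition time instead of silently creating an unsupported extension point. A small sketch of the resulting behavior (the subclass name is of course made up):

from torch._export.serde.serialize import ExportedProgramSerializer

try:
    class MySerializer(ExportedProgramSerializer):  # private API, not meant to be extended
        pass
except TypeError as e:
    # e.g. "type 'ExportedProgramSerializer' is not an acceptable base type"
    print(e)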
35db051d08c8a358ce43cc05670ac552e3d40c3 | Fri, 12 Apr 2024 06:38:31 -0700 | [PATCH 0087/1000] get torch.distributed.breakpoint() to work under Python/Meta contexts (#118645) | I noticed that when I put a `torch.distributed.breakpoint()` in [here](https://github.com/pytorch/pytorch/blob/main/torch/_subclasses/meta_utils.py#L605), it would fail. This fixes it. In theory, it would probably be better to have a way to get the `barrier()` call to skip the dispatcher completely. I wasn't sure how to do that though, and this seems to cover 90% of issues. Pull Request resolved: https://github.com/pytorch/pytorch/pull/118645 Approved by: https://github.com/yifuwang | diff --git a/torch/distributed/__init__.py b/torch/distributed/__init__.py
index 5fb05a3477..206a6e6f1f 100644
--- a/torch/distributed/__init__.py
+++ b/torch/distributed/__init__.py
@@ -86,7 +86,16 @@ if is_available():
f"Type 'up' to get to the frame that called dist.breakpoint(rank={rank})\n"
)
pdb.set_trace()
- barrier()
+ # If Meta/Python keys are in the TLS, we want to make sure that we ignore them
+ # and hit the (default) CPU/CUDA implementation of barrier.
+ meta_in_tls = torch._C._meta_in_tls_dispatch_include()
+ guard = torch._C._DisableTorchDispatch() # type: ignore[attr-defined]
+ torch._C._set_meta_in_tls_dispatch_include(False)
+ try:
+ barrier()
+ finally:
+ torch._C._set_meta_in_tls_dispatch_include(meta_in_tls)
+ del guard
if sys.platform != "win32":
from torch._C._distributed_c10d import ( | 2.41.0 |
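A rough sketch of the scenario this change unblocks; it assumes an already-initialized process group (e.g. via dist.init_process_group), and note that calling it really does drop rank 0 into pdb, so it is meant purely as an illustration:

import torch
import torch.distributed as dist
from torch._subclasses.fake_tensor import FakeTensorMode

# dist.init_process_group("gloo") is assumed to have been called already.
with FakeTensorMode():
    # The barrier inside breakpoint() now temporarily drops the Python/Meta
    # dispatch keys, so it reaches the real CPU/CUDA barrier instead of being
    # intercepted by the surrounding dispatch mode.
    dist.breakpoint(rank=0)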
4c887fbf6fcc9b1864d10b3ab18294e5edebdfc | Thu, 11 Apr 2024 21:30:18 -0300 | [PATCH 0088/1000] [AOTAutograd] Replay views on output using `FunctionalTensor` metas. (#121007) | Fix: #120336 This PR fixes an issue on AOTAutograd, specifically on backends that don't support views by themselves (e.g. XLA). Previously, AOTAutograd tried to reconstruct output views by calling `as_strided` on the concrete bases using sizes and strides of the outputs that aliased them. Since backends such as XLA doesn't support tensor aliasing, the sizes and strides would be that of a contiguous tensor (not a view tensor). Because of that, calling `as_strided` would error, since the output tensor would be bigger than its base. Instead, this PR applies the sequence of `ViewMeta` gathered for each output during the functionalization phase. **Note:** we intentionally don't support base tensors that went through metadata mutation, i.e. in-place view operations. In summary, this PR: - Introduces one `FunctionalTensorWrapper` member function alongside its Python APIs - `apply_view_metas(base)`: applies the `ViewMeta` sequence of the given instance onto another base - Introduces a `OutputAliasInfo.functional_tensor` field - Saves the `FunctionalTensorWrapper` instance collected by the functionalization phase - Wraps it with a new `FunctionalTensorMetadataEq` class for comparing only the metadata of the tensors - Plumbs `OutputAliasInfo.functional_tensor` to `gen_alias_from_base` function - Applies the `ViewMeta` sequence of the saved `FunctionalTensor` onto `aliased_base_tensor` - Propagates `OutputAliasInfo.functional_tensor` when updating `fw_metadata` (this PR description was updated in order to reflect the most recent changes) Pull Request resolved: https://github.com/pytorch/pytorch/pull/121007 Approved by: https://github.com/bdhirsh | diff --git a/aten/src/ATen/FunctionalTensorWrapper.cpp b/aten/src/ATen/FunctionalTensorWrapper.cpp
index db36ca2bbd..a7ba697d13 100644
--- a/aten/src/ATen/FunctionalTensorWrapper.cpp
+++ b/aten/src/ATen/FunctionalTensorWrapper.cpp
@@ -337,16 +337,26 @@ void FunctionalTensorWrapper::sync_() {
regenerate_from_base();
}
+Tensor FunctionalTensorWrapper::apply_view_metas(const Tensor& base) {
+ auto t = base;
+
+ // Reapply views to get the viewed tensor from the base in alias_
+ for (auto& view_meta: view_metas_) {
+ t = view_meta.forward_fn(t, view_meta.out_index);
+ }
+
+ return t;
+}
+
void FunctionalTensorWrapper::regenerate_from_base() {
at::AutoDispatchSkipFunctionalize guard;
auto storage_impl = functional_storage_impl();
auto t = storage_impl->base();
+
TORCH_INTERNAL_ASSERT(!at::functionalization::impl::isFunctionalTensor(t));
- // Reapply views to get the viewed tensor from the base in alias_
- for (auto& view_meta: view_metas_) {
- t = view_meta.forward_fn(t, view_meta.out_index);
- }
+ t = apply_view_metas(t);
TORCH_INTERNAL_ASSERT(!at::functionalization::impl::isFunctionalTensor(t));
+
replace_(t, /*from_lazy_regenerate=*/true);
generation_ = storage_impl->generation();
}
diff --git a/aten/src/ATen/FunctionalTensorWrapper.h b/aten/src/ATen/FunctionalTensorWrapper.h
index f1d9cca6e4..d323708053 100644
--- a/aten/src/ATen/FunctionalTensorWrapper.h
+++ b/aten/src/ATen/FunctionalTensorWrapper.h
@@ -97,6 +97,10 @@ struct TORCH_API FunctionalTensorWrapper : public c10::TensorImpl {
->are_all_mutations_under_no_grad_or_inference_mode();
}
+ // Runs the forward_fn of every ViewMeta collected in the current instance
+ // to some other base.
+ Tensor apply_view_metas(const Tensor& base);
+
// Sync's the underlying tensor with its alias, if it's out of date. This
// involves two steps: 1) Apply any pending updates/mutations to the alias 2)
// Replay the views (if any) to regenerate the current tensor off of the
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_output_aliases_input_view_meta_replay b/test/dynamo_expected_failures/TestAOTAutograd.test_output_aliases_input_view_meta_replay
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_output_aliases_intermediate_view_meta_replay b/test/dynamo_expected_failures/TestAOTAutograd.test_output_aliases_intermediate_view_meta_replay
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_output_aliases_output_view_meta_replay b/test/dynamo_expected_failures/TestAOTAutograd.test_output_aliases_output_view_meta_replay
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/functorch/test_aotdispatch.py b/test/functorch/test_aotdispatch.py
index bb900b21de..3c9cf40cf4 100644
--- a/test/functorch/test_aotdispatch.py
+++ b/test/functorch/test_aotdispatch.py
@@ -2778,6 +2778,67 @@ def forward(self, tangents_1):
with self.assertRaisesRegex(Exception, "Can't call metadata"):
make_fx(m, tracing_mode="symbolic", _allow_non_fake_inputs=True)(inp)
+ def _compile_and_erase_bases(self, *output_view_indices):
+ # Overrides _base and _view_func tensor attributes, so as to avoid the view-replay
+ # execution path when reconstructing views.
+ class NoViewReplayTensor(torch.Tensor):
+ @property
+ def _base(self):
+ return None
+
+ @property
+ def _view_func(self):
+ return None
+
+ # Wraps the outputs that are views of the FX graph 'g' with NoViewReplayTensor,
+ # since they are the only ones that will get reconstructed.
+ def wrapper(g, *args, **kwargs):
+ outs = g(*args, **kwargs)
+ for i in output_view_indices:
+ outs[i] = NoViewReplayTensor(outs[i])
+ return outs
+
+ return lambda f: aot_function(f, fw_compiler=lambda g, _: partial(wrapper, g))
+
+ def test_output_aliases_input_view_meta_replay(self):
+ @self._compile_and_erase_bases(0)
+ def f(a):
+ return a.view(-1)
+
+ inp = torch.ones(2, 2, requires_grad=True)
+ out = f(inp)
+
+ self.assertIsNotNone(out.grad_fn)
+ self.assertExpectedInline(str(out.grad_fn.__class__), """<class 'ViewBackward0'>""")
+
+ def test_output_aliases_intermediate_view_meta_replay(self):
+ @self._compile_and_erase_bases(0, 1)
+ def f(a):
+ b = a.clone()
+ return b.view(-1), b.view(-1)
+
+ inp = torch.ones(2, 2, requires_grad=True)
+ out1, out2 = f(inp)
+
+ self.assertIsNotNone(out1.grad_fn)
+ self.assertExpectedInline(str(out1.grad_fn.__class__), """<class 'ViewBackward0'>""")
+
+ self.assertIsNotNone(out2.grad_fn)
+ self.assertExpectedInline(str(out2.grad_fn.__class__), """<class 'ViewBackward0'>""")
+
+ def test_output_aliases_output_view_meta_replay(self):
+ @self._compile_and_erase_bases(1)
+ def f(a):
+ b = a.add(10)
+ return b, b.view(-1)
+
+ inp = torch.ones(2, 2, requires_grad=True)
+ out1, out2 = f(inp)
+
+ self.assertEqual(out1.untyped_storage(), out2.untyped_storage())
+ self.assertIsNotNone(out2.grad_fn)
+ self.assertExpectedInline(str(out2.grad_fn.__class__), """<class 'ViewBackward0'>""")
+
def extract_graph(fx_g, _, graph_cell):
graph_cell[0] = fx_g
diff --git a/test/inductor/test_torchinductor_opinfo.py b/test/inductor/test_torchinductor_opinfo.py
index 4c86040a8c..1d27b44a23 100644
--- a/test/inductor/test_torchinductor_opinfo.py
+++ b/test/inductor/test_torchinductor_opinfo.py
@@ -246,7 +246,6 @@ inductor_expected_failures_single_sample["cuda"] = {
# intentionally not handled
intentionally_not_handled = {
- ("as_strided", "partial_views"): {b8, f16, f32, f64, i32, i64},
"resize_": {b8, f16, f32, f64, i32, i64},
"resize_as_": {b8, f16, f32, f64, i32, i64},
}
diff --git a/tools/pyi/gen_pyi.py b/tools/pyi/gen_pyi.py
index b879605823..bafccc5125 100644
--- a/tools/pyi/gen_pyi.py
+++ b/tools/pyi/gen_pyi.py
@@ -799,6 +799,15 @@ def gen_pyi(
"def _functionalize_are_all_mutations_under_no_grad_or_inference_mode(t: Tensor) -> _bool: ..."
],
"_functionalize_sync": ["def _functionalize_sync(t: Tensor) -> None: ..."],
+ "_functionalize_was_storage_changed": [
+ "def _functionalize_was_storage_changed(tensor: Tensor) -> _bool: ..."
+ ],
+ "_functionalize_has_metadata_mutation": [
+ "def _functionalize_has_metadata_mutation(tensor: Tensor) -> _bool: ..."
+ ],
+ "_functionalize_apply_view_metas": [
+ "def _functionalize_apply_view_metas(tensor: Tensor, base: Tensor) -> Tensor: ..."
+ ],
"_enable_functionalization": [
"def _enable_functionalization(*, reapply_views: _bool = False): ..."
],
diff --git a/torch/_functorch/_aot_autograd/collect_metadata_analysis.py b/torch/_functorch/_aot_autograd/collect_metadata_analysis.py
index a2a6a53624..29c3ef0831 100644
--- a/torch/_functorch/_aot_autograd/collect_metadata_analysis.py
+++ b/torch/_functorch/_aot_autograd/collect_metadata_analysis.py
@@ -33,6 +33,7 @@ from .functional_utils import (
to_fun,
)
from .schemas import (
+ FunctionalTensorMetadataEq,
InputAliasInfo,
MutationType,
OutputAliasInfo,
@@ -532,7 +533,6 @@ from a multi-output view call"
and len(outs_with_identical_metadata_that_require_grad) > 0
and not o.requires_grad
):
- assert len(outs_with_identical_metadata_that_require_grad) > 0
# In theory we could use any of these tensors to regenerate the aliased outputs from,
# since they all alias each other and have identical metatadata
out_alias = outs_with_identical_metadata_that_require_grad[0]
@@ -549,12 +549,55 @@ from a multi-output view call"
}
else:
dynamic_dims = None
+
+ # Save the current FunctionalTensor output.
+ #
+ # This will be used at runtime for reconstructing output views from
+ # their respective base tensors.
+ #
+ # The FunctionalTensor will be saved if one of the 2 conditions below
+ # is true:
+ functional_tensor = None
+ if (
+ # 1. If the output_type is either of:
+ # (i) alias_of_intermediate;
+ # (ii) alias_of_intermediate_save_as_output; or
+ # (iii) alias_of_intermediate_base_is_user_output.
+ #
+ # No need to worry about in-place view operations here, since
+ # this functionalization step eliminates mutations.
+ #
+ # i.e. we have access to the actual base tensor, before the
+ # in-place operation was applied.
+ output_type
+ in (
+ OutputType.alias_of_intermediate,
+ OutputType.alias_of_intermediate_save_as_output,
+ OutputType.alias_of_intermediate_base_is_user_output,
+ )
+ ) or (
+ # 2. If the output_type is alias_of_input, and no in-place view
+ # operation was run on the input (base tensor).
+ #
+ # In this case, we need to check for metadata mutation because
+ # the runtime explicitly reconstructs the inputs, before actually
+ # reconstructing the outputs. Due to in-place view operations, the
+ # fully reconstructed input may not be this output base tensor
+ # anymore.
+ output_type == OutputType.alias_of_input
+ and base_idx is not None
+ and not input_info[base_idx].mutates_metadata
+ ):
+ if isinstance(o, FunctionalTensor):
+ functional_tensor = FunctionalTensorMetadataEq(o.elem)
+
out_info = OutputAliasInfo(
output_type=output_type,
raw_type=type(o),
base_idx=base_idx,
dynamic_dims=dynamic_dims,
requires_grad=isinstance(o, torch.Tensor) and o.requires_grad,
+ functional_tensor=functional_tensor,
)
output_info.append(out_info)
diff --git a/torch/_functorch/_aot_autograd/functional_utils.py b/torch/_functorch/_aot_autograd/functional_utils.py
index fc7cfe1a0e..b863f40efa 100644
--- a/torch/_functorch/_aot_autograd/functional_utils.py
+++ b/torch/_functorch/_aot_autograd/functional_utils.py
@@ -6,8 +6,10 @@ This file contains utilities related to functionalization in AOTAutograd:
4. checking if a graph is functional i.e. whether it contains any mutation ops
"""
+
import torch
from torch import Tensor
+from torch._logging import getArtifactLogger
from torch._subclasses.fake_tensor import FakeTensor
from torch._subclasses.functional_tensor import FunctionalTensor
from torch.fx.experimental.symbolic_shapes import definitely_true, sym_eq
@@ -17,6 +19,8 @@ from torch.utils._python_dispatch import (
transform_subclass,
)
+aot_joint_log = getArtifactLogger(__name__, "aot_joint_graph")
+
def to_fun(t):
if isinstance(t, Tensor):
@@ -191,7 +195,65 @@ def has_metadata_mutation(f_arg, arg, *, check_only_storage_mutation: bool):
return has_metadata_mutation_
-def gen_alias_from_base(aliased_base_tensor, target_meta_tensor, target_requires_grad):
+def gen_alias_from_base(
+ aliased_base_tensor,
+ target_meta_tensor,
+ target_requires_grad,
+ # Actual type: Optional[FunctionalTensorMetadataEq]
+ # Can't use it here because it lives inside schemas.py. Importing that class would lead
+ # to an error due to an import cycle.
+ target_functional_tensor=None,
+):
+ # Patch the correct requires_grad field of the output tensor, depending on whether:
+ # (i) the reconstructed output (out) came from a tensor that requires grad or not;
+ # and (ii) the concrete returned output does require grad or not.
+ def patch_requires_grad(out):
+ if aliased_base_tensor.requires_grad and not target_requires_grad:
+ out = out.detach()
+ elif not aliased_base_tensor.requires_grad and target_requires_grad:
+ out.requires_grad_(True)
+ return out
+
+ # If provided, use the target functional tensor for replaying the views.
+ #
+ # In summary, we use the fact that FunctionalTensorWrapper saves the view
+ # functions applied to itself (collected during functionalization) so as
+ # to replay them (view functions) on the aliased_base_tensor.
+ if target_functional_tensor is not None:
+ from .schemas import FunctionalTensorMetadataEq
+
+ assert isinstance(target_functional_tensor, FunctionalTensorMetadataEq)
+ functional_tensor = target_functional_tensor.tensor
+
+ try:
+ out = torch._functionalize_apply_view_metas(
+ functional_tensor, aliased_base_tensor
+ )
+ except RuntimeError as e:
+ # NYI for dynamic shapes.
+ #
+ # On functionalization, the ViewMeta lambdas will have symbolic shapes.
+ # When trying to apply those lambdas on concrete tensors, it will fail.
+ #
+ # In order for this to work, we should have a way to replace those
+ # symbolic shapes with concrete numbers.
+ aot_joint_log.warning(
+ "could not reconstruct view by re-applying a ViewMeta sequence. "
+ "This error is possibly caused by dynamic shapes. "
+ "Fallbacking to reconstruction using as_strided. "
+ "Error message: %s",
+ str(e),
+ )
+ else:
+ # If re-applying the ViewMeta sequence succeeded, there should be no more
+ # problems going forward. We just check we got to the target shape and
+ # patch requires_grad flag.
+ assert out.shape == target_meta_tensor.shape, (
+ "incorrect out shape after application of ViewMeta sequence: "
+ f"{tuple(out.shape)} (actual) vs {tuple(target_meta_tensor.shape)} (expected)"
+ )
+ return patch_requires_grad(out)
+
# Try to do view-replay if possible.
# fall back to .as_strided() if we can't.
if target_meta_tensor._base is not None:
@@ -218,11 +280,8 @@ def gen_alias_from_base(aliased_base_tensor, target_meta_tensor, target_requires
#
# As a stopgap, we'll fall back to as_strided.
if out is not None and out.shape == target_meta_tensor.shape:
- if aliased_base_tensor.requires_grad and not target_requires_grad:
- out = out.detach()
- elif not aliased_base_tensor.requires_grad and target_requires_grad:
- out.requires_grad_(True)
- return out
+ return patch_requires_grad(out)
+
size = target_meta_tensor.size()
stride = target_meta_tensor.stride()
storage_offset = target_meta_tensor.storage_offset()
@@ -237,10 +296,7 @@ def gen_alias_from_base(aliased_base_tensor, target_meta_tensor, target_requires
else:
aliased_out = aliased_base_tensor.as_strided(size, stride, storage_offset)
# For outputs aliasing inputs, we need to check if the requires-gradness has changed.
- if aliased_base_tensor.requires_grad and not target_requires_grad:
- aliased_out = aliased_out.detach()
- elif not aliased_base_tensor.requires_grad and target_requires_grad:
- aliased_out.requires_grad_(True)
+ aliased_out = patch_requires_grad(aliased_out)
# For outputs aliasing inputs, we need to check if the dtype has changed.
# as_strided() is the "most generic" view, but it does not cover cross-dtype views
if aliased_out.dtype != target_meta_tensor.dtype:
diff --git a/torch/_functorch/_aot_autograd/input_output_analysis.py b/torch/_functorch/_aot_autograd/input_output_analysis.py
index 6f9f8c3916..bac6e3a3f2 100644
--- a/torch/_functorch/_aot_autograd/input_output_analysis.py
+++ b/torch/_functorch/_aot_autograd/input_output_analysis.py
@@ -64,6 +64,7 @@ def remove_dupe_metadata(
dynamic_dims=o.dynamic_dims,
base_idx=None if o.base_idx is None else add_dupe_map[o.base_idx],
requires_grad=o.requires_grad,
+ functional_tensor=o.functional_tensor,
)
for o in m.output_info
],
@@ -222,6 +223,7 @@ def create_synthetic_base_metadata(
# Map the input idx pre-synthetic-bases to the new idx post-synthetic-bases
base_idx=new_base_idx, # type: ignore[arg-type]
requires_grad=o.requires_grad,
+ functional_tensor=o.functional_tensor,
)
)
diff --git a/torch/_functorch/_aot_autograd/runtime_wrappers.py b/torch/_functorch/_aot_autograd/runtime_wrappers.py
index eaecf438ec..2ac3158873 100644
--- a/torch/_functorch/_aot_autograd/runtime_wrappers.py
+++ b/torch/_functorch/_aot_autograd/runtime_wrappers.py
@@ -260,7 +260,7 @@ def create_runtime_wrapper(
if info.output_type == OutputType.alias_of_input:
aliased_base_tensor = orig_inputs[info.base_idx + num_tokens] # type: ignore[index]
regenerated_out = gen_alias_from_base(
- aliased_base_tensor, o_, o_grad
+ aliased_base_tensor, o_, o_grad, info.functional_tensor
)
fw_outs_including_aliases.append(regenerated_out)
continue
@@ -285,7 +285,9 @@ def create_runtime_wrapper(
# TODO: handle the custom autograd function case here.
# We need a way to check whether a tensor came from a custom autograd fn from python,
# AND a way to replay that custom view fn.
- regenerated_out = gen_alias_from_base(aliased_base_tensor, o_, o_grad)
+ regenerated_out = gen_alias_from_base(
+ aliased_base_tensor, o_, o_grad, info.functional_tensor
+ )
fw_outs_including_aliases.append(regenerated_out)
ret_outs = fw_outs_including_aliases
else:
diff --git a/torch/_functorch/_aot_autograd/schemas.py b/torch/_functorch/_aot_autograd/schemas.py
index 6ce589f255..e0e8b88398 100644
--- a/torch/_functorch/_aot_autograd/schemas.py
+++ b/torch/_functorch/_aot_autograd/schemas.py
@@ -17,7 +17,7 @@ from torch._subclasses.fake_tensor import is_fake
from .. import config
-from .functional_utils import _check_if_mutation_can_be_in_graph
+from .functional_utils import _check_if_mutation_can_be_in_graph, has_same_metadata
from .utils import strict_zip
zip = strict_zip
@@ -54,6 +54,27 @@ OutputType = Enum(
)
+# Wrapper around a FunctionalTensorWrapper for comparing only the resulting metadata
+# after applying all the ViewMeta operations.
+class FunctionalTensorMetadataEq:
+ def __init__(self, tensor: torch.Tensor) -> None:
+ assert torch._is_functional_tensor(tensor)
+ self.tensor = tensor
+
+ def __eq__(self, other: object) -> bool:
+ # If other is None, then it probably means that we weren't able to recreate
+ # the FunctionalTensorMetadataEq. One of those cases is when we update the
+ # view metadata by calling: create_synthetic_base_metadata.
+ if other is None:
+ return True
+
+ # Comparison against any other type is not implemented.
+ if not isinstance(other, FunctionalTensorMetadataEq):
+ return NotImplemented
+
+ return has_same_metadata(self.tensor, other.tensor)
+
+
# This class stores info about every user output.
@dataclass(frozen=True)
class OutputAliasInfo:
@@ -84,6 +105,15 @@ class OutputAliasInfo:
dynamic_dims: Optional[Set[int]]
# requires_grad
requires_grad: bool
+ # FunctionalTensorWrapper that represents this output.
+ #
+ # Provides us the means to replay views from it.
+ #
+ # We need to wrap the actual FunctionalTensorWrapper with this class so that
+ # we only compare the tensor's metadata. That's because with the transformations
+ # of the model throughout AOTAutograd, the sequence of ViewMeta and the base
+ # tensor might change.
+ functional_tensor: Optional[FunctionalTensorMetadataEq] = None
class MutationType(Enum):
diff --git a/torch/csrc/autograd/python_torch_functions_manual.cpp b/torch/csrc/autograd/python_torch_functions_manual.cpp
index 825cabe45f..1e94b0cf34 100644
--- a/torch/csrc/autograd/python_torch_functions_manual.cpp
+++ b/torch/csrc/autograd/python_torch_functions_manual.cpp
@@ -664,6 +664,25 @@ static PyObject* THPVariable__functionalize_sync(
END_HANDLE_TH_ERRORS
}
+static PyObject* THPVariable__functionalize_apply_view_metas(
+ PyObject* self,
+ PyObject* args,
+ PyObject* kwargs) {
+ HANDLE_TH_ERRORS
+ static PythonArgParser parser(
+ {"_functionalize_apply_view_metas(Tensor tensor, Tensor base)"},
+ /*traceable=*/true);
+
+ ParsedArgs<4> parsed_args;
+ auto r = parser.parse(args, kwargs, parsed_args);
+ auto tensor = r.tensor(0);
+ TORCH_INTERNAL_ASSERT(
+ at::functionalization::impl::isFunctionalTensor(tensor));
+ auto impl = at::functionalization::impl::unsafeGetFunctionalWrapper(tensor);
+ return wrap(impl->apply_view_metas(r.tensor(1)));
+ END_HANDLE_TH_ERRORS
+}
+
static PyObject* THPVariable__functionalize_mark_mutation_hidden_from_autograd(
PyObject* self,
PyObject* args,
@@ -777,6 +796,10 @@ static PyMethodDef torch_functions_manual[] = {
castPyCFunctionWithKeywords(THPVariable__functionalize_sync),
METH_VARARGS | METH_KEYWORDS | METH_STATIC,
nullptr},
+ {"_functionalize_apply_view_metas",
+ castPyCFunctionWithKeywords(THPVariable__functionalize_apply_view_metas),
+ METH_VARARGS | METH_KEYWORDS | METH_STATIC,
+ nullptr},
{"_enable_functionalization",
castPyCFunctionWithKeywords(THPVariable__enable_functionalization),
METH_VARARGS | METH_KEYWORDS | METH_STATIC, | 2.41.0 |
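A condensed version of the new test above illustrates the situation the ViewMeta replay handles: the compiled forward hides `_base`/`_view_func` from its outputs, so the runtime wrapper has to rebuild the returned view from the saved FunctionalTensor metadata. This is only a sketch adapted from the test; `NoViewReplayTensor` is a test-only helper, not a public API.

from functools import partial
import torch
from functorch.compile import aot_function

class NoViewReplayTensor(torch.Tensor):
    # Hide _base and _view_func so AOTAutograd cannot use the autograd
    # view-replay path and must fall back to the saved ViewMeta sequence.
    @property
    def _base(self):
        return None

    @property
    def _view_func(self):
        return None

def wrapper(g, *args, **kwargs):
    outs = g(*args, **kwargs)
    outs[0] = NoViewReplayTensor(outs[0])
    return outs

f = aot_function(lambda a: a.view(-1), fw_compiler=lambda g, _: partial(wrapper, g))
out = f(torch.ones(2, 2, requires_grad=True))
print(out.grad_fn.__class__)  # per the new test: <class 'ViewBackward0'>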
c0a380bdf756414bb8dffd99256d3e7c360ad3e | Thu, 11 Apr 2024 14:47:40 -0700 | [PATCH 0089/1000] [pt2e][qat] Support conv-transpose-bn[-relu] QAT fusion (#123652) | Summary: This commit adds support for QAT fusion for the [conv-transpose-bn] and [conv-transpose-bn-relu] patterns. Test Plan: python test/test_quantization.py TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_transpose_bn python test/test_quantization.py TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_transpose_bn_relu python test/test_quantization.py TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_transpose_bn python test/test_quantization.py TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_transpose_bn_relu Reviewers: jerryzh168 Subscribers: jerryzh168, supriyar Tasks: https://github.com/pytorch/pytorch/issues/122224 Differential Revision: [D55930704](https://our.internmc.facebook.com/intern/diff/D55930704) Pull Request resolved: https://github.com/pytorch/pytorch/pull/123652 Approved by: https://github.com/jerryzh168 | diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_transpose_bn b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_transpose_bn
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_transpose_bn_relu b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_transpose_bn_relu
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_transpose_bn b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_transpose_bn
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_transpose_bn_relu b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_transpose_bn_relu
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/quantization/pt2e/test_quantize_pt2e_qat.py b/test/quantization/pt2e/test_quantize_pt2e_qat.py
index b635c5986d..e016ba4e49 100644
--- a/test/quantization/pt2e/test_quantize_pt2e_qat.py
+++ b/test/quantization/pt2e/test_quantize_pt2e_qat.py
@@ -61,8 +61,12 @@ class PT2EQATTestCase(QuantizationTestCase):
**conv_kwargs,
):
super().__init__()
- self.conv = conv_class(3, 3, 3, bias=has_conv_bias, **conv_kwargs)
- self.bn = bn_class(3) if has_bn else None
+ conv_kwargs.setdefault("in_channels", 3)
+ conv_kwargs.setdefault("out_channels", 3)
+ conv_kwargs.setdefault("kernel_size", 3)
+ conv_kwargs.setdefault("bias", has_conv_bias)
+ self.conv = conv_class(**conv_kwargs)
+ self.bn = bn_class(conv_kwargs["out_channels"]) if has_bn else None
self.relu = torch.nn.ReLU() if has_relu else None
def forward(self, x):
@@ -78,6 +82,7 @@ class PT2EQATTestCase(QuantizationTestCase):
has_conv_bias: bool = True,
has_bn: bool = True,
has_relu: bool = False,
+ transpose: bool = False,
**conv_kwargs,
):
"""
@@ -86,7 +91,7 @@ class PT2EQATTestCase(QuantizationTestCase):
conv-bn model with conv bias.
"""
return self._BaseConvBnModel(
- self.conv_class,
+ self.conv_transpose_class if transpose else self.conv_class,
self.bn_class,
has_conv_bias,
has_bn,
@@ -179,6 +184,8 @@ class PT2EQATTestCase(QuantizationTestCase):
has_bias: bool = True,
is_cuda: bool = False,
expected_conv_literal_args: Optional[Tuple[Any, ...]] = None,
+ # TODO: set this to true by default
+ verify_convert: bool = False,
):
self._verify_symmetric_xnnpack_qat_graph_helper(
m,
@@ -188,6 +195,7 @@ class PT2EQATTestCase(QuantizationTestCase):
has_bias=has_bias,
is_cuda=is_cuda,
expected_conv_literal_args=expected_conv_literal_args,
+ verify_convert=verify_convert,
)
self._verify_symmetric_xnnpack_qat_graph_helper(
m,
@@ -197,6 +205,7 @@ class PT2EQATTestCase(QuantizationTestCase):
has_bias=has_bias,
is_cuda=is_cuda,
expected_conv_literal_args=expected_conv_literal_args,
+ verify_convert=verify_convert,
)
def _verify_symmetric_xnnpack_qat_graph_helper(
@@ -208,6 +217,7 @@ class PT2EQATTestCase(QuantizationTestCase):
has_bias: bool = True,
is_cuda: bool = False,
expected_conv_literal_args: Optional[Tuple[Any, ...]] = None,
+ verify_convert: bool = False,
):
"""
Verify that the graph module matches the fused QAT [conv - bn (- relu)] pattern
@@ -267,6 +277,7 @@ class PT2EQATTestCase(QuantizationTestCase):
else:
div_scale_factor_node = bn_node.args[0]
(conv_node, scale_factor_reshape_node) = div_scale_factor_node.args
+ conv_op = conv_node.target
self.assertEqual(div_scale_factor_node.target, torch.ops.aten.div.Tensor)
self.assertTrue(_is_conv_node(conv_node))
self.assertEqual(
@@ -347,12 +358,72 @@ class PT2EQATTestCase(QuantizationTestCase):
self.assertTrue("bn_running_var" in bn_running_var_node.target)
self.assertEqual(eps, 1e-5)
+ # Optionally check the converted graph
+ if verify_convert:
+ m = convert_pt2e(m)
+ m(*example_inputs)
+
+ if is_per_channel:
+ conv_weight_dq_op = (
+ torch.ops.quantized_decomposed.dequantize_per_channel.default
+ )
+ node_occurrence = {
+ ns.call_function(
+ torch.ops.quantized_decomposed.quantize_per_tensor.default
+ ): 2,
+ ns.call_function(
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default
+ ): 2,
+ ns.call_function(
+ torch.ops.quantized_decomposed.dequantize_per_channel.default
+ ): 1,
+ }
+ else:
+ conv_weight_dq_op = (
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default
+ )
+ node_occurrence = {
+ ns.call_function(
+ torch.ops.quantized_decomposed.quantize_per_tensor.default
+ ): 2,
+ ns.call_function(
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default
+ ): 3,
+ }
+ node_list = [
+ ns.call_function(
+ torch.ops.quantized_decomposed.quantize_per_tensor.default
+ ),
+ ns.call_function(
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default
+ ),
+ ns.call_function(conv_weight_dq_op),
+ ns.call_function(conv_op),
+ ns.call_function(
+ torch.ops.quantized_decomposed.quantize_per_tensor.default
+ ),
+ ns.call_function(
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default
+ ),
+ ]
+
+ self.checkGraphModuleNodes(
+ m,
+ expected_node_list=node_list,
+ expected_node_occurrence=node_occurrence,
+ )
+
class TestQuantizePT2EQAT_ConvBn_Base(PT2EQATTestCase):
"""
Base TestCase to be used for all conv-bn[-relu] fusion patterns.
"""
+ # TODO: how can we avoid adding every new test to dynamo/expected_test_failures?
+ # Otherwise it fails with the following error:
+ # torch._dynamo.exc.InternalTorchDynamoError:
+ # 'QuantizationConfig' object has no attribute '__bool__'
+
def setUp(self):
# NB: Skip the test if this is a base class, this is to handle the test
# discovery logic in buck which finds and runs all tests here including
@@ -761,13 +832,36 @@ class TestQuantizePT2EQAT_ConvBn_Base(PT2EQATTestCase):
self.assertEqual(dq_qmax, 2**31 - 1)
self.assertEqual(dq_dtype, torch.int32)
+ def _do_test_qat_conv_transpose_bn(self, has_relu: bool):
+ # Use different in/out channel sizes to test if conv weight is
+ # properly transposed in QAT pattern
+ m = self._get_conv_bn_model(
+ has_relu=has_relu,
+ transpose=True,
+ in_channels=3,
+ out_channels=5,
+ kernel_size=3,
+ )
+ self._verify_symmetric_xnnpack_qat_graph(
+ m,
+ self.example_inputs,
+ has_relu=has_relu,
+ verify_convert=True,
+ )
+
+ def test_qat_conv_transpose_bn(self):
+ self._do_test_qat_conv_transpose_bn(has_relu=False)
+
+ def test_qat_conv_transpose_bn_relu(self):
+ self._do_test_qat_conv_transpose_bn(has_relu=True)
+
-# TODO: enable this in the next PR
@skipIfNoQNNPACK
class TestQuantizePT2EQAT_ConvBn1d(TestQuantizePT2EQAT_ConvBn_Base):
dim = 1
example_inputs = (torch.randn(1, 3, 5),)
conv_class = torch.nn.Conv1d
+ conv_transpose_class = torch.nn.ConvTranspose1d
bn_class = torch.nn.BatchNorm1d
@@ -776,6 +870,7 @@ class TestQuantizePT2EQAT_ConvBn2d(TestQuantizePT2EQAT_ConvBn_Base):
dim = 2
example_inputs = (torch.randn(1, 3, 5, 5),)
conv_class = torch.nn.Conv2d
+ conv_transpose_class = torch.nn.ConvTranspose2d
bn_class = torch.nn.BatchNorm2d
@@ -783,6 +878,10 @@ def _is_conv_node(n: torch.fx.Node):
return n.op == "call_function" and n.target in [
torch.ops.aten.conv1d.default,
torch.ops.aten.conv2d.default,
+ torch.ops.aten.conv_transpose1d,
+ torch.ops.aten.conv_transpose1d.default,
+ torch.ops.aten.conv_transpose2d,
+ torch.ops.aten.conv_transpose2d.input,
]
diff --git a/torch/ao/quantization/pt2e/qat_utils.py b/torch/ao/quantization/pt2e/qat_utils.py
index 644ec07fee..1434a75df6 100644
--- a/torch/ao/quantization/pt2e/qat_utils.py
+++ b/torch/ao/quantization/pt2e/qat_utils.py
@@ -21,8 +21,9 @@ from torch.ao.quantization.quantizer import (
from .utils import (
_conv1d_bn_example_inputs,
_conv2d_bn_example_inputs,
- _is_conv_node,
_is_bn_node,
+ _is_conv_or_conv_transpose_node,
+ _is_conv_transpose_fn,
fold_bn_weights_into_conv_node,
_get_aten_graph_module_for_pattern,
)
@@ -113,7 +114,8 @@ def _get_qat_conv_bn_pattern(conv_fn: Callable) -> Callable:
running_std = torch.sqrt(bn_running_var + bn_eps)
scale_factor = bn_weight / running_std
weight_shape = [1] * len(conv_weight.shape)
- weight_shape[0] = -1
+ weight_in_channel_axis = 1 if _is_conv_transpose_fn(conv_fn) else 0
+ weight_shape[weight_in_channel_axis] = -1
bias_shape = [1] * len(conv_weight.shape)
bias_shape[1] = -1
scaled_weight = conv_weight * scale_factor.reshape(weight_shape)
@@ -144,7 +146,8 @@ def _get_qat_conv_bn_pattern_no_conv_bias(conv_fn: Callable) -> Callable:
running_std = torch.sqrt(bn_running_var + bn_eps)
scale_factor = bn_weight / running_std
weight_shape = [1] * len(conv_weight.shape)
- weight_shape[0] = -1
+ weight_in_channel_axis = 1 if _is_conv_transpose_fn(conv_fn) else 0
+ weight_shape[weight_in_channel_axis] = -1
bias_shape = [1] * len(conv_weight.shape)
bias_shape[1] = -1
scaled_weight = conv_weight * scale_factor.reshape(weight_shape)
@@ -271,7 +274,7 @@ def _has_conv_bias_filter(
the original graph has bias.
"""
for n in match.nodes_map.values():
- if _is_conv_node(n):
+ if _is_conv_or_conv_transpose_node(n):
return len(n.args) > 2 and n.args[2] is not None
raise ValueError("Could not find conv node in matched conv + bn pattern")
@@ -325,7 +328,7 @@ def _get_conv_bn_pattern_nodes(r: ReplacedPatterns) -> Dict[str, Tuple[Node, Nod
for n in nodes:
if n.op != "call_function":
continue
- if _is_conv_node(n):
+ if _is_conv_or_conv_transpose_node(n):
assert conv_node is None
conv_node = n
if _is_bn_node(n):
@@ -440,8 +443,8 @@ def _copy_over_literal_conv_args(original_node: Node, new_node: Node):
Note: Unlike other tensor args like conv weights and biases, literal args are
preserved in the original nodes after replacement, so we can access them here.
"""
- assert _is_conv_node(original_node)
- assert _is_conv_node(new_node)
+ assert _is_conv_or_conv_transpose_node(original_node)
+ assert _is_conv_or_conv_transpose_node(new_node)
# x, weight, bias, [stride, padding, dilation, transposed, output_padding, groups]
new_args = list(new_node.args)
if len(new_args) < 3:
@@ -457,8 +460,8 @@ def _update_conv_input_qspec_map_after_replacement(original_node: Node, replacem
so the keys in the `input_qspec_map` will need to be updated to reflect
the corresponding nodes in the replacement graph.
"""
- assert _is_conv_node(original_node)
- assert _is_conv_node(replacement_node)
+ assert _is_conv_or_conv_transpose_node(original_node)
+ assert _is_conv_or_conv_transpose_node(replacement_node)
if "quantization_annotation" not in original_node.meta:
return
original_input_qspec_map = original_node.meta["quantization_annotation"].input_qspec_map
@@ -522,11 +525,12 @@ def _fuse_conv_bn_qat(m: GraphModule) -> GraphModule:
has_bn = any(_is_bn_node(n) for n in m.graph.nodes)
if not has_bn:
return m
- m = _fuse_conv_bn_qat_helper(m, F.conv1d, _conv1d_bn_example_inputs, is_cuda=False)
- m = _fuse_conv_bn_qat_helper(m, F.conv2d, _conv2d_bn_example_inputs, is_cuda=False)
- if torch.cuda.is_available():
- m = _fuse_conv_bn_qat_helper(m, F.conv1d, _conv1d_bn_example_inputs, is_cuda=True)
- m = _fuse_conv_bn_qat_helper(m, F.conv2d, _conv2d_bn_example_inputs, is_cuda=True)
+ is_cuda_options = [True, False] if torch.cuda.is_available() else [False]
+ for is_cuda in is_cuda_options:
+ m = _fuse_conv_bn_qat_helper(m, F.conv1d, _conv1d_bn_example_inputs, is_cuda=is_cuda)
+ m = _fuse_conv_bn_qat_helper(m, F.conv2d, _conv2d_bn_example_inputs, is_cuda=is_cuda)
+ m = _fuse_conv_bn_qat_helper(m, F.conv_transpose1d, _conv1d_bn_example_inputs, is_cuda=is_cuda)
+ m = _fuse_conv_bn_qat_helper(m, F.conv_transpose2d, _conv2d_bn_example_inputs, is_cuda=is_cuda)
return m
def _fuse_conv_bn_qat_helper(
@@ -609,7 +613,7 @@ def _fuse_conv_bn_qat_helper(
for original_node, replacement_node in _get_conv_bn_pattern_nodes(r).values():
# Step (3a): Copy over metadata for all nodes in [conv - bn - getitem]
replacement_node.meta = original_node.meta
- if _is_conv_node(original_node):
+ if _is_conv_or_conv_transpose_node(original_node):
# Step (3b): Copy over conv literal args
_copy_over_literal_conv_args(original_node, replacement_node)
# Step (3c): Update old references in the conv node's input_qspec_map
@@ -701,11 +705,12 @@ def _fold_conv_bn_qat(m: GraphModule) -> GraphModule:
has_bn = any(_is_bn_node(n) for n in m.graph.nodes)
if not has_bn:
return m
- m = _fold_conv_bn_qat_helper(m, F.conv1d, _quantized_conv1d_bn_example_inputs, is_cuda=False)
- m = _fold_conv_bn_qat_helper(m, F.conv2d, _quantized_conv2d_bn_example_inputs, is_cuda=False)
- if torch.cuda.is_available():
- m = _fold_conv_bn_qat_helper(m, F.conv1d, _quantized_conv1d_bn_example_inputs, is_cuda=True)
- m = _fold_conv_bn_qat_helper(m, F.conv2d, _quantized_conv2d_bn_example_inputs, is_cuda=True)
+ is_cuda_options = [True, False] if torch.cuda.is_available() else [False]
+ for is_cuda in is_cuda_options:
+ m = _fold_conv_bn_qat_helper(m, F.conv1d, _quantized_conv1d_bn_example_inputs, is_cuda=is_cuda)
+ m = _fold_conv_bn_qat_helper(m, F.conv2d, _quantized_conv2d_bn_example_inputs, is_cuda=is_cuda)
+ m = _fold_conv_bn_qat_helper(m, F.conv_transpose1d, _quantized_conv1d_bn_example_inputs, is_cuda=is_cuda)
+ m = _fold_conv_bn_qat_helper(m, F.conv_transpose2d, _quantized_conv2d_bn_example_inputs, is_cuda=is_cuda)
return m
def _fold_conv_bn_qat_helper(
@@ -780,7 +785,7 @@ def _fold_conv_bn_qat_helper(
# Copy over literal args for conv
for original_node in _filter_nodes_map(r.nodes_map).values():
- if _is_conv_node(original_node):
+ if _is_conv_or_conv_transpose_node(original_node):
_copy_over_literal_conv_args(original_node, conv_node)
m.graph.eliminate_dead_code()
diff --git a/torch/ao/quantization/pt2e/utils.py b/torch/ao/quantization/pt2e/utils.py
index 0404f377a1..051f02de2d 100644
--- a/torch/ao/quantization/pt2e/utils.py
+++ b/torch/ao/quantization/pt2e/utils.py
@@ -7,6 +7,7 @@ from torch.fx import (
GraphModule,
Node,
)
+import torch.nn.functional as F
from torch.nn.utils.fusion import fuse_conv_bn_weights
from typing import Any, Callable, Dict, Optional, Tuple, List, Union
from torch.utils._pytree import LeafSpec
@@ -169,7 +170,7 @@ def _is_supported_batch_norm_for_training(node: Node):
# TODO: move this to torch/ao/quantization/utils.py
def _is_conv_node(n: Node):
"""
- Return whether the node refers to an aten conv or conv transpose op.
+ Return whether the node refers to an aten conv op.
"""
return n.op == "call_function" and n.target in [
torch.ops.aten.conv1d.default,
@@ -182,10 +183,20 @@ def _is_conv_transpose_node(n: Node):
"""
return n.op == "call_function" and n.target in [
torch.ops.aten.conv_transpose1d,
+ torch.ops.aten.conv_transpose1d.default,
torch.ops.aten.conv_transpose2d,
torch.ops.aten.conv_transpose2d.input,
]
+def _is_conv_or_conv_transpose_node(n: Node):
+ """
+ Return whether the node refers to an aten conv or conv transpose op.
+ """
+ return _is_conv_node(n) or _is_conv_transpose_node(n)
+
+def _is_conv_transpose_fn(conv_fn: Callable):
+ return conv_fn in [F.conv_transpose1d, F.conv_transpose2d]
+
def _is_bn_node(n: Node):
return _is_supported_batch_norm_for_training(n) or n.target == torch.ops.aten._native_batch_norm_legit_no_training.default
@@ -270,7 +281,7 @@ def _fuse_conv_bn_(m: GraphModule) -> None:
continue
bn_node = n
n = bn_node.args[0]
- if not (_is_conv_node(n) or _is_conv_transpose_node(n)):
+ if not _is_conv_or_conv_transpose_node(n):
continue
conv_node = n
conv_weight_node = conv_node.args[1]
diff --git a/torch/ao/quantization/quantizer/xnnpack_quantizer.py b/torch/ao/quantization/quantizer/xnnpack_quantizer.py
index 5caa27ac9e..1f7dad387f 100644
--- a/torch/ao/quantization/quantizer/xnnpack_quantizer.py
+++ b/torch/ao/quantization/quantizer/xnnpack_quantizer.py
@@ -267,6 +267,8 @@ class XNNPACKQuantizer(Quantizer):
STATIC_QAT_ONLY_OPS = [
"conv_bn_relu",
"conv_bn",
+ "conv_transpose_bn_relu",
+ "conv_transpose_bn",
]
# static quantization ops (both PTQ and QAT)
@@ -276,6 +278,7 @@ class XNNPACKQuantizer(Quantizer):
"linear",
"conv_relu",
"conv",
+ "conv_transpose_relu",
"adaptive_avg_pool2d",
# TODO: move this to BoltNNQuantizer?
"gru_io_only",
diff --git a/torch/ao/quantization/quantizer/xnnpack_quantizer_utils.py b/torch/ao/quantization/quantizer/xnnpack_quantizer_utils.py
index f699a7af32..2db136a156 100644
--- a/torch/ao/quantization/quantizer/xnnpack_quantizer_utils.py
+++ b/torch/ao/quantization/quantizer/xnnpack_quantizer_utils.py
@@ -13,6 +13,8 @@ from torch.ao.quantization.pt2e.utils import (
_conv1d_bn_example_inputs,
_conv2d_bn_example_inputs,
_get_aten_graph_module_for_pattern,
+ _is_conv_node,
+ _is_conv_transpose_node,
)
from torch.ao.quantization.quantizer import (
QuantizationAnnotation,
@@ -344,22 +346,9 @@ def _do_annotate_conv_relu(
continue
relu_node = n
maybe_conv_node = n.args[0]
- # TODO: refactor with is_conv_node and is_conv_transpose_node
- if is_conv_transpose:
- conv_ops = [
- torch.ops.aten.conv_transpose1d,
- torch.ops.aten.conv_transpose2d.input,
- ]
- else:
- conv_ops = [
- torch.ops.aten.conv1d.default,
- torch.ops.aten.conv2d.default,
- ]
- if (
- not isinstance(maybe_conv_node, Node)
- or maybe_conv_node.op != "call_function"
- or maybe_conv_node.target not in conv_ops
- ):
+
+ is_conv_node = _is_conv_transpose_node if is_conv_transpose else _is_conv_node
+ if not isinstance(maybe_conv_node, Node) or not is_conv_node(maybe_conv_node):
continue
conv_node = maybe_conv_node
| 2.41.0 |
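For context, a rough sketch of the PT2E QAT flow that now covers the conv-transpose-bn[-relu] pattern. The module and shapes below are illustrative (they mirror the new test's differing in/out channels); the quantization APIs are the ones exercised by the test suite at the time of this commit.

import torch
from torch._export import capture_pre_autograd_graph
from torch.ao.quantization.quantize_pt2e import prepare_qat_pt2e, convert_pt2e
from torch.ao.quantization.quantizer.xnnpack_quantizer import (
    XNNPACKQuantizer,
    get_symmetric_quantization_config,
)

class M(torch.nn.Module):
    def __init__(self):
        super().__init__()
        # Different in/out channels exercise the transposed weight layout
        # in the fused QAT pattern.
        self.conv = torch.nn.ConvTranspose2d(3, 5, 3)
        self.bn = torch.nn.BatchNorm2d(5)
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))

example_inputs = (torch.randn(1, 3, 5, 5),)
m = capture_pre_autograd_graph(M().train(), example_inputs)
quantizer = XNNPACKQuantizer().set_global(get_symmetric_quantization_config(is_qat=True))
m = prepare_qat_pt2e(m, quantizer)
m(*example_inputs)  # run (fake-quantized) training / calibration steps here
m = convert_pt2e(m)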
b647bd325ead8e256199f807ec8bb8765a0772a | Fri, 12 Apr 2024 17:17:34 +0000 | [PATCH 0090/1000] Add missing interfaces of `torch.optim.swa_utils` (#117036) | Add type hints for the function/class interfaces that appear in torch/optim/swa_utils.py but are missing in torch/optim/swa_utils.pyi. - get_ema_multi_avg_fn - get_swa_multi_avg_fn - get_ema_avg_fn - get_swa_avg_fn - AveragedModel.__init__(multi_avg_fn) - SWALR.get_lr Co-authored-by: Jane (Yuan) Xu <[email protected]> Pull Request resolved: https://github.com/pytorch/pytorch/pull/117036 Approved by: https://github.com/janeyx99 | diff --git a/torch/distributed/pipeline/sync/batchnorm.py b/torch/distributed/pipeline/sync/batchnorm.py
index ad375f8933..868ad50cf3 100644
--- a/torch/distributed/pipeline/sync/batchnorm.py
+++ b/torch/distributed/pipeline/sync/batchnorm.py
@@ -5,7 +5,7 @@
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""Tracks the running statistics per mini-batch instead of micro-batch."""
-from typing import TypeVar, cast
+from typing import TypeVar, Optional, cast
import torch
from torch import Tensor, nn
@@ -33,7 +33,7 @@ class DeferredBatchNorm(_BatchNorm):
self,
num_features: int,
eps: float = 1e-5,
- momentum: float = 0.1,
+ momentum: Optional[float] = 0.1,
affine: bool = True,
chunks: int = 1,
) -> None:
diff --git a/torch/nn/modules/batchnorm.py b/torch/nn/modules/batchnorm.py
index 0eac5cef2d..3c48e56d5e 100644
--- a/torch/nn/modules/batchnorm.py
+++ b/torch/nn/modules/batchnorm.py
@@ -21,7 +21,7 @@ class _NormBase(Module):
__constants__ = ["track_running_stats", "momentum", "eps", "num_features", "affine"]
num_features: int
eps: float
- momentum: float
+ momentum: Optional[float]
affine: bool
track_running_stats: bool
# WARNING: weight and bias purposely not defined here.
@@ -31,7 +31,7 @@ class _NormBase(Module):
self,
num_features: int,
eps: float = 1e-5,
- momentum: float = 0.1,
+ momentum: Optional[float] = 0.1,
affine: bool = True,
track_running_stats: bool = True,
device=None,
@@ -127,7 +127,7 @@ class _BatchNorm(_NormBase):
self,
num_features: int,
eps: float = 1e-5,
- momentum: float = 0.1,
+ momentum: Optional[float] = 0.1,
affine: bool = True,
track_running_stats: bool = True,
device=None,
@@ -677,7 +677,7 @@ class SyncBatchNorm(_BatchNorm):
self,
num_features: int,
eps: float = 1e-5,
- momentum: float = 0.1,
+ momentum: Optional[float] = 0.1,
affine: bool = True,
track_running_stats: bool = True,
process_group: Optional[Any] = None,
diff --git a/torch/nn/modules/instancenorm.py b/torch/nn/modules/instancenorm.py
index d0c37b7244..ae187e98b7 100644
--- a/torch/nn/modules/instancenorm.py
+++ b/torch/nn/modules/instancenorm.py
@@ -34,8 +34,15 @@ class _InstanceNorm(_NormBase):
def _apply_instance_norm(self, input):
return F.instance_norm(
- input, self.running_mean, self.running_var, self.weight, self.bias,
- self.training or not self.track_running_stats, self.momentum, self.eps)
+ input,
+ self.running_mean,
+ self.running_var,
+ self.weight,
+ self.bias,
+ self.training or not self.track_running_stats,
+ self.momentum if self.momentum is not None else 0.0,
+ self.eps,
+ )
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
diff --git a/torch/optim/swa_utils.py b/torch/optim/swa_utils.py
index 90b3f1598d..0c685e34fc 100644
--- a/torch/optim/swa_utils.py
+++ b/torch/optim/swa_utils.py
@@ -1,12 +1,15 @@
import itertools
import math
from copy import deepcopy
+from typing import Any, Callable, Iterable, List, Optional, Tuple, Union, Dict, cast
import warnings
import torch
+from torch import Tensor
from torch.nn import Module
from torch.optim.lr_scheduler import LRScheduler
from torch.utils._foreach_utils import _get_foreach_kernels_supported_devices
+from .optimizer import Optimizer
__all__ = [
'AveragedModel',
@@ -18,12 +21,14 @@ __all__ = [
'get_swa_avg_fn'
]
-from torch.utils._foreach_utils import _group_tensors_by_device_and_dtype
+from torch.utils._foreach_utils import _group_tensors_by_device_and_dtype, TensorListList, Indices
+
+PARAM_LIST = Union[Tuple[Tensor, ...], List[Tensor]]
def get_ema_multi_avg_fn(decay=0.999):
@torch.no_grad()
- def ema_update(ema_param_list, current_param_list, _):
+ def ema_update(ema_param_list: PARAM_LIST, current_param_list: PARAM_LIST, _):
# foreach lerp only handles float and complex
if torch.is_floating_point(ema_param_list[0]) or torch.is_complex(ema_param_list[0]):
torch._foreach_lerp_(ema_param_list, current_param_list, 1 - decay)
@@ -36,20 +41,23 @@ def get_ema_multi_avg_fn(decay=0.999):
def get_swa_multi_avg_fn():
@torch.no_grad()
- def swa_update(averaged_param_list, current_param_list, num_averaged):
+ def swa_update(averaged_param_list: PARAM_LIST, current_param_list: PARAM_LIST, num_averaged: Union[Tensor, int]):
# foreach lerp only handles float and complex
if torch.is_floating_point(averaged_param_list[0]) or torch.is_complex(averaged_param_list[0]):
torch._foreach_lerp_(averaged_param_list, current_param_list, 1 / (num_averaged + 1))
else:
diffs = torch._foreach_sub(current_param_list, averaged_param_list)
- torch._foreach_addcdiv_(averaged_param_list, diffs, [num_averaged + 1] * len(averaged_param_list))
+ if isinstance(num_averaged, Tensor):
+ torch._foreach_addcdiv_(averaged_param_list, diffs, [num_averaged + 1] * len(averaged_param_list))
+ else:
+ torch._foreach_add_(averaged_param_list, diffs, alpha=1.0 / (num_averaged + 1))
return swa_update
def get_ema_avg_fn(decay=0.999):
@torch.no_grad()
- def ema_update(ema_param, current_param, num_averaged):
+ def ema_update(ema_param: Tensor, current_param: Tensor, num_averaged):
return decay * ema_param + (1 - decay) * current_param
return ema_update
@@ -57,7 +65,7 @@ def get_ema_avg_fn(decay=0.999):
def get_swa_avg_fn():
@torch.no_grad()
- def swa_update(averaged_param, current_param, num_averaged):
+ def swa_update(averaged_param: Tensor, current_param: Tensor, num_averaged: Union[Tensor, int]):
return averaged_param + (current_param - averaged_param) / (num_averaged + 1)
return swa_update
@@ -162,7 +170,17 @@ class AveragedModel(Module):
.. _Polyak averaging:
https://paperswithcode.com/method/polyak-averaging
"""
- def __init__(self, model, device=None, avg_fn=None, multi_avg_fn=None, use_buffers=False):
+
+ def __init__(
+ self,
+ model: Module,
+ device: Optional[Union[int, torch.device]] = None,
+ avg_fn: Optional[Callable[[Tensor, Tensor, Union[Tensor, int]],
+ Tensor]] = None,
+ multi_avg_fn: Optional[Callable[
+ [PARAM_LIST, PARAM_LIST, Union[Tensor, int]], None]] = None,
+ use_buffers=False,
+ ):
super().__init__()
assert avg_fn is None or multi_avg_fn is None, 'Only one of avg_fn and multi_avg_fn should be provided'
self.module = deepcopy(model)
@@ -177,7 +195,7 @@ class AveragedModel(Module):
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
- def update_parameters(self, model):
+ def update_parameters(self, model: Module):
self_param = (
itertools.chain(self.module.parameters(), self.module.buffers())
if self.use_buffers else self.parameters()
@@ -197,7 +215,11 @@ class AveragedModel(Module):
if self.n_averaged > 0:
if self.multi_avg_fn is not None or self.avg_fn is None:
- grouped_tensors = _group_tensors_by_device_and_dtype([self_param_detached, model_param_detached])
+ grouped_tensors = _group_tensors_by_device_and_dtype(
+ cast(TensorListList, [self_param_detached, model_param_detached]))
+ grouped_tensors = cast(
+ Dict[Tuple[torch.device, torch.dtype], Tuple[List[List[Tensor]], Indices]],
+ grouped_tensors)
for ((device, _), ([self_params, model_params], _)) in grouped_tensors.items():
if self.multi_avg_fn:
self.multi_avg_fn(self_params, model_params, self.n_averaged.to(device))
@@ -223,7 +245,7 @@ class AveragedModel(Module):
@torch.no_grad()
-def update_bn(loader, model, device=None):
+def update_bn(loader: Iterable[Any], model: Module, device: Optional[Union[int, torch.device]] = None):
r"""Updates BatchNorm running_mean, running_var buffers in the model.
It performs one pass over data in `loader` to estimate the activation
@@ -319,7 +341,7 @@ class SWALR(LRScheduler):
.. _Averaging Weights Leads to Wider Optima and Better Generalization:
https://arxiv.org/abs/1803.05407
"""
- def __init__(self, optimizer, swa_lr, anneal_epochs=10, anneal_strategy='cos', last_epoch=-1):
+ def __init__(self, optimizer: Optimizer, swa_lr: float, anneal_epochs=10, anneal_strategy='cos', last_epoch=-1):
swa_lrs = self._format_param(optimizer, swa_lr)
for swa_lr, group in zip(swa_lrs, optimizer.param_groups):
group['swa_lr'] = swa_lr
@@ -361,10 +383,13 @@ class SWALR(LRScheduler):
return (lr - alpha * swa_lr) / (1 - alpha)
def get_lr(self):
- if not self._get_lr_called_within_step:
+ # `_get_lr_called_within_step` is only available `_enable_get_lr_call`,
+ # so we ignore the type error here. See `LRScheduler.step()` for more details.
+ if not self._get_lr_called_within_step: # type: ignore[attr-defined]
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
- step = self._step_count - 1
+ # Set in `LRScheduler._initial_step()`
+ step = self._step_count - 1 # type: ignore[attr-defined]
if self.anneal_epochs == 0:
step = max(1, step)
prev_t = max(0, min(1, (step - 1) / max(1, self.anneal_epochs)))
diff --git a/torch/optim/swa_utils.pyi b/torch/optim/swa_utils.pyi
deleted file mode 100644
index 074e7a9bbd..0000000000
--- a/torch/optim/swa_utils.pyi
+++ /dev/null
@@ -1,32 +0,0 @@
-from typing import Any, Callable, Iterable, Union
-
-from torch import device, Tensor
-from torch.nn.modules import Module
-from .lr_scheduler import _LRScheduler
-from .optimizer import Optimizer
-
-class AveragedModel(Module):
- def __init__(
- self,
- model: Module,
- device: Union[int, device] = ...,
- avg_fn: Callable[[Tensor, Tensor, int], Tensor] = ...,
- use_buffers: bool = ...,
- ) -> None: ...
- def update_parameters(self, model: Module) -> None: ...
-
-def update_bn(
- loader: Iterable[Any],
- model: Module,
- device: Union[int, device] = ...,
-) -> None: ...
-
-class SWALR(_LRScheduler):
- def __init__(
- self,
- optimizer: Optimizer,
- swa_lr: float,
- anneal_epochs: int,
- anneal_strategy: str,
- last_epoch: int = ...,
- ) -> None: ... | 2.41.0 |
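For context, a minimal usage sketch of the `AveragedModel` / `get_ema_avg_fn` API whose annotations are tightened in the patch above; the model, optimizer, and training loop here are hypothetical stand-ins, not part of the change.

```
import torch
from torch.optim.swa_utils import AveragedModel, get_ema_avg_fn

model = torch.nn.Linear(10, 2)                       # hypothetical model
opt = torch.optim.SGD(model.parameters(), lr=0.1)    # hypothetical optimizer
ema_model = AveragedModel(model, avg_fn=get_ema_avg_fn(decay=0.999))

for _ in range(3):                                   # hypothetical training loop
    loss = model(torch.randn(4, 10)).sum()
    opt.zero_grad()
    loss.backward()
    opt.step()
    ema_model.update_parameters(model)               # runs the (now typed) avg_fn under no_grad
```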
c09c6b91af6287be9ce6f167640575f0908f943 | Fri, 12 Apr 2024 17:34:58 +0000 | [PATCH 0091/1000] Fix memory planning compile error (#123867) | Summary: We should be using CppPrinter in the cpp wrapper codegen, not the ExprPrinter (which prints expressions for Python). This is not really a memory-planning-specific bug, but memory planning exposes it because it tends to emit more complicated expressions. Differential Revision: D56025683 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123867 Approved by: https://github.com/hl475, https://github.com/chenyang78 | diff --git a/test/inductor/test_memory_planning.py b/test/inductor/test_memory_planning.py
index c1271e0648..1bd546e5b4 100644
--- a/test/inductor/test_memory_planning.py
+++ b/test/inductor/test_memory_planning.py
@@ -74,7 +74,7 @@ class TestMemoryPlanning(TestCase):
).check_next(
"auto buf0 = alloc_from_pool(pool1, 0, at::kFloat, {s0, s0}, {s0, 1L});"
).check(
- "auto buf1 = alloc_from_pool(pool1, align((4*s0) + (4*s0*((-1) + s0))),"
+ "auto buf1 = alloc_from_pool(pool1, align((4L*s0) + (4L*s0*((-1L) + s0))),"
).run(
code
)
diff --git a/torch/_inductor/codegen/cpp_wrapper_cpu.py b/torch/_inductor/codegen/cpp_wrapper_cpu.py
index 3f650ef205..f8e2f30859 100644
--- a/torch/_inductor/codegen/cpp_wrapper_cpu.py
+++ b/torch/_inductor/codegen/cpp_wrapper_cpu.py
@@ -16,7 +16,7 @@ from ..codecache import CudaKernelParamCache
from ..utils import cache_on_self, sympy_product
from ..virtualized import V
from .common import IndentedBuffer
-from .wrapper import EnterSubgraphLine, ExitSubgraphLine, pexpr, WrapperCodeGen
+from .wrapper import EnterSubgraphLine, ExitSubgraphLine, WrapperCodeGen
class CppWrapperCpu(WrapperCodeGen):
@@ -1540,7 +1540,7 @@ class CppWrapperCpu(WrapperCodeGen):
tmp_name = f"tmp_tensor_handle_{next(self.tmp_tensor_id)}"
args = [
name,
- pexpr(offset), # bytes not numel
+ self.expr_printer(offset), # bytes not numel
self.codegen_dtype(dtype),
str(len(shape)),
self.codegen_int_array_var(
@@ -1561,7 +1561,7 @@ class CppWrapperCpu(WrapperCodeGen):
", ".join(
[
name,
- pexpr(offset), # bytes not numel
+ self.expr_printer(offset), # bytes not numel
self.codegen_dtype(dtype),
self.codegen_shape_tuple(shape),
self.codegen_shape_tuple(stride), | 2.41.0 |
3dbe2b517b1a515016eaf6c681aaa02f62f3cdd | Thu, 11 Apr 2024 17:31:24 +0000 | [PATCH 0092/1000] Add test for skipping hf logging during export (#123410) | https://github.com/pytorch/pytorch/pull/123402 already supports HF logging because the HF logger is based on the standard logging module. This PR only adds a test to guard against regression. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123410 Approved by: https://github.com/BowenBao, https://github.com/malfet | diff --git a/test/export/test_export.py b/test/export/test_export.py
index f05aa1d074..5b2f920739 100644
--- a/test/export/test_export.py
+++ b/test/export/test_export.py
@@ -45,6 +45,7 @@ from torch.testing._internal.common_utils import (
IS_SANDCASTLE,
IS_WINDOWS,
run_tests,
+ TEST_TRANSFORMERS,
TestCase as TorchTestCase,
)
from torch.utils._pytree import (
@@ -4604,6 +4605,31 @@ def forward(self, x):
return (add, add_1)""",
)
+ @unittest.skipIf(not TEST_TRANSFORMERS, "No transformers")
+ def test_hf_logging_logger(self):
+ import transformers
+
+ logger = transformers.utils.logging.get_logger(__name__)
+
+ class M(torch.nn.Module):
+ def forward(self, x):
+ logger.warning_once("start")
+ x1 = x + x
+ x2 = x1 * x1
+ x3 = x2 + x2
+ return (x1, x3)
+
+ gm = export(M(), (torch.randn(3, 3),)).graph_module
+ self.assertExpectedInline(
+ gm.code.strip(),
+ """\
+def forward(self, x):
+ add = torch.ops.aten.add.Tensor(x, x); x = None
+ mul = torch.ops.aten.mul.Tensor(add, add)
+ add_1 = torch.ops.aten.add.Tensor(mul, mul); mul = None
+ return (add, add_1)""",
+ )
+
def test_warning(self):
class M(torch.nn.Module):
def forward(self, x):
diff --git a/test/onnx/test_fx_to_onnx.py b/test/onnx/test_fx_to_onnx.py
index c444ae54c7..2f81818909 100644
--- a/test/onnx/test_fx_to_onnx.py
+++ b/test/onnx/test_fx_to_onnx.py
@@ -721,6 +721,18 @@ class TestFxToOnnx(pytorch_test_common.ExportTestCase):
model = LoggingLoggerModule()
_ = torch.onnx.dynamo_export(model, input)
+ def test_export_with_hf_logging_logger(self):
+ logger = transformers.utils.logging.get_logger(__name__)
+
+ class HFLoggingLoggerModule(torch.nn.Module):
+ def forward(self, x):
+ logger.warning_once("abc")
+ return x + 1
+
+ input = torch.randn(2, 3)
+ model = HFLoggingLoggerModule()
+ _ = torch.onnx.dynamo_export(model, input)
+
def test_checkpoint_cast(self):
model_id = "openai/whisper-large-v3"
feature_extractor = transformers.WhisperFeatureExtractor(feature_size=128)
diff --git a/torch/testing/_internal/common_utils.py b/torch/testing/_internal/common_utils.py
index a864f82cd5..776a9d2d3e 100644
--- a/torch/testing/_internal/common_utils.py
+++ b/torch/testing/_internal/common_utils.py
@@ -1233,7 +1233,7 @@ TEST_CUDA = torch.cuda.is_available()
custom_device_mod = getattr(torch, torch._C._get_privateuse1_backend_name(), None)
TEST_PRIVATEUSE1 = True if (hasattr(custom_device_mod, "is_available") and custom_device_mod.is_available()) else False
TEST_NUMBA = _check_module_exists('numba')
-
+TEST_TRANSFORMERS = _check_module_exists('transformers')
TEST_DILL = _check_module_exists('dill')
TEST_LIBROSA = _check_module_exists('librosa') and not IS_ARM64 | 2.41.0 |
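As a side note on why the existing handling covers this case: `transformers`' logging helper returns a standard-library logger (with extras such as `warning_once` patched onto `logging.Logger`), so the stdlib-logging skip logic from #123402 applies to it as well. A small sketch, assuming `transformers` is installed:

```
import logging
import transformers

logger = transformers.utils.logging.get_logger(__name__)
assert isinstance(logger, logging.Logger)  # HF loggers are ordinary stdlib loggers underneath
logger.warning_once("hello")               # HF-specific helper layered on top of logging.Logger
```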
66b24e24255044de81092547541da84d13799cb | Fri, 12 Apr 2024 18:21:27 +0000 | [PATCH 0094/1000] [Inductor] Add a device agnostic DeviceGuard class to inductor (#123338) | Summary: Currently the `device` context manager from the device interface is used in only one place in inductor. This PR creates an inductor-specific `DeviceGuard` class for use in these cases, which keeps a reference to the `DeviceInterface` class, which is defined and added out of tree. This then offloads the device-specific work to the device interface, instead of having to define this logic on the device class, which isn't strictly necessary for inductor. Ideally I would have used the existing `DeviceGuard` class, but those are defined per device and don't work well with inductor's device-agnostic, out-of-tree-compatible design. With the existing classes in mind, I am happy to take suggestions on the renaming of this class. While I was there, I also took the opportunity to rename `gpu_device` to `device_interface` to clarify that this is not necessarily a GPU. Test Plan: None currently, happy to add some. Co-authored-by: Matthew Haddock <[email protected]> Co-authored-by: Adnan Akhundov <[email protected]> Pull Request resolved: https://github.com/pytorch/pytorch/pull/123338 Approved by: https://github.com/aakhundov | diff --git a/test/dynamo/test_deviceguard.py b/test/dynamo/test_deviceguard.py
new file mode 100644
index 0000000000..4ed54a4c19
--- /dev/null
+++ b/test/dynamo/test_deviceguard.py
@@ -0,0 +1,92 @@
+# Owner(s): ["module: dynamo"]
+import unittest
+from unittest.mock import Mock
+
+import torch
+
+import torch._dynamo.test_case
+import torch._dynamo.testing
+from torch._dynamo.device_interface import CudaInterface, DeviceGuard
+from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU
+
+
+class TestDeviceGuard(torch._dynamo.test_case.TestCase):
+ """
+ Unit tests for the DeviceGuard class using a mock DeviceInterface.
+ """
+
+ def setUp(self):
+ super().setUp()
+ self.device_interface = Mock()
+
+ self.device_interface.exchange_device = Mock(return_value=0)
+ self.device_interface.maybe_exchange_device = Mock(return_value=0)
+
+ def test_device_guard(self):
+ device_guard = DeviceGuard(self.device_interface, 1)
+
+ with device_guard as _:
+ self.device_interface.exchange_device.assert_called_once_with(1)
+ self.assertEqual(device_guard.prev_idx, 0)
+ self.assertEqual(device_guard.idx, 1)
+
+ self.device_interface.maybe_exchange_device.assert_called_once_with(0)
+ self.assertEqual(device_guard.prev_idx, 0)
+ self.assertEqual(device_guard.idx, 0)
+
+ def test_device_guard_no_index(self):
+ device_guard = DeviceGuard(self.device_interface, None)
+
+ with device_guard as _:
+ self.device_interface.exchange_device.assert_not_called()
+ self.assertEqual(device_guard.prev_idx, -1)
+ self.assertEqual(device_guard.idx, None)
+
+ self.device_interface.maybe_exchange_device.assert_not_called()
+ self.assertEqual(device_guard.prev_idx, -1)
+ self.assertEqual(device_guard.idx, None)
+
+
[email protected](not TEST_CUDA, "No CUDA available.")
+class TestCUDADeviceGuard(torch._dynamo.test_case.TestCase):
+ """
+ Unit tests for the DeviceGuard class using a CudaInterface.
+ """
+
+ def setUp(self):
+ super().setUp()
+ self.device_interface = CudaInterface
+
+ @unittest.skipIf(not TEST_MULTIGPU, "need multiple GPU")
+ def test_device_guard(self):
+ current_device = torch.cuda.current_device()
+
+ device_guard = DeviceGuard(self.device_interface, 1)
+
+ with device_guard as _:
+ self.assertEqual(torch.cuda.current_device(), 1)
+ self.assertEqual(device_guard.prev_idx, 0)
+ self.assertEqual(device_guard.idx, 1)
+
+ self.assertEqual(torch.cuda.current_device(), current_device)
+ self.assertEqual(device_guard.prev_idx, 0)
+ self.assertEqual(device_guard.idx, 0)
+
+ def test_device_guard_no_index(self):
+ current_device = torch.cuda.current_device()
+
+ device_guard = DeviceGuard(self.device_interface, None)
+
+ with device_guard as _:
+ self.assertEqual(torch.cuda.current_device(), current_device)
+ self.assertEqual(device_guard.prev_idx, -1)
+ self.assertEqual(device_guard.idx, None)
+
+ self.assertEqual(device_guard.prev_idx, -1)
+ self.assertEqual(device_guard.idx, None)
+
+
+if __name__ == "__main__":
+ from torch._dynamo.test_case import run_tests
+
+ run_tests()
diff --git a/torch/_dynamo/device_interface.py b/torch/_dynamo/device_interface.py
index 9070222a0e..e62efaf825 100644
--- a/torch/_dynamo/device_interface.py
+++ b/torch/_dynamo/device_interface.py
@@ -69,6 +69,14 @@ class DeviceInterface(metaclass=DeviceInterfaceMeta):
def set_device(device: _device_t):
raise NotImplementedError()
+ @staticmethod
+ def maybe_exchange_device(device: int) -> int:
+ raise NotImplementedError()
+
+ @staticmethod
+ def exchange_device(device: int) -> int:
+ raise NotImplementedError()
+
@staticmethod
def device_count():
raise NotImplementedError()
@@ -110,6 +118,31 @@ class DeviceInterface(metaclass=DeviceInterfaceMeta):
raise NotImplementedError()
+class DeviceGuard:
+ """
+ This class provides a context manager for device switching. This is a stripped
+ down version of torch.{device_name}.device.
+
+ The context manager changes the current device to the given device index
+ on entering the context and restores the original device on exiting.
+ The device is switched using the provided device interface.
+ """
+
+ def __init__(self, device_interface: Type[DeviceInterface], index: Optional[int]):
+ self.device_interface = device_interface
+ self.idx = index
+ self.prev_idx = -1
+
+ def __enter__(self):
+ if self.idx is not None:
+ self.prev_idx = self.device_interface.exchange_device(self.idx)
+
+ def __exit__(self, type: Any, value: Any, traceback: Any):
+ if self.idx is not None:
+ self.idx = self.device_interface.maybe_exchange_device(self.prev_idx)
+ return False
+
+
class CudaInterface(DeviceInterface):
device = torch.cuda.device
@@ -159,6 +192,8 @@ class CudaInterface(DeviceInterface):
synchronize = staticmethod(torch.cuda.synchronize)
get_device_properties = staticmethod(torch.cuda.get_device_properties) # type: ignore[assignment]
get_raw_stream = staticmethod(get_cuda_stream) # type: ignore[arg-type]
+ exchange_device = staticmethod(torch.cuda._exchange_device) # type: ignore[arg-type]
+ maybe_exchange_device = staticmethod(torch.cuda._maybe_exchange_device) # type: ignore[arg-type]
# Can be mock patched by @patch decorator.
@staticmethod
@@ -224,6 +259,8 @@ class XpuInterface(DeviceInterface):
synchronize = staticmethod(torch.xpu.synchronize)
get_device_properties = staticmethod(torch.xpu.get_device_properties) # type: ignore[assignment]
get_raw_stream = staticmethod(get_xpu_stream) # type: ignore[arg-type]
+ exchange_device = staticmethod(torch.xpu._exchange_device) # type: ignore[arg-type]
+ maybe_exchange_device = staticmethod(torch.xpu._maybe_exchange_device) # type: ignore[arg-type]
# Can be mock patched by @patch decorator.
@staticmethod
diff --git a/torch/_inductor/triton_heuristics.py b/torch/_inductor/triton_heuristics.py
index ecc182e2d8..5ce3d82d71 100644
--- a/torch/_inductor/triton_heuristics.py
+++ b/torch/_inductor/triton_heuristics.py
@@ -18,7 +18,7 @@ from typing import Any, Callable, Dict, List, Optional, Set, Tuple
import torch
import torch.autograd.profiler as autograd_profiler
-from torch._dynamo.device_interface import get_interface_for_device
+from torch._dynamo.device_interface import DeviceGuard, get_interface_for_device
from torch._dynamo.utils import dynamo_timed, get_first_attr
from torch.utils._triton import has_triton_package
@@ -165,7 +165,7 @@ class CachingAutotuner(KernelInterface):
self.device_type = (
triton_meta["device_type"] if "device_type" in triton_meta else "cuda"
)
- self.gpu_device = get_interface_for_device(self.device_type)
+ self.device_interface = get_interface_for_device(self.device_type)
if log.isEnabledFor(logging.DEBUG):
log.debug(
@@ -217,7 +217,7 @@ class CachingAutotuner(KernelInterface):
seen_configs = set(self.configs)
- device_prop = self.gpu_device.Worker.get_device_properties(
+ device_prop = self.device_interface.Worker.get_device_properties(
self.triton_meta["device"]
)
if (
@@ -318,7 +318,7 @@ class CachingAutotuner(KernelInterface):
device_type = self.device_type if torch.version.hip is None else "cuda"
device_id = compile_meta["device"]
device = torch.device(device_type, device_id)
- cc = self.gpu_device.get_compute_capability(device)
+ cc = self.device_interface.get_compute_capability(device)
compile_meta["cc"] = cc
@@ -353,9 +353,9 @@ class CachingAutotuner(KernelInterface):
)
# load binary to the correct device
- with self.gpu_device.device(compile_meta["device"]): # type: ignore[attr-defined]
+ with DeviceGuard(self.device_interface, compile_meta["device"]): # type: ignore[attr-defined]
# need to initialize context
- self.gpu_device.synchronize(self.gpu_device.current_device())
+ self.device_interface.synchronize(self.device_interface.current_device())
try:
binary = triton.compile(*compile_args, **compile_kwargs)
@@ -528,8 +528,8 @@ class CachingAutotuner(KernelInterface):
)
return float("inf")
- stream = self.gpu_device.get_raw_stream( # type: ignore[call-arg]
- self.gpu_device.current_device()
+ stream = self.device_interface.get_raw_stream( # type: ignore[call-arg]
+ self.device_interface.current_device()
)
def kernel_call(): | 2.41.0 |
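A minimal sketch of how the new `DeviceGuard` is intended to be used, based only on the interface shown above (the device type "cuda" and index 1 are illustrative):

```
from torch._dynamo.device_interface import DeviceGuard, get_interface_for_device

device_interface = get_interface_for_device("cuda")  # may be an out-of-tree interface
with DeviceGuard(device_interface, 1):
    # inside the block the current device was switched via device_interface.exchange_device(1)
    pass
# on exit the previous device is restored via device_interface.maybe_exchange_device(prev_idx)
```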
b0ba6bbd320985fedaccbd4a874ef5ff3d61d74 | Thu, 11 Apr 2024 21:54:39 -0700 | [PATCH 0097/1000] [dynamo] Improve constant-prop for regex/torch.__version__ (#123705) | Pull Request resolved: https://github.com/pytorch/pytorch/pull/123705 Approved by: https://github.com/anijain2305 ghstack dependencies: #123700 | diff --git a/test/dynamo/test_functions.py b/test/dynamo/test_functions.py
index 7ce0edd9b5..db79e59d39 100644
--- a/test/dynamo/test_functions.py
+++ b/test/dynamo/test_functions.py
@@ -171,6 +171,58 @@ class FunctionTests(torch._dynamo.test_case.TestCase):
v = v + x
return v
+ @make_test
+ def test_obj_eq(a, b):
+ v = a + b
+ if MyCls() == None: # noqa: E711
+ return -1
+ if MyCls() != None: # noqa: E711
+ v = v.sin()
+ if MyCls() == MyCls():
+ return -2
+ if MyCls() != MyCls():
+ return v + 1
+ return -3
+
+ @make_test
+ def test_cls_eq(a, b):
+ v = a + b
+ if MyCls == None: # noqa: E711
+ return -1
+ if MyCls != None: # noqa: E711
+ v = v.sin()
+ if MyCls != MyCls:
+ return -2
+ if MyCls == MyCls:
+ return v + 1
+ return -3
+
+ @make_test
+ def test_obj_is(a, b):
+ v = a + b
+ if MyCls() is None: # noqa: E711
+ return -1
+ if MyCls() is not None: # noqa: E711
+ v = v.sin()
+ if MyCls() is MyCls():
+ return -2
+ if MyCls() is not MyCls():
+ return v + 1
+ return -3
+
+ @make_test
+ def test_cls_is(a, b):
+ v = a + b
+ if MyCls is None: # noqa: E711
+ return -1
+ if MyCls is not None: # noqa: E711
+ v = v.sin()
+ if MyCls is not MyCls:
+ return -2
+ if MyCls is MyCls:
+ return v + 1
+ return -3
+
@make_test
def test_itertools_combinations(a, b):
combs = []
diff --git a/test/dynamo/test_misc.py b/test/dynamo/test_misc.py
index b3d876290a..f64f33a5b9 100644
--- a/test/dynamo/test_misc.py
+++ b/test/dynamo/test_misc.py
@@ -9091,6 +9091,18 @@ def ___make_guard_fn():
self.assertEqual(list(eager), list(compiled))
self.assertEqual(len(counters["graph_break"]), 0)
+ def test_packaging_version_parse(self):
+ from packaging import version
+
+ @torch.compile(backend="eager", fullgraph=True)
+ def fn():
+ x = torch.zeros(1)
+ if version.parse(torch.__version__) >= version.parse("2.0.0"):
+ return x + 1
+ return x
+
+ self.assertEqual(fn().item(), 1)
+
def test_itertools_accumulate_tensors_user_defined(self):
def udo_fn_0(a, b):
return -1
diff --git a/test/dynamo_expected_failures/TestLinearizeCPU.test_linearize_composition_cpu_float32 b/test/dynamo_expected_failures/TestLinearizeCPU.test_linearize_composition_cpu_float32
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestPythonPytree.test_treespec_equality b/test/dynamo_expected_failures/TestPythonPytree.test_treespec_equality
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/torch/_dynamo/polyfill.py b/torch/_dynamo/polyfill.py
index 437d2eb537..d6bbb59692 100644
--- a/torch/_dynamo/polyfill.py
+++ b/torch/_dynamo/polyfill.py
@@ -54,3 +54,13 @@ def list_cmp(op: Callable[[Any, Any], bool], left: Sequence[Any], right: Sequenc
if a != b:
return op(a, b)
return op(len(left), len(right))
+
+
+def dropwhile(predicate, iterable):
+ # dropwhile(lambda x: x<5, [1,4,6,4,1]) → 6 4 1
+ iterable = iter(iterable)
+ for x in iterable:
+ if not predicate(x):
+ yield x
+ break
+ yield from iterable
diff --git a/torch/_dynamo/variables/__init__.py b/torch/_dynamo/variables/__init__.py
index 776d17c93a..06f634efb3 100644
--- a/torch/_dynamo/variables/__init__.py
+++ b/torch/_dynamo/variables/__init__.py
@@ -71,9 +71,11 @@ from .misc import (
NewGlobalVariable,
NumpyVariable,
PythonModuleVariable,
+ RegexPatternVariable,
StopIterationVariable,
StringFormatVariable,
SuperVariable,
+ TorchVersionVariable,
TypingVariable,
UnknownVariable,
)
@@ -136,6 +138,7 @@ __all__ = [
"PlacementVariable",
"PythonModuleVariable",
"RangeVariable",
+ "RegexPatternVariable",
"RemovableHandleVariable",
"RepeatIteratorVariable",
"RestrictedListSubclassVariable",
@@ -148,6 +151,7 @@ __all__ = [
"TensorVariable",
"TorchCtxManagerClassVariable",
"TorchInGraphFunctionVariable",
+ "TorchVersionVariable",
"TupleVariable",
"UnknownVariable",
"UnspecializedNNModuleVariable",
diff --git a/torch/_dynamo/variables/builder.py b/torch/_dynamo/variables/builder.py
index f94c464094..8a727288c2 100644
--- a/torch/_dynamo/variables/builder.py
+++ b/torch/_dynamo/variables/builder.py
@@ -145,7 +145,9 @@ from .misc import (
MethodWrapperVariable,
NumpyVariable,
PythonModuleVariable,
+ RegexPatternVariable,
SavedTensorBox,
+ TorchVersionVariable,
TypingVariable,
)
from .nn_module import FSDPManagedNNModuleVariable, UnspecializedNNModuleVariable
@@ -347,6 +349,7 @@ class VariableBuilder:
(tuple_iterator, cls.wrap_tuple_iterator),
((slice, range), cls.wrap_slice_range),
(tuple(common_constant_types), cls.wrap_literal),
+ (re.Pattern, cls.wrap_regex_pattern),
]
if config.trace_numpy and np:
@@ -360,6 +363,11 @@ class VariableBuilder:
return result
+ def wrap_regex_pattern(self, value: re.Pattern):
+ # TODO(jansel): something like a REPR_MATCH might be more robust here
+ self.install_guards(GuardBuilder.ID_MATCH)
+ return RegexPatternVariable(value)
+
@classmethod
@functools.lru_cache(None)
def _id_dispatch(cls):
@@ -383,6 +391,7 @@ class VariableBuilder:
**self.install_guards(GuardBuilder.FUNCTION_MATCH),
),
),
+ (torch.__version__, lambda self, value: TorchVersionVariable()),
]
result = {}
diff --git a/torch/_dynamo/variables/builtin.py b/torch/_dynamo/variables/builtin.py
index f6e4680c18..14b58c99cb 100644
--- a/torch/_dynamo/variables/builtin.py
+++ b/torch/_dynamo/variables/builtin.py
@@ -518,6 +518,14 @@ class BuiltinVariable(VariableTracker):
def compare_set_items(tx, left, right):
return ConstantVariable(op(left.set_items, right.set_items))
+ def compare_via_method(tx, left, right):
+ return left.call_method(tx, f"__{op.__name__}__", [right], {})
+
+ if op.__name__.startswith("is_"):
+ compare_user_defined = compare_by_value
+ else:
+ compare_user_defined = compare_via_method
+
op_var = BuiltinVariable(op)
result.extend(
[
@@ -546,14 +554,13 @@ class BuiltinVariable(VariableTracker):
list_compare_check,
),
((has_set_items, has_set_items), compare_set_items),
- # TODO(jansel): UserDefinedObjectVariable is wrong and could invoke user code
(
(UserDefinedObjectVariable, UserDefinedObjectVariable),
- compare_by_value,
+ compare_user_defined,
),
(
(UserDefinedClassVariable, UserDefinedClassVariable),
- compare_by_value,
+ compare_user_defined,
),
(
(
diff --git a/torch/_dynamo/variables/iter.py b/torch/_dynamo/variables/iter.py
index 4c999adb7b..e907c4c815 100644
--- a/torch/_dynamo/variables/iter.py
+++ b/torch/_dynamo/variables/iter.py
@@ -178,6 +178,10 @@ class ItertoolsVariable(VariableTracker):
return variables.CountIteratorVariable(*args, mutable_local=MutableLocal())
elif self.value is itertools.cycle:
return variables.CycleIteratorVariable(*args, mutable_local=MutableLocal())
+ elif self.value is itertools.dropwhile:
+ return variables.UserFunctionVariable(polyfill.dropwhile).call_function(
+ tx, args, kwargs
+ )
else:
return super().call_function(tx, args, kwargs)
diff --git a/torch/_dynamo/variables/misc.py b/torch/_dynamo/variables/misc.py
index 97498b1c2f..27f965c98c 100644
--- a/torch/_dynamo/variables/misc.py
+++ b/torch/_dynamo/variables/misc.py
@@ -1,10 +1,10 @@
# mypy: ignore-errors
-
import collections
import dataclasses
import functools
import inspect
import itertools
+import re
import sys
import types
from typing import Dict, List
@@ -995,3 +995,65 @@ class StopIterationVariable(VariableTracker):
codegen.load_import_from("builtins", "StopIteration")
codegen.foreach(self.args)
codegen.call_function(len(self.args), True)
+
+
+class ConstantLikeVariable(VariableTracker):
+ """self.value is a compile-time constant, but not a literal"""
+
+ _error_prefix = "ConstantLikeVariable"
+
+ def __init__(self, value, **kwargs):
+ super().__init__(**kwargs)
+ self.value = value
+
+ def python_type(self):
+ return type(self.value)
+
+ def as_python_constant(self):
+ return self.value
+
+ def call_method(
+ self,
+ tx,
+ name,
+ args: List[VariableTracker],
+ kwargs: Dict[str, VariableTracker],
+ ) -> VariableTracker:
+ try:
+ # we only support constant propagation for methods
+ cargs = [x.as_python_constant() for x in args]
+ ckwargs = {k: v.as_python_constant() for k, v in kwargs.items()}
+ except NotImplementedError:
+ unimplemented(f"{self._error_prefix}.{name}(*{args}, **{kwargs})")
+
+ result = getattr(self.value, name)(*cargs, **ckwargs)
+
+ if variables.ConstantVariable.is_literal(result):
+ return variables.ConstantVariable.create(result)
+ if isinstance(result, re.Match):
+ return ConstantRegexMatchVariable(result)
+
+ unimplemented(f"{self._error_prefix}.{name}() -> {result}")
+
+ def var_getattr(self, tx, name: str) -> VariableTracker:
+ result = getattr(self.value, name)
+ if variables.ConstantVariable.is_literal(result):
+ return variables.ConstantVariable.create(result)
+ return GetAttrVariable(self, name)
+
+
+class RegexPatternVariable(ConstantLikeVariable):
+ _error_prefix = "re.Pattern"
+
+
+class ConstantRegexMatchVariable(ConstantLikeVariable):
+ _error_prefix = "re.Match"
+
+
+class TorchVersionVariable(ConstantLikeVariable):
+ _error_prefix = "torch.__version__"
+
+ def __init__(self, **kwargs):
+ kwargs.setdefault("value", torch.__version__)
+ assert kwargs["value"] is torch.__version__
+ super().__init__(**kwargs)
diff --git a/torch/_dynamo/variables/user_defined.py b/torch/_dynamo/variables/user_defined.py
index e64a7b1c49..47e69fb0ad 100644
--- a/torch/_dynamo/variables/user_defined.py
+++ b/torch/_dynamo/variables/user_defined.py
@@ -7,6 +7,7 @@ import importlib
import inspect
import itertools
import random
+import re
import sys
import threading
import types
@@ -247,6 +248,10 @@ class UserDefinedClassVariable(UserDefinedVariable):
return BuiltinVariable.call_custom_dict_fromkeys(
tx, self.value, *args, **kwargs
)
+ elif name == "__eq__" and len(args) == 1 and hasattr(args[0], "value"):
+ return variables.ConstantVariable(self.value == args[0].value)
+ elif name == "__ne__" and len(args) == 1 and hasattr(args[0], "value"):
+ return variables.ConstantVariable(self.value != args[0].value)
return super().call_method(tx, name, args, kwargs)
@@ -603,6 +608,16 @@ class UserDefinedObjectVariable(UserDefinedVariable):
assert self.source # OrderedDict, dict subtypes must always have source
return self.odict_getitem(tx, args[0])
+ if (
+ method in (object.__ne__, object.__eq__)
+ and len(args) == 1
+ and not kwargs
+ and hasattr(args[0], "value")
+ ):
+ return ConstantVariable(
+ (self.value is args[0].value) is (method is object.__eq__)
+ )
+
# check for methods implemented in C++
if isinstance(method, types.FunctionType):
source = (
@@ -868,6 +883,7 @@ class UserDefinedObjectVariable(UserDefinedVariable):
(
torch.Tensor,
torch.nn.Module,
+ re.Pattern,
),
)
):
@@ -879,7 +895,10 @@ class UserDefinedObjectVariable(UserDefinedVariable):
if (
name not in getattr(value, "__dict__", {})
- and type(value).__module__.startswith("torch.")
+ and (
+ type(value).__module__.startswith("torch.")
+ or isinstance(subobj, re.Pattern)
+ )
and "torch.optim" not in type(value).__module__
and not callable(value)
and not isinstance(subobj, types.MethodDescriptorType) | 2.41.0 |
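Beyond the `packaging.version` test above, the new `RegexPatternVariable` / `TorchVersionVariable` handling targets version-gating code like the hypothetical sketch below; the pattern and branches are made up, and the intent is that the `re` calls on compile-time-constant inputs are folded instead of graph-breaking:

```
import re
import torch

_VERSION_RE = re.compile(r"^(\d+)\.(\d+)")  # hypothetical module-level constant pattern

@torch.compile(backend="eager")
def fn(x):
    match = _VERSION_RE.match(torch.__version__)  # both operands are compile-time constants
    if match and int(match.group(1)) >= 2:
        return x + 1
    return x

print(fn(torch.zeros(1)))
```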
022600cc64f1594d98656d66dc261822ea7d202 | Thu, 11 Apr 2024 21:54:40 -0700 | [PATCH 0098/1000] [inductor] Handle meta tensor ops in graph (#123786) | Pull Request resolved: https://github.com/pytorch/pytorch/pull/123786 Approved by: https://github.com/anijain2305 ghstack dependencies: #123700, #123705 | diff --git a/test/inductor/test_cpu_repro.py b/test/inductor/test_cpu_repro.py
index 80a0fed789..7acd238c96 100644
--- a/test/inductor/test_cpu_repro.py
+++ b/test/inductor/test_cpu_repro.py
@@ -1773,6 +1773,19 @@ class CPUReproTests(TestCase):
res_grad = test_args_for_opt["input"].grad
self.assertEqual(ref_grad, res_grad)
+ def test_meta_device(self):
+ @torch.compile(fullgraph=True)
+ def fn():
+ x = torch.ops.aten.empty.memory_format(
+ [1024, 128, 128],
+ dtype=torch.float16,
+ device="meta",
+ pin_memory=False,
+ )
+ return x.sin() + 1
+
+ self.assertEqual(fn().shape, [1024, 128, 128])
+
def test_decomposed_fake_quant_per_channel(self):
def fq(input, scales, zero_points, axis, quant_min, quant_max):
res = torch.fake_quantize_per_channel_affine(
diff --git a/torch/_inductor/constant_folding.py b/torch/_inductor/constant_folding.py
index be9bcf09a9..82cb5b5a5b 100644
--- a/torch/_inductor/constant_folding.py
+++ b/torch/_inductor/constant_folding.py
@@ -155,6 +155,9 @@ class ConstantFolder(torch.fx.Interpreter):
out = super().run_node(node)
if node.op != "get_attr" and isinstance(out, torch.Tensor):
+ if out.device.type == "meta":
+ return out
+
if not self.insertable_tensor_check(out):
return out
diff --git a/torch/_inductor/fx_passes/joint_graph.py b/torch/_inductor/fx_passes/joint_graph.py
index df89037067..ab9352714f 100644
--- a/torch/_inductor/fx_passes/joint_graph.py
+++ b/torch/_inductor/fx_passes/joint_graph.py
@@ -109,6 +109,28 @@ def remove_no_ops(
if len(node.args) == 2 and node.args[1] in ones:
replace_no_op(node, 0)
+ # meta tensors returned from the graph have no data and can be replaced with empty_strided
+ for output_node in graph.find_nodes(op="output"):
+ had_meta_return = False
+
+ def visit(n):
+ nonlocal had_meta_return
+ val = n.meta.get("val")
+ if isinstance(val, torch.Tensor) and val.device.type == "meta":
+ with graph.inserting_before(output_node):
+ n.replace_all_uses_with(
+ graph.call_function(
+ torch.ops.aten.empty_strided.default,
+ args=(val.size(), val.stride()),
+ kwargs={"dtype": val.dtype, "device": val.device},
+ )
+ )
+ had_meta_return = True
+
+ torch.fx.map_arg(output_node.args, visit)
+ if had_meta_return:
+ graph.eliminate_dead_code()
+
@torch.utils._python_dispatch._disable_current_modes()
def remove_redundant_views(gm: torch.fx.GraphModule):
diff --git a/torch/_inductor/graph.py b/torch/_inductor/graph.py
index 9601d29f77..ce2b6d44f0 100644
--- a/torch/_inductor/graph.py
+++ b/torch/_inductor/graph.py
@@ -1238,6 +1238,7 @@ class GraphLowering(torch.fx.Interpreter):
device_types = self.device_types.copy()
device_types.discard("cpu")
+ device_types.discard("meta")
# TODO(Eikan): Only support mixing cpu and other device now.
assert len(device_types) <= 1, "Does not support mixing {}".format(
"+".join(device_types)
diff --git a/torch/_inductor/utils.py b/torch/_inductor/utils.py
index 5f85704c99..0022d0758d 100644
--- a/torch/_inductor/utils.py
+++ b/torch/_inductor/utils.py
@@ -193,7 +193,7 @@ def decode_device(device: Union[Optional[torch.device], str]) -> torch.device:
return torch.tensor(0.0).device # default device
if isinstance(device, str):
device = torch.device(device)
- if device.type != "cpu" and device.index is None:
+ if device.type not in ("cpu", "meta") and device.index is None:
device_interface = get_interface_for_device(device.type)
return torch.device(device.type, index=device_interface.Worker.current_device())
return device | 2.41.0 |
1e6f84ad8faa1400ce5227f5e42f44325c585ed | Thu, 11 Apr 2024 21:54:40 -0700 | [PATCH 0099/1000] [dynamo] Graph break on uninitialized nn.Module (#123790) | Pull Request resolved: https://github.com/pytorch/pytorch/pull/123790 Approved by: https://github.com/anijain2305 ghstack dependencies: #123700, #123705, #123786 | diff --git a/torch/_dynamo/guards.py b/torch/_dynamo/guards.py
index f54b3cfe57..33caa7dd42 100644
--- a/torch/_dynamo/guards.py
+++ b/torch/_dynamo/guards.py
@@ -1099,16 +1099,10 @@ class GuardBuilder(GuardBuilderBase):
def NN_MODULE(self, guard: Guard):
self.ID_MATCH(guard)
- ref = self.arg_ref(guard)
val = self.get(guard.name)
-
- def setup_guard():
+ if hasattr(val, "training"):
assert istype(val.training, bool)
self._guard_on_attribute(guard, "training", GuardBuilder.CONSTANT_MATCH)
-
- if hasattr(val, "training"):
- # There are cases where a monkeypatched object has a guard made between __new__ and __init__
- setup_guard()
else:
exc.unimplemented(f"Guard setup for uninitialized class {type(val)}")
diff --git a/torch/_dynamo/variables/builder.py b/torch/_dynamo/variables/builder.py
index 8a727288c2..8c8a9e97dd 100644
--- a/torch/_dynamo/variables/builder.py
+++ b/torch/_dynamo/variables/builder.py
@@ -969,6 +969,8 @@ class VariableBuilder:
def wrap_module(self, value: torch.nn.Module):
from ..eval_frame import OptimizedModule
+ if len(value.__dict__) == 0:
+ unimplemented(f"uninitialized nn.Module: {typestr(value)}")
if istype(value, OptimizedModule):
self.install_guards(GuardBuilder.TYPE_MATCH)
self.source = AttrSource(self.source, "_orig_mod")
diff --git a/torch/_guards.py b/torch/_guards.py
index 5f4c6d9941..3f3cdc2777 100644
--- a/torch/_guards.py
+++ b/torch/_guards.py
@@ -257,7 +257,7 @@ class Guard:
try:
return self.create_fn(builder, self)
except Exception:
- log.error("Error while creating guard:\n%s", str(self).rstrip())
+ log.exception("Error while creating guard:\n%s", str(self).rstrip())
if self.stack:
log.error("Created at:\n%s", "".join(self.stack.format()[-4:]).rstrip())
raise | 2.41.0 |
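For reference, the "uninitialized" case being guarded against is a module on which `__init__` has not run yet (e.g. when a guard is created between `__new__` and `__init__`, as the removed guards.py comment mentions); such an object has an empty `__dict__`. A small illustration with a hypothetical class:

```
import torch

class M(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(2, 2)

m = M.__new__(M)        # allocated, but __init__ (and nn.Module.__init__) never ran
print(len(m.__dict__))  # 0 -> wrap_module() now graph-breaks instead of failing in guard setup
```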
3935783f7f0ae642d409aaf7df00e240730311c | Thu, 11 Apr 2024 21:54:41 -0700 | [PATCH 0101/1000] [dynamo] Fix @property on user-defined nn.Module (#123804) | Pull Request resolved: https://github.com/pytorch/pytorch/pull/123804 Approved by: https://github.com/anijain2305 ghstack dependencies: #123700, #123705, #123786, #123790, #123803 | diff --git a/test/dynamo/test_repros.py b/test/dynamo/test_repros.py
index f98c84cf54..2b03261129 100644
--- a/test/dynamo/test_repros.py
+++ b/test/dynamo/test_repros.py
@@ -4658,6 +4658,30 @@ def forward(self, s0 : torch.SymInt, s1 : torch.SymInt, L_x_ : torch.Tensor):
compiled_str = str(e)
self.assertEqual(orig_str, compiled_str)
+ def test_partially_initialized_module_property(self):
+ class Matrix(torch.nn.Module):
+ def __init__(self, data):
+ super().__init__()
+ self._data = data
+ self.foo = 10 * self.blocking
+
+ @property
+ def data(self):
+ return self._data
+
+ @property
+ def blocking(self):
+ return self.data.shape[1]
+
+ @torch.compile(backend="eager", fullgraph=True)
+ def fn():
+ return Matrix(torch.randn(10, 20))
+
+ v = fn()
+ self.assertEqual(v.foo, 200)
+ self.assertEqual(v.data.shape, (10, 20))
+ self.assertEqual(type(v), Matrix)
+
def test_global_fn_mutation(self):
def foo(x, y):
return global_fn(x) + y
diff --git a/torch/_dynamo/variables/user_defined.py b/torch/_dynamo/variables/user_defined.py
index 47e69fb0ad..6d242ce56a 100644
--- a/torch/_dynamo/variables/user_defined.py
+++ b/torch/_dynamo/variables/user_defined.py
@@ -788,7 +788,16 @@ class UserDefinedObjectVariable(UserDefinedVariable):
or "__slots__" in self.value.__class__.__dict__
or type(self.value) == threading.local
):
- # getattr_static doesn't work on these
+ try:
+ cls_var = inspect.getattr_static(
+ self.value.__class__, name, NO_SUCH_SUBOBJ
+ )
+ if cls_var is not NO_SUCH_SUBOBJ and name not in self.value.__dict__:
+ # maybe user-defined @property that we need to inline
+ return cls_var
+ except AttributeError:
+ pass # __slots__
+ # this might call torch.nn.Module.__getattr__
subobj = getattr(self.value, name)
else:
subobj = inspect.getattr_static(self.value, name)
@@ -802,7 +811,6 @@ class UserDefinedObjectVariable(UserDefinedVariable):
value = self.value
source = AttrSource(self.source, name) if self.source else None
self._check_for_getattribute()
- getattr_fn = self._check_for_getattr()
if tx.output.side_effects.has_pending_mutation_of_attr(self, name):
return tx.output.side_effects.load_attr(self, name)
@@ -811,6 +819,7 @@ class UserDefinedObjectVariable(UserDefinedVariable):
subobj = self._getattr_static(name)
except AttributeError:
subobj = NO_SUCH_SUBOBJ
+ getattr_fn = self._check_for_getattr()
if isinstance(getattr_fn, types.FunctionType):
return variables.UserMethodVariable(
getattr_fn, self, source=source | 2.41.0 |
0694690810f3d0530f22b3558384df715bb6e3e | Thu, 11 Apr 2024 21:54:42 -0700 | [PATCH 0103/1000] [dynamo] Support Tuple[int] args to autograd.Function (#123887) | Pull Request resolved: https://github.com/pytorch/pytorch/pull/123887 Approved by: https://github.com/anijain2305 ghstack dependencies: #123700, #123705, #123786, #123790, #123803, #123804, #123896 | diff --git a/test/dynamo/test_autograd_function.py b/test/dynamo/test_autograd_function.py
index beca818570..afde52b9ed 100644
--- a/test/dynamo/test_autograd_function.py
+++ b/test/dynamo/test_autograd_function.py
@@ -924,6 +924,33 @@ class AutogradFunctionTests(torch._dynamo.test_case.TestCase):
foo(torch.randn(2))
foo(torch.randn(2, requires_grad=True))
+ def test_tuple_arg(self):
+ cnt = torch._dynamo.testing.CompileCounter()
+
+ class TupleArgFunc(torch.autograd.Function):
+ @staticmethod
+ def forward(ctx, x, shape):
+ ctx.save_for_backward(torch.randn(shape))
+ return x + 1
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ (result,) = ctx.saved_tensors
+ return result, None
+
+ @torch.compile(backend=cnt, fullgraph=True)
+ def fn():
+ return TupleArgFunc.apply(x, shape)
+
+ shape = (10, 10)
+ x = torch.randn(shape, requires_grad=True)
+ out = fn()
+ out.sum().backward()
+ self.assertEqual(out, x + 1)
+ self.assertEqual(x.grad.shape, shape)
+ self.assertEqual(cnt.frame_count, 1)
+ self.assertEqual(cnt.op_count, 2)
+
@requires_cuda
@skipIfRocm
def test_triton_kernel_basic(self):
diff --git a/torch/_dynamo/variables/higher_order_ops.py b/torch/_dynamo/variables/higher_order_ops.py
index 471fff7d8d..9a41d37b57 100644
--- a/torch/_dynamo/variables/higher_order_ops.py
+++ b/torch/_dynamo/variables/higher_order_ops.py
@@ -137,7 +137,7 @@ def validate_args_and_maybe_create_graph_inputs(
set_subgraph_inputs,
description,
):
- from . import AutogradFunctionContextVariable, EnumVariable
+ from . import AutogradFunctionContextVariable
from .builder import wrap_fx_proxy_cls
assert tracer.parent is not None
@@ -166,7 +166,7 @@ def validate_args_and_maybe_create_graph_inputs(
args.append(a)
continue
- if isinstance(a, (ConstantVariable, EnumVariable)):
+ if a.is_python_constant():
# This arg is not used in the body of the higher order op.
# Currently, this new input is added to make the calls
# happy, which expect a fixed number of arguments. In | 2.41.0 |
f0fc04fa39017761a72f46ae50571e2fd35d9da | Fri, 12 Apr 2024 19:18:16 +0000 | [PATCH 0104/1000] [CUDA][64-bit indexing] Bump large tensor threshold of `test_cross_entropy_large_tensor` to 70GiB (#123772) | `torch.cuda.max_memory_reserved()` here shows 68729962496 (about 65546 MiB). CC @malfet @crcrpar Pull Request resolved: https://github.com/pytorch/pytorch/pull/123772 Approved by: https://github.com/mikaylagawarecki | diff --git a/test/test_nn.py b/test/test_nn.py
index f4f5b80a3c..ae58f688c5 100644
--- a/test/test_nn.py
+++ b/test/test_nn.py
@@ -11737,10 +11737,10 @@ if __name__ == '__main__':
# i.e. we don't count the ignored_idx at all.
check_equal(loss, (inp1, targ_positive_ignore_index), (inp2[1:], targ_positive_ignore_index[1:]))
- # Ref: https://github.com/pytorch/pytorch/issue/85005
+ # Ref: https://github.com/pytorch/pytorch/issues/85005
@onlyCUDA
@largeTensorTest("45GB", "cpu")
- @largeTensorTest("45GB", "cuda")
+ @largeTensorTest("70GB", "cuda")
@parametrize_test("reduction", ("none", "mean", "sum"))
def test_cross_entropy_large_tensor(self, device, reduction):
logits = torch.randn(int(2 ** 16), int(2 ** 16) + 1, dtype=torch.float32, device='cuda', requires_grad=True) | 2.41.0 |
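Back-of-the-envelope check of the new threshold; only the reserved-bytes figure comes from the PR description, the arithmetic is added here:

```
reserved = 68729962496    # torch.cuda.max_memory_reserved() value quoted above
print(reserved / 2**20)   # 65546.0 MiB, matching "about 65546 MiB"
print(reserved / 2**30)   # ~64.01 GiB -> the old 45GB cap was too small, 70GiB leaves headroom
```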
346ec8263ee0695b7706cf38e7fd4063ad69896 | Thu, 11 Apr 2024 09:31:29 -0700 | [PATCH 0105/1000] [BE] Document what is tested in TestOptim (#123853) | Pull Request resolved: https://github.com/pytorch/pytorch/pull/123853 Approved by: https://github.com/soulitzer | diff --git a/test/test_optim.py b/test/test_optim.py
index d11fe8d42f..7f4a352c48 100644
--- a/test/test_optim.py
+++ b/test/test_optim.py
@@ -42,6 +42,48 @@ def drosenbrock(tensor):
@markDynamoStrictTest
class TestOptimRenewed(TestCase):
+ """
+ This test class validates the core optimizers and is structured as the correctness of:
+ - The update algorithms (forloop implementation)
+ * Every optimizer's algorithm is most readably implemented through a big for-loop
+ over all the parameters, which is what we refer to as the forloop or single tensor
+ implementation. These algorithms are manually validated by comparing to the paper
+ and systematically validated by assuring that the loss goes the right direction
+ when the optimizer has been applied.
+ * This implementation should compose with optimizer hyperparameters well, such as
+ supporting Tensor LRs, the capturable API, and sparse and complex parameters.
+ - Each varying implementation
+ * We then have implementations that improve upon the performance of the forloop
+ implementation by leveraging fusion, namely our foreach (mult_tensor) and fused
+ implementations.
+ * These variations are validated numerically by comparing with the forloop version
+ of the optimizer. In fact, we test most variations this way--we see the forloop
+ implementation as the ground truth and expect that improvements to it in any way
+ should be just as correct.
+ * Both params and optimizer states should be validated numerically.
+ - state_dict APIs
+ * The optimizer instance should be serializable
+ * Calling save and load should be deterministic
+ * Moving between devices should be seamless
+ * BC - load_state_dict should be able to handle older optimizer states
+ - Hook APIs (everything should fire in the right order)
+ - LR Scheduler integration (composing should not error + should go the right direction)
+ - Parameter groups (should be equivalent to having multiple optimizers)
+ - Erroring (what should error should error)
+
+ We also cover different ways of generating parameters and grads:
+ - With parameters, we either generate them randomly given specific shapes or we take
+ them from a sample NN module.
+ * Variety is important here because NN modules have type Parameter and randomly
+ generated tensors have type Tensor.
+ * Parameters can be sparse for a subset of the optimizers (check out OptimizerInfo)
+ * Complex parameters should be handled using view_as_real
+ * Parameters can be spread across different devices and different dtypes for any
+ given optimizer
+ * Parameters can be contiguous and noncontiguous
+ - With grads, we follow suit from the parameters.
+ * Grads can also be None, empty, or zero-valued, and this should not disrupt training.
+ """
@onlyCPU
@optims(optim_db) | 2.41.0 |
62e19606e2555361d2cdeb64d914a4cab93d728 | Fri, 12 Apr 2024 08:52:55 -0700 | [PATCH 0106/1000] [quant] Enable backward for choose_qparams_per_token_asymmetric (#123452) | Summary: When running the backward for this op, we get the error: ``` RuntimeError: derivative for aten::aminmax is not implemented ``` This commit replaces this call with separate amin and amax calls instead, which do have implemented derivatives. Test Plan: python test/test_quantization.py -k test_decomposed_choose_qparams_per_token_asymmetric_backward Reviewers: jerryzh168, digantdesai Subscribers: jerryzh168, digantdesai, supriyar Differential Revision: [D55805170](https://our.internmc.facebook.com/intern/diff/D55805170) Pull Request resolved: https://github.com/pytorch/pytorch/pull/123452 Approved by: https://github.com/digantdesai, https://github.com/jerryzh168, https://github.com/zou3519 | diff --git a/test/quantization/core/test_quantized_tensor.py b/test/quantization/core/test_quantized_tensor.py
index b2bd97bdc3..228f1f8ee7 100644
--- a/test/quantization/core/test_quantized_tensor.py
+++ b/test/quantization/core/test_quantized_tensor.py
@@ -1602,6 +1602,14 @@ class TestQuantizedTensor(TestCase):
self.assertEqual(quantized_X.int_repr(), quantized_decomposed_X)
self.assertEqual(dequantized_X, dequantized_decomposed_X)
+ def test_decomposed_choose_qparams_per_token_asymmetric_backward(self):
+ # register the ops
+ import torch.ao.quantization.fx._decomposed
+ x = torch.randn(2, 3).requires_grad_()
+ (s, zp) = torch.ops.quantized_decomposed.choose_qparams_per_token_asymmetric(x, torch.int8)
+ out = x.div(s).add(zp).round()
+ out.sum().backward()
+
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_quantization.py TESTNAME\n\n"
diff --git a/torch/ao/quantization/fx/_decomposed.py b/torch/ao/quantization/fx/_decomposed.py
index 18dd61c37c..2ffe8f21ce 100644
--- a/torch/ao/quantization/fx/_decomposed.py
+++ b/torch/ao/quantization/fx/_decomposed.py
@@ -638,7 +638,6 @@ def choose_qparams_per_token_meta(
)
-# TODO: move this to https://github.com/pytorch/pytorch/blob/main/torch/ao/quantization/fx/_decomposed.py
quantized_decomposed_lib.define(
"choose_qparams_per_token_asymmetric(Tensor input, ScalarType dtype) -> (Tensor, Tensor)"
)
@@ -647,7 +646,7 @@ quantized_decomposed_lib.define(
@impl(
quantized_decomposed_lib,
"choose_qparams_per_token_asymmetric",
- "CompositeExplicitAutograd",
+ "CompositeImplicitAutograd",
)
def choose_qparams_per_token_asymmetric(
input: torch.Tensor,
@@ -667,7 +666,8 @@ def choose_qparams_per_token_asymmetric(
"""
# Based on https://github.com/google/XNNPACK/blob/df156f0cf3db5a4576cc711123eeb54915f82ffc/src/xnnpack/quantization.h#L18
qmin, qmax = -128, 127
- min_val, max_val = torch.aminmax(input, dim=-1, keepdim=True)
+ min_val = torch.amin(input, dim=-1, keepdim=True)
+ max_val = torch.amax(input, dim=-1, keepdim=True)
min_val_neg = torch.min(min_val, torch.zeros_like(min_val))
max_val_pos = torch.max(max_val, torch.zeros_like(max_val))
eps = torch.finfo(torch.float32).eps # use xnnpack eps?
@@ -691,21 +691,6 @@ def choose_qparams_per_token_asymmetric(
return scale.to(torch.float32), zero_point.to(torch.float32)
-@impl(
- quantized_decomposed_lib,
- "choose_qparams_per_token_asymmetric",
- "Meta",
-)
-def choose_qparams_per_token_asymmetric_meta(
- input: torch.Tensor,
- dtype: torch.dtype,
-) -> Tuple[torch.Tensor, torch.Tensor]:
- size = (1, input.size(-1))
- return torch.empty(size, dtype=torch.double, device=input.device), torch.empty(
- size, dtype=torch.int64, device=input.device
- )
-
-
def _per_token_quant_qparam_dim_check(input, scales, zero_points):
num_tokens = math.prod(list(input.size())[:-1])
assert ( | 2.41.0 |
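A standalone repro of the autograd limitation the patch works around (shapes are arbitrary):

```
import torch

x = torch.randn(2, 3, requires_grad=True)

# torch.aminmax has no registered derivative, so differentiating through it raises
# "derivative for aten::aminmax is not implemented":
#   mn, mx = torch.aminmax(x, dim=-1, keepdim=True)

# Separate reductions do have derivatives, which is what the op now uses:
mn = torch.amin(x, dim=-1, keepdim=True)
mx = torch.amax(x, dim=-1, keepdim=True)
(mx - mn).sum().backward()
print(x.grad.shape)  # torch.Size([2, 3])
```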
9deff689fabc87941bf7a8ea5db987a01e125f8 | Fri, 12 Apr 2024 20:13:16 +0000 | [PATCH 0107/1000] Update compile doc to suggest Module.compile (#123951) | This is for users for whom the FQN change is problematic. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123951 Approved by: https://github.com/msaroufim | diff --git a/torch/__init__.py b/torch/__init__.py
index 3a10130d5f..73e91dd316 100644
--- a/torch/__init__.py
+++ b/torch/__init__.py
@@ -1794,6 +1794,8 @@ def compile(model: Optional[Callable] = None, *,
disable: builtins.bool = False) -> Callable:
"""
Optimizes given model/function using TorchDynamo and specified backend.
+ If you are compiling an :class:`torch.nn.Module`, you can also use :meth:`torch.nn.Module.compile`
+ to compile the module inplace without changing its structure.
Concretely, for every frame executed within the compiled region, we will attempt
to compile it and cache the compiled result on the code object for future | 2.41.0 |
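The practical difference the doc now points at, sketched with a placeholder module: `torch.compile(mod)` returns an `OptimizedModule` wrapper whose state-dict keys gain an `_orig_mod.` prefix, while `mod.compile()` compiles in place and keeps the original fully-qualified names.

```
import torch

mod = torch.nn.Linear(4, 4)

wrapped = torch.compile(mod)             # OptimizedModule wrapper
print(next(iter(wrapped.state_dict())))  # '_orig_mod.weight' -> FQNs change

mod.compile()                            # in-place, structure and FQNs unchanged
print(next(iter(mod.state_dict())))      # 'weight'
```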
65aa5af6ed429be295c9ac57895186fc33dd4f8 | Fri, 12 Apr 2024 21:19:41 +0000 | [PATCH 0109/1000] [Pytorch] doc sync-stream-and-free-HBM counter in memory_stats (#123799) | Differential Revision: D56000503 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123799 Approved by: https://github.com/malfet | diff --git a/torch/cuda/memory.py b/torch/cuda/memory.py
index 60440c58dc..857a13a06c 100644
--- a/torch/cuda/memory.py
+++ b/torch/cuda/memory.py
@@ -210,6 +210,11 @@ def memory_stats(device: Union[Device, int] = None) -> Dict[str, Any]:
- ``"num_alloc_retries"``: number of failed ``cudaMalloc`` calls that
result in a cache flush and retry.
- ``"num_ooms"``: number of out-of-memory errors thrown.
+ - ``"num_sync_all_streams"``: number of ``synchronize_and_free_events`` calls.
+ - ``"num_device_alloc"``: number of CUDA allocation calls. This includes both
+ cuMemMap and cudaMalloc.
+ - ``"num_device_free"``: number of CUDA free calls. This includes both cuMemUnmap
+ and cudaFree.
The caching allocator can be configured via ENV to not split blocks larger than a
defined size (see Memory Management section of the Cuda Semantics documentation). | 2.41.0 |
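A quick way to read the newly documented counters (values are workload dependent):

```
import torch

if torch.cuda.is_available():
    torch.randn(1024, 1024, device="cuda")  # force at least one device allocation
    stats = torch.cuda.memory_stats()
    for key in ("num_sync_all_streams", "num_device_alloc", "num_device_free"):
        print(key, stats.get(key))
```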
b11fb4695c61e04c7032e039f0a9f02636e0176 | Fri, 12 Apr 2024 18:24:20 +0000 | [PATCH 0111/1000] [Dynamo] fix opcode `YIELD_FROM` and `SEND` (#123912) | This PR is split from #120300. - #120300 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123912 Approved by: https://github.com/anijain2305 | diff --git a/test/dynamo/test_misc.py b/test/dynamo/test_misc.py
index f64f33a5b9..e760cce7fd 100644
--- a/test/dynamo/test_misc.py
+++ b/test/dynamo/test_misc.py
@@ -8729,7 +8729,7 @@ def ___make_guard_fn():
return [t * k for t in yield_from_gen(t_list)]
- t_list = [torch.randn([2, 3])] * 3
+ t_list = [torch.randn([2, 3]) for _ in range(3)]
eager = yield_from_fn(t_list, 2)
counter = CompileCounter()
compiled = torch._dynamo.optimize(counter)(yield_from_fn)(t_list, 2)
@@ -8778,6 +8778,34 @@ def ___make_guard_fn():
self.assertEqual(eager, compiled)
self.assertEqual(counter.frame_count, 1)
+ def test_yield_from_user_stop_iteration(self):
+ class MyIter:
+ def __init__(self, seq):
+ self.seq = seq
+ self.index = 0
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ self.index += 1
+ if self.index <= len(self.seq):
+ return self.seq[self.index - 1]
+ raise StopIteration(self.index)
+
+ def yield_from_iter_fn(seq):
+ def gen(seq):
+ yield from MyIter(seq)
+
+ return [i for i in gen(seq)]
+
+ seq = [torch.randn([2, 3]) for _ in range(3)]
+ eager = yield_from_iter_fn(seq)
+ counter = CompileCounter()
+ compiled = torch._dynamo.optimize(counter)(yield_from_iter_fn)(seq)
+ self.assertEqual(eager, compiled)
+ self.assertEqual(counter.frame_count, 0)
+
def test_yield_send_to_subgenerator_graph_break(self):
def subgenerator(tensor):
multiplier = yield
diff --git a/test/dynamo_expected_failures/TestFXAPIBackwardCompatibility.test_public_api_surface b/test/dynamo_expected_failures/TestFXAPIBackwardCompatibility.test_public_api_surface
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/test_fx.py b/test/test_fx.py
index 18f842d670..085fa33265 100644
--- a/test/test_fx.py
+++ b/test/test_fx.py
@@ -576,7 +576,6 @@ class TestFX(JitTestCase):
with self.assertRaisesRegex(AssertionError, "doesn't exist in"):
tracer.trace(f)
-
def test_graph_unique_names(self):
class M(torch.nn.Module):
def forward(self, a, b):
@@ -814,7 +813,6 @@ class TestFX(JitTestCase):
# Return final GraphModule!!!
return GraphModule(wrapper, graph)
-
# Lower GraphModule to C++ interpreter
lowered = lower_to_elementwise_interpreter(msm)
@@ -870,7 +868,6 @@ class TestFX(JitTestCase):
x = self.lin(x)
return x
-
ec = ExampleCode()
traced = torch.fx.symbolic_trace(ec)
@@ -878,7 +875,6 @@ class TestFX(JitTestCase):
x = torch.randn(bs, d_hid)
torch.testing.assert_close(ec(x), traced(x))
-
def test_node_tagging(self):
class TaggingTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
@@ -952,7 +948,6 @@ class TestFX(JitTestCase):
traced.graph.lint()
self.assertEqual(count_attrs(traced), 2)
-
def test_symbolic_trace_sequential(self):
class Simple(torch.nn.Module):
def forward(self, x):
@@ -1486,7 +1481,6 @@ class TestFX(JitTestCase):
self.assertTrue(neg in relu.users)
-
def test_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(3, 4)
symbolic_trace(eb)
@@ -1506,7 +1500,6 @@ class TestFX(JitTestCase):
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return (x, x + x)
-
original = M()
traced = symbolic_trace(original)
self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))
@@ -1801,7 +1794,6 @@ class TestFX(JitTestCase):
self.assertEqual(node.meta["stack_trace"], "stack_trace")
self.assertEqual(node.meta["source_fn_stack"], "source_fn_stack")
-
def test_interpreter(self):
class MyModule(torch.nn.Module):
def __init__(self):
@@ -2174,7 +2166,6 @@ class TestFX(JitTestCase):
for node in to_erase:
rn18_traced.graph.erase_node(node)
-
def test_replace_input(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
@@ -2217,7 +2208,6 @@ class TestFX(JitTestCase):
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
-
b.update_arg(0, y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
@@ -2233,7 +2223,6 @@ class TestFX(JitTestCase):
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
-
b.update_kwarg('input', y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
@@ -2391,7 +2380,6 @@ class TestFX(JitTestCase):
x, y = torch.randn(3, 4), torch.randn(3, 4)
self.checkGraphModule(foo, (x, y))
-
def test_trace_return_dataclass(self):
"""
Test case for Module that return dataclass
@@ -2449,7 +2437,6 @@ class TestFX(JitTestCase):
self.assertEqual(module(x), gm(x))
-
def test_trace_return_namedtuple(self):
"""
Test case for Module that return namedtuple
@@ -2462,7 +2449,6 @@ class TestFX(JitTestCase):
def forward(self, d : torch.Tensor):
return MyOutput(foo=d, bar=d)
-
module = ModuleReturnNamedTuple()
traced_graph = symbolic_trace(module).graph
@@ -2748,7 +2734,6 @@ class TestFX(JitTestCase):
proc.join()
self.assertEqual(proc.exitcode, 0)
-
def test_user_friendly_call_provenance_with_function(self):
def fn(x):
return wrapper_fn(x)
@@ -3597,7 +3582,7 @@ class TestFX(JitTestCase):
def verify_pytree(f, inp):
val = pytree.tree_map(lambda x: torch.randn(3) if isinstance(x, PHBase) else x, inp)
- num_flat_args = len([i == PH for i in pytree.tree_leaves(inp)])
+ num_flat_args = len(pytree.tree_leaves(inp))
orig_out = f(val)
nf = symbolic_trace(f, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
@@ -3867,7 +3852,6 @@ def forward(self, args_list: List[torch.Tensor]){maybe_return_annotation}:
m.graph.lint()
-
def run_getitem_target():
from torch.fx._symbolic_trace import _wrapped_methods_to_patch
_wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
diff --git a/torch/_dynamo/exc.py b/torch/_dynamo/exc.py
index 712f8377bc..1e6805bf60 100644
--- a/torch/_dynamo/exc.py
+++ b/torch/_dynamo/exc.py
@@ -159,8 +159,16 @@ class UserError(Unsupported):
class UserStopIteration(TorchDynamoException):
- def __init__(self):
+ value: Optional[Any]
+
+ # Reference `StopIteration_init` in CPython
+ # https://github.com/python/cpython/blob/3.11/Objects/exceptions.c#L568-L584
+ def __init__(self, *args, **kwargs):
super().__init__("unhandled `raise StopIteration`")
+ if len(args) > 0:
+ self.value = args[0]
+ else:
+ self.value = None
class UncapturedHigherOrderOpError(TorchDynamoException):
diff --git a/torch/_dynamo/symbolic_convert.py b/torch/_dynamo/symbolic_convert.py
index 2b6a525d16..ff97bfa973 100644
--- a/torch/_dynamo/symbolic_convert.py
+++ b/torch/_dynamo/symbolic_convert.py
@@ -1,4 +1,5 @@
import collections
+import collections.abc
import contextlib
import copy
import dataclasses
@@ -1179,7 +1180,7 @@ class InstructionTranslatorBase(
# Python 3.8 only
addr = self.indexof[self.next_instruction]
self.push(ConstantVariable.create(addr))
- self.instruction_pointer = self.indexof[inst.target]
+ self.jump(inst)
def END_FINALLY(self, inst):
# Python 3.8 only
@@ -2636,7 +2637,6 @@ class InliningGeneratorInstructionTranslator(InliningInstructionTranslator):
def YIELD_VALUE(self, inst: Instruction):
self.generated_items.append(self.pop())
- # TODO(jansel): figure out why this is needed, it isn't in the docs for YIELD_VALUE
self.push(ConstantVariable.create(None))
def GET_YIELD_FROM_ITER(self, inst):
@@ -2645,61 +2645,61 @@ class InliningGeneratorInstructionTranslator(InliningInstructionTranslator):
self.pop()
res = BuiltinVariable(iter).call_function(self, [tos], {})
self.push(res)
- return self.YIELD_FROM(inst)
def YIELD_FROM(self, inst):
- while True:
- tos = self.stack[-1].realize()
- if isinstance(tos, ConstantVariable) and tos.value is None:
- self.pop()
- return
- try:
- val = tos.next_variable(self)
-
- # TODO(anijain2305,jansel) - The last pop is because
- # YIELD_FROM. If we remove it from there, we don't need to
- # pop it here.
- self.push(val)
- self.YIELD_VALUE(inst)
- self.pop()
-
- # Pop the old iter and push the new iter
- self.pop()
- self.push(tos)
- except (StopIteration, exc.UserStopIteration):
- return
-
- def SEND(self, inst):
assert len(self.stack) >= 2
val = self.pop()
tos = self.stack[-1]
- if isinstance(tos, ListIteratorVariable):
- # We handle yield in a very differnt way than CPython does. Instead
- # of returning to the parent frame on a yield, TorchDynamo instead
- # just collects the generated_items and proceed to the next
- # instruction in the same frame. From bytecode tracing stanpoint,
- # this means that the iterator returned from the child funtion on
- # `yield from ...` will always be exhausted.
-
- # Therefore to implement SEND, we have to look at the implementation
- # when the iterator returns StopIteration. This translates to this code
- # 3.11 - https://github.com/python/cpython/blob/3.11/Python/ceval.c#L2613-L2618
- # 3.12 - https://github.com/python/cpython/blob/3.12/Python/bytecodes.c#L863-L865
- # The implementation is different in 3.11 and 3.12. In 3.12, we rely
- # on END_SEND to clean up. In 3.11, SEND does the cleanup as well.
+ if not (isinstance(val, ConstantVariable) and val.value is None):
+ # invoke send
+ # Unreachable code - if you hit this, you are implementing generator support and have
+ # lifted the `unimplemented("generator")` in frame conversion. This codepath handles
+ # subgenerator and lines up with this line in Python 3.10
+ # https://github.com/python/cpython/blob/3.10/Python/ceval.c#L2599
+ unimplemented("Unreachable sub-generator code")
- if sys.version_info >= (3, 12):
- # Do not pop, we will rely on END_SEND to pop the iterator
- pass
- else:
- # Check that the iterator is exhausted. It should be because of
- # how we implement yields.
- assert tos.is_exhausted()
- self.pop()
+ try:
+ val = tos.next_variable(self)
+ except (StopIteration, exc.UserStopIteration) as ex:
+ # The iterator is exhausted. Stop the loop and return.
+ self.pop()
+ self.push(ConstantVariable.create(ex.value))
+ else:
+ self.push(val)
+ # Add the value to yield into generated_items and replace the top of the stack with None
+ self.YIELD_VALUE(inst)
+ # Repeat the YIELD_FROM instruction in the next eval loop
+ assert (
+ isinstance(self.instruction_pointer, int)
+ and self.instruction_pointer > 0
+ )
+ self.instruction_pointer -= 1
+
+ def SEND(self, inst):
+ assert len(self.stack) >= 2
+ val = self.pop()
+ tos = self.stack[-1]
+ if isinstance(tos, ListIteratorVariable) or (
+ isinstance(tos, UserDefinedObjectVariable)
+ and isinstance(tos.value, collections.abc.Iterator)
+ ):
if isinstance(val, ConstantVariable) and val.value is None:
- self.push(val)
- self.instruction_pointer = self.indexof[inst.target]
+ try:
+ val = tos.next_variable(self)
+ except (StopIteration, exc.UserStopIteration) as ex:
+ # To implement SEND, we have to look at the implementation
+ # when the iterator returns StopIteration. This translates to this code
+ # 3.11: https://github.com/python/cpython/blob/3.11/Python/ceval.c#L2613-L2619
+ # 3.12: https://github.com/python/cpython/blob/3.12/Python/bytecodes.c#L863-L866
+ # The implementation is different in 3.11 and 3.12. In 3.12, we rely
+ # on END_SEND to clean up. In 3.11, SEND does the cleanup as well.
+ if sys.version_info < (3, 12):
+ self.pop() # Python 3.12 uses new opcode END_SEND
+ self.push(ConstantVariable.create(ex.value))
+ self.jump(inst)
+ else:
+ self.push(val)
else:
# invoke send
# Unreachable code - if you hit this, you are implementing generator support and have
diff --git a/torch/_dynamo/variables/lists.py b/torch/_dynamo/variables/lists.py
index 1231eac8ca..a23ebe05b6 100644
--- a/torch/_dynamo/variables/lists.py
+++ b/torch/_dynamo/variables/lists.py
@@ -678,9 +678,6 @@ class ListIteratorVariable(VariableTracker):
]
)
- def is_exhausted(self):
- return self.index >= len(self.items)
-
class TupleIteratorVariable(ListIteratorVariable):
pass | 2.41.0 |
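The `ex.value` that the reworked YIELD_FROM/SEND handlers above push onto the stack mirrors plain CPython semantics: a generator's `return x` surfaces as `StopIteration(x)`, and `yield from` hands that value back to the caller. A minimal, illustrative Python sketch of that behavior (not part of the patch):

    def inner():
        yield 1
        return "done"                 # surfaces as StopIteration("done")

    def outer():
        result = yield from inner()   # result == "done" once inner is exhausted
        yield result

    gen = inner()
    assert next(gen) == 1
    try:
        next(gen)
    except StopIteration as ex:
        assert ex.value == "done"     # the value UserStopIteration now records

    assert list(outer()) == [1, "done"]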
dde6a461f3fff53104344bf4f9c16dec0b3ff86 | Fri, 12 Apr 2024 22:31:57 +0000 | [PATCH 0113/1000] fix cpp path in torch/_C/_autograd.pyi (#123924) | The file `tools/autograd/init.cpp` does not exist; I think the right path is `torch/csrc/autograd/init.cpp`. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123924 Approved by: https://github.com/Skylion007 | diff --git a/torch/_C/_autograd.pyi b/torch/_C/_autograd.pyi
index 7f15c1cd12..92b21f96df 100644
--- a/torch/_C/_autograd.pyi
+++ b/torch/_C/_autograd.pyi
@@ -10,7 +10,7 @@ from ._profiler import (
ProfilerConfig,
)
-# Defined in tools/autograd/init.cpp
+# Defined in torch/csrc/autograd/init.cpp
class DeviceType(Enum):
CPU = ... | 2.41.0 |
e98bdd66d2b051a918e58d5f7bb80b366677bf8 | Fri, 12 Apr 2024 23:30:56 +0000 | [PATCH 0115/1000] [dynamo] Turn on CPP guard manager (#123547) | As title Pull Request resolved: https://github.com/pytorch/pytorch/pull/123547 Approved by: https://github.com/jansel | diff --git a/torch/_dynamo/config.py b/torch/_dynamo/config.py
index b94f523d14..e45aad8fa2 100644
--- a/torch/_dynamo/config.py
+++ b/torch/_dynamo/config.py
@@ -339,7 +339,7 @@ numpy_default_int = "int64"
use_numpy_random_stream = False
# Use C++ guard manager
-enable_cpp_guard_manager = os.environ.get("TORCHDYNAMO_CPP_GUARD_MANAGER", "0") == "1"
+enable_cpp_guard_manager = os.environ.get("TORCHDYNAMO_CPP_GUARD_MANAGER", "1") == "1"
# Inline inbuilt nn modules
inline_inbuilt_nn_modules = ( | 2.41.0 |
85cd117e64f51b998049e66aeac1728aadcf8b6 | Fri, 12 Apr 2024 23:33:11 +0000 | [PATCH 0116/1000] [nccl-pg] print broadcast NCCL unique id duration (#123963) | Summary: Print NCCL PG broadcast NCCL unique id duration for measurement. Differential Revision: D56048059 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123963 Approved by: https://github.com/wconstab | diff --git a/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp b/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp
index bf34066101..66e5e00504 100644
--- a/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp
+++ b/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp
@@ -1945,7 +1945,16 @@ std::shared_ptr<NCCLComm> ProcessGroupNCCL::getNCCLComm(
// For point-to-point communication on the same process, don't need broadcast.
if (!isSendRecvSelf) {
// Broadcast so that each process can have a unique NCCL ID
+ auto timeStarted = std::chrono::steady_clock::now();
broadcastUniqueNCCLID(&ncclID, singleP2POp, deviceKey, p2pRank);
+ auto timerDeltaMs =
+ std::chrono::duration_cast<std::chrono::duration<double>>(
+ std::chrono::steady_clock::now() - timeStarted)
+ .count() *
+ 1000;
+ LOG(INFO) << logPrefix()
+ << "ProcessGroupNCCL broadcast unique ID through store took "
+ << timerDeltaMs << " ms";
}
at::cuda::OptionalCUDAGuard gpuGuard; | 2.41.0 |
61eb39348117221e284538f0ec0e95d2024b4e9 | Fri, 12 Apr 2024 13:49:02 -0700 | [PATCH 0117/1000] AOT logging: log fw_metadata with each graph (#118646) | Log fw_metadata for each AOT graph. This is helpful for seeing information about subclass graph inputs/outputs/tangents, and lots of other metadata. Pull Request resolved: https://github.com/pytorch/pytorch/pull/118646 Approved by: https://github.com/tugsbayasgalan, https://github.com/ezyang ghstack dependencies: #118645 | diff --git a/test/dynamo/test_logging.py b/test/dynamo/test_logging.py
index 21caf7b975..f2f507825a 100644
--- a/test/dynamo/test_logging.py
+++ b/test/dynamo/test_logging.py
@@ -85,7 +85,7 @@ def single_record_test(**kwargs):
class LoggingTests(LoggingTestCase):
test_bytecode = multi_record_test(2, bytecode=True)
test_output_code = multi_record_test(2, output_code=True)
- test_aot_graphs = multi_record_test(2, aot_graphs=True)
+ test_aot_graphs = multi_record_test(3, aot_graphs=True)
@requires_cuda
@make_logging_test(schedule=True)
diff --git a/torch/_functorch/_aot_autograd/dispatch_and_compile_graph.py b/torch/_functorch/_aot_autograd/dispatch_and_compile_graph.py
index abe6b009d7..de54569571 100644
--- a/torch/_functorch/_aot_autograd/dispatch_and_compile_graph.py
+++ b/torch/_functorch/_aot_autograd/dispatch_and_compile_graph.py
@@ -98,6 +98,12 @@ def aot_dispatch_base_graph(
meta=fw_metadata,
fw_only=flat_fn,
)
+ aot_graphs_log.debug(
+ "aot_config id: %s, fw_metadata=%s,subclass_metadata=%s",
+ str(aot_config.aot_id),
+ str(fw_metadata),
+ str(maybe_subclass_meta),
+ )
# We track buffer assignments when exporting in non-strict mode.
# (In contrast, strict mode errors on any attribute assignment.)
@@ -240,6 +246,12 @@ def aot_dispatch_autograd_graph(
joint_fn_to_trace = subclass_tracing_info.plain_tensor_trace_fn
updated_joint_inputs = subclass_tracing_info.plain_tensor_args
maybe_subclass_meta = subclass_tracing_info.maybe_subclass_meta
+ aot_graphs_log.debug(
+ "aot_config id: %s, fw_metadata=%s,subclass_metadata=%s",
+ str(aot_config.aot_id),
+ str(fw_metadata),
+ str(maybe_subclass_meta),
+ )
fx_g = _create_graph(joint_fn_to_trace, updated_joint_inputs, aot_config=aot_config)
diff --git a/torch/_functorch/_aot_autograd/schemas.py b/torch/_functorch/_aot_autograd/schemas.py
index e0e8b88398..dd4a6f9684 100644
--- a/torch/_functorch/_aot_autograd/schemas.py
+++ b/torch/_functorch/_aot_autograd/schemas.py
@@ -504,7 +504,7 @@ class SubclassMeta:
# in case we made incorrect assumptions about the subclass-ness of our grad_outputs
#
# Optional field because we don't compute for inference graphs
- grad_input_metas: Optional[List[Union[int, SubclassCreationMeta]]]
+ grad_input_metas: Optional[List[Union[int, SubclassCreationMeta]]] = None
def __init__(self):
# The fields in this class get set after its construction. | 2.41.0 |
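To actually see the new fw_metadata/subclass_metadata lines, the `aot_graphs` logging artifact has to be enabled; a small usage sketch, assuming the standard TORCH_LOGS / torch._logging knobs:

    # Either run with TORCH_LOGS="aot_graphs", or enable the artifact in code:
    import torch

    torch._logging.set_logs(aot_graphs=True)

    @torch.compile
    def f(x):
        return (x.sin() + 1).sum()

    f(torch.randn(8, requires_grad=True)).backward()
    # Each AOT graph is now logged together with a line of the form
    # "aot_config id: ..., fw_metadata=...,subclass_metadata=..."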
d6c5972c12e4742b2f2b2776f0b3421c0e1c678 | Fri, 12 Apr 2024 23:54:11 +0000 | [PATCH 0118/1000] [BE]: Optimize min/max/sum comprehensions C419 (#123960) | Automatic fixes that replace certain list comprehensions with generator ones where appropriate so that they are immediately consumed. This is preview functionality in ruff for rule C419, and it was applied automatically. Co-authored-by: Nikita Shulga <[email protected]> Pull Request resolved: https://github.com/pytorch/pytorch/pull/123960 Approved by: https://github.com/malfet | diff --git a/benchmarks/gpt_fast/benchmark.py b/benchmarks/gpt_fast/benchmark.py
index 0bbbbcb8f7..14d7477449 100644
--- a/benchmarks/gpt_fast/benchmark.py
+++ b/benchmarks/gpt_fast/benchmark.py
@@ -194,10 +194,8 @@ def run_experiment(
torch.manual_seed(1234)
model_size = sum(
- [
- p.numel() * p.dtype.itemsize
- for p in itertools.chain(model.parameters(), model.buffers())
- ]
+ p.numel() * p.dtype.itemsize
+ for p in itertools.chain(model.parameters(), model.buffers())
)
aggregate_metrics = {"tokens_per_sec": []}
diff --git a/benchmarks/tensorexpr/rnn_eltwise.py b/benchmarks/tensorexpr/rnn_eltwise.py
index 39cffe0bea..a923af9505 100644
--- a/benchmarks/tensorexpr/rnn_eltwise.py
+++ b/benchmarks/tensorexpr/rnn_eltwise.py
@@ -57,7 +57,7 @@ class RNNEltwise(benchmark.Benchmark):
def memsize(t):
return t.numel() * t.element_size()
- input_size = sum([memsize(t) for t in self.inputs])
+ input_size = sum(memsize(t) for t in self.inputs)
output_size = 2 * memsize(self.cx)
io_size = input_size + output_size
return {"sol": io_size, "algorithmic": io_size}
diff --git a/scripts/compile_tests/failures_histogram.py b/scripts/compile_tests/failures_histogram.py
index c0b2787bd2..6e65888a1c 100644
--- a/scripts/compile_tests/failures_histogram.py
+++ b/scripts/compile_tests/failures_histogram.py
@@ -97,7 +97,7 @@ def failures_histogram(eager_dir, dynamo_dir, verbose=False, format_issues=False
else "(num_failed_tests, error_msg, sample_test)"
)
print(header)
- sum_counts = sum([r[0] for r in result])
+ sum_counts = sum(r[0] for r in result)
for row in result:
if format_issues:
print(as_issue(*row))
diff --git a/test/distributed/optim/test_zero_redundancy_optimizer.py b/test/distributed/optim/test_zero_redundancy_optimizer.py
index bf79d2bdc5..b84d96cb0f 100644
--- a/test/distributed/optim/test_zero_redundancy_optimizer.py
+++ b/test/distributed/optim/test_zero_redundancy_optimizer.py
@@ -530,7 +530,7 @@ class TestZeroRedundancyOptimizerDistributed(TestZeroRedundancyOptimizer):
params.append(torch.rand(size, 1))
o = ZeroRedundancyOptimizer(params, optimizer_class=SGD, lr=LR)
self.assertEqual(
- sum([x.numel() for x in o.optim.param_groups[0]["params"]]),
+ sum(x.numel() for x in o.optim.param_groups[0]["params"]),
sum(sizes),
)
@@ -567,7 +567,7 @@ class TestZeroRedundancyOptimizerDistributed(TestZeroRedundancyOptimizer):
# all partitions have the same elements
self.assertEqual(len(o.param_groups), 2)
self.assertEqual(
- sum([x.numel() for g in o.optim.param_groups for x in g["params"]]),
+ sum(x.numel() for g in o.optim.param_groups for x in g["params"]),
sum(sizes),
)
self.assertEqual(len(o.optim.param_groups), 2)
diff --git a/test/distributed/pipeline/sync/test_transparency.py b/test/distributed/pipeline/sync/test_transparency.py
index e9a312745b..88456b407d 100644
--- a/test/distributed/pipeline/sync/test_transparency.py
+++ b/test/distributed/pipeline/sync/test_transparency.py
@@ -15,7 +15,7 @@ from torch.testing._internal.common_utils import run_tests
def test_simple_linears(setup_rpc):
def sum_grad(parameters):
- return sum([p.grad.sum() for p in parameters if p.grad is not None])
+ return sum(p.grad.sum() for p in parameters if p.grad is not None)
def zero_grad(parameters):
for p in parameters:
diff --git a/test/functorch/discover_coverage.py b/test/functorch/discover_coverage.py
index 4e21be4cbe..80cacddec6 100644
--- a/test/functorch/discover_coverage.py
+++ b/test/functorch/discover_coverage.py
@@ -254,11 +254,11 @@ def get_ops_percentage(torch_threshold, nn_fn_threshold):
# get all operators that are not in the denylist
all_ops = get_top_ops(999999, 999999)
- total_op_usages = sum([get_num_usages(op) for op in all_ops])
+ total_op_usages = sum(get_num_usages(op) for op in all_ops)
# get subset of all operators
subset_ops = get_top_ops(torch_threshold, nn_fn_threshold)
- subset_op_usages = sum([get_num_usages(op) for op in subset_ops])
+ subset_op_usages = sum(get_num_usages(op) for op in subset_ops)
return subset_op_usages / total_op_usages
diff --git a/test/functorch/test_ops.py b/test/functorch/test_ops.py
index e0078970c6..5269fc47f3 100644
--- a/test/functorch/test_ops.py
+++ b/test/functorch/test_ops.py
@@ -467,7 +467,7 @@ class TestOperators(TestCase):
# Reduce into single value for grad
if isinstance(result, torch.Tensor):
return abs_if_complex(result.sum())
- result = sum([abs_if_complex(res.sum()) for res in result])
+ result = sum(abs_if_complex(res.sum()) for res in result)
return result
result = grad(wrapped_fn, diff_argnums)(*args, **kwargs)
diff --git a/test/inductor/test_fx_fusion.py b/test/inductor/test_fx_fusion.py
index 25e256a785..ba65a5b3a7 100644
--- a/test/inductor/test_fx_fusion.py
+++ b/test/inductor/test_fx_fusion.py
@@ -30,7 +30,7 @@ def chain_passes(*passes: PassFunc) -> PassFunc:
def count_call(module: torch.fx.GraphModule, op: str, target_op: Any) -> int:
return sum(
- [1 if (n.op == op and n.target == target_op) else 0 for n in module.graph.nodes]
+ 1 if (n.op == op and n.target == target_op) else 0 for n in module.graph.nodes
)
diff --git a/test/nn/test_pruning.py b/test/nn/test_pruning.py
index 931e196eba..fa268ed311 100644
--- a/test/nn/test_pruning.py
+++ b/test/nn/test_pruning.py
@@ -892,11 +892,11 @@ class TestPruningNN(NNTestCase):
# Pruning one of them causes one of the weights to become a tensor
prune.l1_unstructured(l, "weight_ih_l0", 0.5)
- assert sum([isinstance(p, torch.nn.Parameter) for p in l._flat_weights]) == 3
+ assert sum(isinstance(p, torch.nn.Parameter) for p in l._flat_weights) == 3
# Removing the pruning reparametrization restores the Parameter
prune.remove(l, "weight_ih_l0")
- assert sum([isinstance(p, torch.nn.Parameter) for p in l._flat_weights]) == 4
+ assert sum(isinstance(p, torch.nn.Parameter) for p in l._flat_weights) == 4
# Make sure that, upon removal of the reparametrization, the
# `._parameters` and `.named_parameters` contain the right params.
diff --git a/test/onnx/test_fx_op_consistency.py b/test/onnx/test_fx_op_consistency.py
index af47dfe818..9a1160939a 100644
--- a/test/onnx/test_fx_op_consistency.py
+++ b/test/onnx/test_fx_op_consistency.py
@@ -1443,7 +1443,7 @@ SKIP_XFAIL_SUBTESTS_WITH_MATCHER_AND_MODEL_TYPE: tuple[
),
skip(
"linalg.multi_dot",
- matcher=lambda sample: sum([torch.numel(input) for input in sample.input]) == 0,
+ matcher=lambda sample: sum(torch.numel(input) for input in sample.input) == 0,
reason="fixme: Undefined",
),
skip(
diff --git a/test/profiler/test_profiler.py b/test/profiler/test_profiler.py
index fb2c247917..66ebe31781 100644
--- a/test/profiler/test_profiler.py
+++ b/test/profiler/test_profiler.py
@@ -3016,10 +3016,9 @@ class TestExperimentalUtils(TestCase):
for event_key, event_metrics in metrics.items():
self.assertEqual(
event_metrics.self_time_ns,
- event_key.event.duration_time_ns - sum([
+ event_key.event.duration_time_ns - sum(
child.duration_time_ns
- for child in event_key.event.children
- ]))
+ for child in event_key.event.children))
def test_utils_intervals_overlap(self):
event = _utils.EventKey(MockProfilerEvent("Event 1", 1, 5, 5))
diff --git a/test/quantization/fx/test_model_report_fx.py b/test/quantization/fx/test_model_report_fx.py
index c7f4fe595d..ee64bc3c79 100644
--- a/test/quantization/fx/test_model_report_fx.py
+++ b/test/quantization/fx/test_model_report_fx.py
@@ -1346,7 +1346,7 @@ class TestFxDetectInputWeightEqualization(QuantizationTestCase):
# assert that each of the desired modules have the observers inserted
for fqn, module in prepared_for_callibrate_model.named_modules():
# check if module is a supported module
- is_in_include_list = sum([isinstance(module, x) for x in mods_to_check]) > 0
+ is_in_include_list = sum(isinstance(module, x) for x in mods_to_check) > 0
if is_in_include_list:
# make sure it has the observer attribute
@@ -1563,7 +1563,7 @@ class TestFxDetectOutliers(QuantizationTestCase):
obs_name_to_find = InputWeightEqualizationDetector.DEFAULT_PRE_OBSERVER_NAME
number_of_obs_found = sum(
- [1 if obs_name_to_find in str(node.target) else 0 for node in prepared_for_callibrate_model.graph.nodes]
+ 1 if obs_name_to_find in str(node.target) else 0 for node in prepared_for_callibrate_model.graph.nodes
)
self.assertEqual(number_of_obs_found, correct_number_of_obs_inserted)
@@ -1753,7 +1753,7 @@ class TestFxDetectOutliers(QuantizationTestCase):
assert sum(counts_info) >= 2
# half of the recorded max values should be what we set
- matched_max = sum([val == 3.28e8 for val in module_dict[OutlierDetector.MAX_VALS_KEY]])
+ matched_max = sum(val == 3.28e8 for val in module_dict[OutlierDetector.MAX_VALS_KEY])
self.assertEqual(matched_max, param_size / 2)
diff --git a/test/test_bundled_inputs.py b/test/test_bundled_inputs.py
index db3c8df9b8..1bf938506f 100644
--- a/test/test_bundled_inputs.py
+++ b/test/test_bundled_inputs.py
@@ -435,7 +435,7 @@ class TestBundledInputs(TestCase):
# two args which have InflatableArg with fmt_fn
# 1 * 2 * 2 = 4
self.assertEqual(
- sum([method.startswith("_inflate_helper") for method in methods]), 4
+ sum(method.startswith("_inflate_helper") for method in methods), 4
)
diff --git a/test/test_flop_counter.py b/test/test_flop_counter.py
index 1a9a757f9f..cd95f2cf55 100644
--- a/test/test_flop_counter.py
+++ b/test/test_flop_counter.py
@@ -21,7 +21,7 @@ def FlopCounterMode(*args, **kwargs):
return torch.utils.flop_counter.FlopCounterMode(*args, **kwargs, display=False)
def get_total_flops(mode):
- return str(sum([v for _, v in mode.flop_counts["Global"].items()]))
+ return str(sum(v for _, v in mode.flop_counts["Global"].items()))
def T(*shape, requires_grad=False):
return torch.randn(*shape, requires_grad=requires_grad)
diff --git a/test/test_foreach.py b/test/test_foreach.py
index 359a07c223..19d695762c 100644
--- a/test/test_foreach.py
+++ b/test/test_foreach.py
@@ -334,9 +334,7 @@ class TestForeach(TestCase):
[rhs_arg, tensors], is_cuda=False, expect_fastpath=False
)
).mean().backward()
- sum(
- [ref.func(ref_rhs_arg, t) for t in ref_tensors]
- ).mean().backward()
+ sum(ref.func(ref_rhs_arg, t) for t in ref_tensors).mean().backward()
self.assertEqual(
[t.grad for t in tensors], [t.grad for t in ref_tensors]
)
diff --git a/test/test_fx.py b/test/test_fx.py
index 085fa33265..0b402e8671 100644
--- a/test/test_fx.py
+++ b/test/test_fx.py
@@ -3593,17 +3593,17 @@ class TestFX(JitTestCase):
self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(val))), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
- assert sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args
+ assert sum(i.op == 'placeholder' for i in nf.graph.nodes) == num_flat_args
nf = symbolic_trace(nf)
self.assertEqual(nf(val), orig_out)
assert "tree_flatten_spec" not in nf.code
- assert sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1
+ assert sum(i.op == 'placeholder' for i in nf.graph.nodes) == 1
nf = symbolic_trace(nf, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
- assert sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args
+ assert sum(i.op == 'placeholder' for i in nf.graph.nodes) == num_flat_args
pickled = pickle.dumps(nf)
nf = pickle.loads(pickled)
diff --git a/test/test_nn.py b/test/test_nn.py
index ae58f688c5..e040afe3c6 100644
--- a/test/test_nn.py
+++ b/test/test_nn.py
@@ -1699,14 +1699,14 @@ tensor(..., device='meta', size=(1,), requires_grad=True)""")
# Applying weight norm on one of them causes it to become a tensor
l = torch.nn.utils.weight_norm(l, name=name)
self.assertEqual(
- sum([isinstance(p, torch.nn.Parameter) for p in l._flat_weights]),
+ sum(isinstance(p, torch.nn.Parameter) for p in l._flat_weights),
num_params - 1,
)
# Removing the weight norm reparametrization restores the Parameter
l = torch.nn.utils.remove_weight_norm(l, name=name)
self.assertEqual(
- sum([isinstance(p, torch.nn.Parameter) for p in l._flat_weights]),
+ sum(isinstance(p, torch.nn.Parameter) for p in l._flat_weights),
num_params,
)
diff --git a/test/test_optim.py b/test/test_optim.py
index 7f4a352c48..09da6e706f 100644
--- a/test/test_optim.py
+++ b/test/test_optim.py
@@ -348,7 +348,7 @@ class TestOptimRenewed(TestCase):
solution = torch.tensor([1, 1])
with torch.no_grad():
- initial_dist = sum([param.dist(solution) for param in params])
+ initial_dist = sum(param.dist(solution) for param in params)
def get_grad(param, sparse_grad, w):
grad = drosenbrock(param)
@@ -410,13 +410,13 @@ class TestOptimRenewed(TestCase):
if not kwargs.get("maximize", False):
self.assertLessEqual(
- sum([param.dist(solution) for param in params]),
+ sum(param.dist(solution) for param in params),
initial_dist
)
else:
self.assertGreaterEqual(
- sum([rosenbrock(param) for param in params]),
- sum([rosenbrock(param_t) for param_t in params_t]),
+ sum(rosenbrock(param) for param in params),
+ sum(rosenbrock(param_t) for param_t in params_t),
)
diff --git a/test/test_transformers.py b/test/test_transformers.py
index af32521fc5..e71c120876 100644
--- a/test/test_transformers.py
+++ b/test/test_transformers.py
@@ -2223,10 +2223,10 @@ class TestSDPACudaOnly(NNTestCase):
attn_mask_strides = (14, 14, 14, 1)
# Calculate the number of elements needed for each tensor
- query_num_elements = max([size * stride for size, stride in zip(query_size, query_strides)])
- key_num_elements = max([size * stride for size, stride in zip(key_size, key_strides)])
- value_num_elements = max([size * stride for size, stride in zip(value_size, value_strides)])
- attention_mask_num_elements = max([size * stride for size, stride in zip(attention_mask_size, attn_mask_strides)])
+ query_num_elements = max(size * stride for size, stride in zip(query_size, query_strides))
+ key_num_elements = max(size * stride for size, stride in zip(key_size, key_strides))
+ value_num_elements = max(size * stride for size, stride in zip(value_size, value_strides))
+ attention_mask_num_elements = max(size * stride for size, stride in zip(attention_mask_size, attn_mask_strides))
# Create the tensors with the specified sizes and strides
query = torch.randn(query_num_elements, device=device).as_strided(query_size, query_strides)
diff --git a/torch/_dynamo/bytecode_analysis.py b/torch/_dynamo/bytecode_analysis.py
index 581a8e1bf9..092b20491d 100644
--- a/torch/_dynamo/bytecode_analysis.py
+++ b/torch/_dynamo/bytecode_analysis.py
@@ -244,8 +244,8 @@ def stacksize_analysis(instructions) -> Union[int, float]:
stack_size = stack_sizes[inst]
print(stack_size.low, stack_size.high, inst)
- low = min([x.low for x in stack_sizes.values()])
- high = max([x.high for x in stack_sizes.values()])
+ low = min(x.low for x in stack_sizes.values())
+ high = max(x.high for x in stack_sizes.values())
assert fixed_point.value, "failed to reach fixed point"
assert low >= 0
diff --git a/torch/_dynamo/testing.py b/torch/_dynamo/testing.py
index 12b545d4d1..c46304369b 100644
--- a/torch/_dynamo/testing.py
+++ b/torch/_dynamo/testing.py
@@ -105,7 +105,7 @@ def reduce_to_scalar_loss(out):
# Mean does not work on integer tensors
return out.sum() / out.numel()
elif isinstance(out, (list, tuple)):
- return sum([reduce_to_scalar_loss(x) for x in out]) / len(out)
+ return sum(reduce_to_scalar_loss(x) for x in out) / len(out)
elif type(out).__name__ in (
"MaskedLMOutput",
"Seq2SeqLMOutput",
@@ -115,7 +115,7 @@ def reduce_to_scalar_loss(out):
elif type(out).__name__ == "SquashedNormal":
return out.mean.sum()
elif isinstance(out, dict):
- return sum([reduce_to_scalar_loss(value) for value in out.values()]) / len(
+ return sum(reduce_to_scalar_loss(value) for value in out.values()) / len(
out.keys()
)
raise NotImplementedError("Don't know how to reduce", type(out))
diff --git a/torch/_dynamo/utils.py b/torch/_dynamo/utils.py
index 25ae60d361..9f1f3aa3a7 100644
--- a/torch/_dynamo/utils.py
+++ b/torch/_dynamo/utils.py
@@ -1594,7 +1594,7 @@ class CompileProfiler:
def recompilation_report():
if len(gf):
- max_recompiles = max([num_recompiles(code) for code in gf])
+ max_recompiles = max(num_recompiles(code) for code in gf)
recomp_table = tabulate(
summarized_gf,
headers=["Function", "Recompiles", "Recompile Reasons"],
diff --git a/torch/_functorch/partitioners.py b/torch/_functorch/partitioners.py
index bb81624664..fd7fba3e8f 100644
--- a/torch/_functorch/partitioners.py
+++ b/torch/_functorch/partitioners.py
@@ -1296,7 +1296,7 @@ def min_cut_rematerialization_partition(
storages = {get_node_storage(node) for node in saved_values}
print(
"Theoretical Activations Stored: ",
- sum([_size_of(i) for i in saved_values]) / 1e9,
+ sum(_size_of(i) for i in saved_values) / 1e9,
)
sorted_sizes = sorted([(_size_of(i), str(i)) for i in saved_values])
fw_module_nodes = {
diff --git a/torch/_inductor/comms.py b/torch/_inductor/comms.py
index b14051a44f..02eb21a4e7 100644
--- a/torch/_inductor/comms.py
+++ b/torch/_inductor/comms.py
@@ -215,10 +215,8 @@ def reorder_compute_for_overlap(
assert_no_comm_nodes(needed_by_next_comm_and_ready_compute_nodes)
total_compute_runtime_cost = rolled_over_compute_cost + sum(
- [
- estimate_op_runtime(node)
- for node in needed_by_next_comm_and_ready_compute_nodes
- ]
+ estimate_op_runtime(node)
+ for node in needed_by_next_comm_and_ready_compute_nodes
)
prev_comm_runtime_cost = estimate_op_runtime(comm_nodes[idx - 1])
schedule_nodes(tuple_sorted(needed_by_next_comm_and_ready_compute_nodes))
diff --git a/torch/_inductor/lowering.py b/torch/_inductor/lowering.py
index bdb7867acf..4fdd2c8c38 100644
--- a/torch/_inductor/lowering.py
+++ b/torch/_inductor/lowering.py
@@ -3642,16 +3642,12 @@ def max_pool2d_with_indices_backward(
new_size = list(x.get_size())
h_window_size = max(
- [
- max(h // stride[0] - max(0, (h - kernel_size[0]) // stride[0]), 1)
- for h in range(kernel_size[0] * 2)
- ]
+ max(h // stride[0] - max(0, (h - kernel_size[0]) // stride[0]), 1)
+ for h in range(kernel_size[0] * 2)
)
w_window_size = max(
- [
- max(w // stride[1] - max(0, (w - kernel_size[1]) // stride[1]), 1)
- for w in range(kernel_size[1] * 2)
- ]
+ max(w // stride[1] - max(0, (w - kernel_size[1]) // stride[1]), 1)
+ for w in range(kernel_size[1] * 2)
)
window_size = h_window_size * w_window_size
@@ -4353,16 +4349,12 @@ def avg_pool2d_backward(
dtype = x.get_dtype()
h_window_size = max(
- [
- max(h // stride[0] - max(0, (h - kernel_size[0]) // stride[0]), 1)
- for h in range(kernel_size[0] * 2)
- ]
+ max(h // stride[0] - max(0, (h - kernel_size[0]) // stride[0]), 1)
+ for h in range(kernel_size[0] * 2)
)
w_window_size = max(
- [
- max(w // stride[1] - max(0, (w - kernel_size[1]) // stride[1]), 1)
- for w in range(kernel_size[1] * 2)
- ]
+ max(w // stride[1] - max(0, (w - kernel_size[1]) // stride[1]), 1)
+ for w in range(kernel_size[1] * 2)
)
window_size = h_window_size * w_window_size
diff --git a/torch/_inductor/scheduler.py b/torch/_inductor/scheduler.py
index 61b0da467c..f37580a59b 100644
--- a/torch/_inductor/scheduler.py
+++ b/torch/_inductor/scheduler.py
@@ -538,7 +538,7 @@ class BaseSchedulerNode:
node_bytes = 0
for buf_name in reads | writes:
- buf_accessed_elems = sum([node_numel for dep in buf_accesses[buf_name]])
+ buf_accessed_elems = sum(node_numel for dep in buf_accesses[buf_name])
buf: Union[ir.Buffer, ir.TensorBox]
if buf_name in V.graph.name_to_buffer:
buf = V.graph.name_to_buffer[buf_name]
@@ -868,8 +868,8 @@ class FusedSchedulerNode(BaseSchedulerNode):
for dep in set.union(*[x.unmet_dependencies for x in snodes])
if dep.name not in self.get_names()
} - self.read_writes.writes
- self.min_order = min([x.min_order for x in self.snodes])
- self.max_order = max([x.max_order for x in self.snodes])
+ self.min_order = min(x.min_order for x in self.snodes)
+ self.max_order = max(x.max_order for x in self.snodes)
@cache_on_self
def get_name(self) -> str:
diff --git a/torch/amp/grad_scaler.py b/torch/amp/grad_scaler.py
index 140ec67162..f2fae37142 100644
--- a/torch/amp/grad_scaler.py
+++ b/torch/amp/grad_scaler.py
@@ -426,10 +426,8 @@ class GradScaler:
found_inf = cast(
torch.Tensor,
sum(
- [
- t.to(scaler.device, non_blocking=True)
- for t in optimizer_state["found_inf_per_device"].values()
- ]
+ t.to(scaler.device, non_blocking=True)
+ for t in optimizer_state["found_inf_per_device"].values()
),
)
optimizer.grad_scale = ( # type: ignore[attr-defined]
diff --git a/torch/ao/quantization/fx/_model_report/detector.py b/torch/ao/quantization/fx/_model_report/detector.py
index 71986fd17f..ce9a227516 100644
--- a/torch/ao/quantization/fx/_model_report/detector.py
+++ b/torch/ao/quantization/fx/_model_report/detector.py
@@ -292,7 +292,7 @@ class PerChannelDetector(DetectorBase):
# get the fully qualified name and check if in list of modules to include and list of modules to ignore
for fqn, module in model.named_modules():
- is_in_include_list = sum([isinstance(module, x) for x in self.supported_modules]) > 0
+ is_in_include_list = any(isinstance(module, x) for x in self.supported_modules)
# check if the module per_channel is supported
# based on backend
@@ -515,10 +515,10 @@ class DynamicStaticDetector(DetectorBase):
Returns True if the module is supported by observer, False otherwise
"""
# check to see if module is of a supported type
- is_supported_type = sum([isinstance(module, x) for x in self.DEFAULT_DYNAMIC_STATIC_CHECK_SUPPORTED]) > 0
+ is_supported_type = any(isinstance(module, x) for x in self.DEFAULT_DYNAMIC_STATIC_CHECK_SUPPORTED)
# check if it will be supported
- future_supported_type = sum([isinstance(module, x) for x in self.DEFAULT_DYNAMIC_STATIC_FUTURE_SUPPORTED]) > 0
+ future_supported_type = any(isinstance(module, x) for x in self.DEFAULT_DYNAMIC_STATIC_FUTURE_SUPPORTED)
# supported
supported = is_supported_type or future_supported_type
@@ -576,7 +576,7 @@ class DynamicStaticDetector(DetectorBase):
post_obs_dist_classif = self.STATIONARY_STR if post_stat > self.tolerance else self.NON_STATIONARY_STR
# check if current support or future support
- is_supported_type = sum([isinstance(module, x) for x in self.DEFAULT_DYNAMIC_STATIC_CHECK_SUPPORTED]) > 0
+ is_supported_type = any(isinstance(module, x) for x in self.DEFAULT_DYNAMIC_STATIC_CHECK_SUPPORTED)
# store the set of important information for this module
module_info = {
@@ -789,7 +789,7 @@ class InputWeightEqualizationDetector(DetectorBase):
Returns True if the module is supported by observer, False otherwise
"""
# check to see if module is of a supported type
- is_supported_type = sum([type(module) is x for x in self.SUPPORTED_MODULES]) > 0
+ is_supported_type = any(type(module) is x for x in self.SUPPORTED_MODULES)
# this is check for observer insertion
if insert:
diff --git a/torch/autograd/profiler_util.py b/torch/autograd/profiler_util.py
index bbe40f032f..aa3b27a72b 100644
--- a/torch/autograd/profiler_util.py
+++ b/torch/autograd/profiler_util.py
@@ -164,7 +164,7 @@ class EventList(list):
@property
def self_cpu_time_total(self):
- return sum([event.self_cpu_time_total for event in self])
+ return sum(event.self_cpu_time_total for event in self)
def table(
self,
@@ -526,7 +526,7 @@ class FunctionEvent(FormattedTimesMixin):
if self.is_async or self.device_type != DeviceType.CPU:
return 0
return self.cpu_memory_usage - sum(
- [child.cpu_memory_usage for child in self.cpu_children]
+ child.cpu_memory_usage for child in self.cpu_children
)
@property
@@ -534,7 +534,7 @@ class FunctionEvent(FormattedTimesMixin):
if self.is_async or self.device_type != DeviceType.CPU:
return 0
return self.cuda_memory_usage - sum(
- [child.cuda_memory_usage for child in self.cpu_children]
+ child.cuda_memory_usage for child in self.cpu_children
)
@property
@@ -542,7 +542,7 @@ class FunctionEvent(FormattedTimesMixin):
if self.is_async or self.device_type != DeviceType.CPU:
return 0
return self.privateuse1_memory_usage - sum(
- [child.privateuse1_memory_usage for child in self.cpu_children]
+ child.privateuse1_memory_usage for child in self.cpu_children
)
@property
@@ -550,7 +550,7 @@ class FunctionEvent(FormattedTimesMixin):
if self.is_async or self.device_type != DeviceType.CPU:
return 0
return self.cpu_time_total - sum(
- [child.cpu_time_total for child in self.cpu_children]
+ child.cpu_time_total for child in self.cpu_children
)
@property
@@ -576,7 +576,7 @@ class FunctionEvent(FormattedTimesMixin):
return 0
if self.device_type == DeviceType.CPU:
return self.cuda_time_total - sum(
- [child.cuda_time_total for child in self.cpu_children]
+ child.cuda_time_total for child in self.cpu_children
)
else:
assert self.device_type == DeviceType.CUDA
@@ -595,7 +595,7 @@ class FunctionEvent(FormattedTimesMixin):
return 0
if self.device_type == DeviceType.CPU:
return self.privateuse1_time_total - sum(
- [child.privateuse1_time_total for child in self.cpu_children]
+ child.privateuse1_time_total for child in self.cpu_children
)
else:
assert self.device_type == DeviceType.PrivateUse1
@@ -889,11 +889,11 @@ def _build_table(
with_flops=with_flops,
)
- name_column_width = max([len(evt.key) for evt in events]) + 4
+ name_column_width = max(len(evt.key) for evt in events) + 4
if max_name_column_width is not None:
name_column_width = min(name_column_width, max_name_column_width)
- shapes_column_width = max([len(str(evt.input_shapes)) for evt in events]) + 4
+ shapes_column_width = max(len(str(evt.input_shapes)) for evt in events) + 4
if max_shapes_column_width is not None:
shapes_column_width = min(shapes_column_width, max_shapes_column_width)
@@ -908,7 +908,7 @@ def _build_table(
has_stack = len(stacks) > 0
if has_stack:
src_column_width = (
- max([max([len(entry) for entry in stack]) for stack in stacks]) + 4
+ max(max(len(entry) for entry in stack) for stack in stacks) + 4
)
if max_src_column_width is not None:
src_column_width = min(src_column_width, max_src_column_width)
@@ -1033,7 +1033,7 @@ def _build_table(
result.append(s)
result.append("\n") # Yes, newline after the end as well
- sum_self_cpu_time_total = sum([event.self_cpu_time_total for event in events])
+ sum_self_cpu_time_total = sum(event.self_cpu_time_total for event in events)
sum_self_cuda_time_total = 0
sum_self_privateuse1_time_total = 0
for evt in events:
diff --git a/torch/backends/xeon/run_cpu.py b/torch/backends/xeon/run_cpu.py
index 6b54eed299..1d5ec99bf6 100644
--- a/torch/backends/xeon/run_cpu.py
+++ b/torch/backends/xeon/run_cpu.py
@@ -178,7 +178,7 @@ class _CPUinfo:
# physical cores := core column in lscpu output
# logical cores := cPU column in lscpu output
- self.node_nums = int(max([line[3] for line in self.cpuinfo])) + 1
+ self.node_nums = int(max(line[3] for line in self.cpuinfo)) + 1
self.node_physical_cores: List[List[int]] = [] # node_id is index
self.node_logical_cores: List[List[int]] = [] # node_id is index
self.physical_core_node_map = {} # physical core to numa node id
diff --git a/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/_common.py b/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/_common.py
index 2472700587..c421fa327d 100644
--- a/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/_common.py
+++ b/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/_common.py
@@ -209,10 +209,8 @@ def _result_distribute_with_col_rearrange(results, input, world_size, weight, pg
for placement in weight._sharding_spec.placements:
dim_size = output_split_sizes[placement.rank()]
start = sum(
- [
- split_size if i < placement.rank() else 0
- for i, split_size in enumerate(output_split_sizes)
- ]
+ split_size if i < placement.rank() else 0
+ for i, split_size in enumerate(output_split_sizes)
)
indices += list(range(start, start + dim_size))
diff --git a/torch/distributed/_tensor/op_schema.py b/torch/distributed/_tensor/op_schema.py
index f316003eed..89f113c6fc 100644
--- a/torch/distributed/_tensor/op_schema.py
+++ b/torch/distributed/_tensor/op_schema.py
@@ -136,7 +136,7 @@ class OpStrategy(StrategyType):
"""
Returns the max number of shards across all placement strategies
"""
- return max([strategy.output_spec.num_shards for strategy in self.strategies])
+ return max(strategy.output_spec.num_shards for strategy in self.strategies)
@property
def output_mesh_shape(self):
diff --git a/torch/fx/experimental/accelerator_partitioner.py b/torch/fx/experimental/accelerator_partitioner.py
index c2caf933fd..7bb91692b3 100644
--- a/torch/fx/experimental/accelerator_partitioner.py
+++ b/torch/fx/experimental/accelerator_partitioner.py
@@ -339,7 +339,7 @@ class Partitioner:
self.find_single_partition(
total_size_of_graph, logical_device_id=device_with_max_mem.logical_id
)
- elif total_size_of_graph > sum([d.available_mem_bytes for d in self.devices]):
+ elif total_size_of_graph > sum(d.available_mem_bytes for d in self.devices):
raise RuntimeError("Devices have no enough memory for the module")
else:
# Sparse nn based partition
diff --git a/torch/jit/_script.py b/torch/jit/_script.py
index 2d087bcdd5..e9e4c404c5 100644
--- a/torch/jit/_script.py
+++ b/torch/jit/_script.py
@@ -1652,7 +1652,7 @@ class _ScriptProfile:
for source_stats in self.profile._dump_stats():
source_ref = source_stats.source()
source_lines = source_ref.text().splitlines()
- dedent = min([len(line) - len(line.lstrip(" ")) for line in source_lines])
+ dedent = min(len(line) - len(line.lstrip(" ")) for line in source_lines)
source_lines = [line[dedent:] for line in source_lines]
start_line = source_ref.starting_lineno()
diff --git a/torch/nested/_internal/nested_tensor.py b/torch/nested/_internal/nested_tensor.py
index 51b5cfa652..b5c9354ad2 100644
--- a/torch/nested/_internal/nested_tensor.py
+++ b/torch/nested/_internal/nested_tensor.py
@@ -337,8 +337,8 @@ def jagged_from_list(
ret_nt = nested_view_from_values_offsets(values, offsets)
ret_nt._metadata_cache = {
# compute this now since it's easy
- "max_seqlen": max([t.shape[0] for t in tensors]),
- "min_seqlen": min([t.shape[0] for t in tensors]),
+ "max_seqlen": max(t.shape[0] for t in tensors),
+ "min_seqlen": min(t.shape[0] for t in tensors),
}
return (ret_nt, offsets) # type: ignore[return-value]
diff --git a/torch/nested/_internal/ops.py b/torch/nested/_internal/ops.py
index 183b2345e3..d4c79241f4 100644
--- a/torch/nested/_internal/ops.py
+++ b/torch/nested/_internal/ops.py
@@ -59,7 +59,7 @@ def _wrap_jagged_dims(ndim, dims, op_name):
def check_schema(schema_str: str, func, *args, **kwargs) -> None:
named_arg_types = schema_str.split(", ")
- num_optional_args = sum([x.endswith("?") for x in named_arg_types])
+ num_optional_args = [x.endswith("?") for x in named_arg_types].count(True)
min_args = len(named_arg_types) - num_optional_args
# special case: ellipses allows for any number of unchecked args at the end
@@ -201,7 +201,7 @@ def lookup_jagged(func, *args, **kwargs) -> Optional[Callable]:
# Handle pointwise fallbacks
if torch.Tag.pointwise in func.tags:
# Assume there aren't additional tensors that aren't the "unary/binary" args
- num_tensor_args = sum([isinstance(x, torch.Tensor) for x in args])
+ num_tensor_args = sum(isinstance(x, torch.Tensor) for x in args)
if num_tensor_args == 1:
check_schema("self: jt_all, ...", func, *args, **kwargs)
return functools.partial(jagged_unary_pointwise, func)
diff --git a/torch/nn/parallel/distributed.py b/torch/nn/parallel/distributed.py
index 3db95fe14a..b5d877f813 100644
--- a/torch/nn/parallel/distributed.py
+++ b/torch/nn/parallel/distributed.py
@@ -956,7 +956,7 @@ class DistributedDataParallel(Module, Joinable):
# 1. Create gradient buffer
device = torch.device("cpu") if device_ids is None else device_ids[0]
self._delay_grad_buffer = torch.zeros(
- sum([p.numel() for p in self._delay_all_reduce_params]),
+ sum(p.numel() for p in self._delay_all_reduce_params),
device=device,
)
diff --git a/torch/testing/_internal/common_fsdp.py b/torch/testing/_internal/common_fsdp.py
index fb4ed86bfe..6f0ed8bd78 100644
--- a/torch/testing/_internal/common_fsdp.py
+++ b/torch/testing/_internal/common_fsdp.py
@@ -715,7 +715,7 @@ class MixtureOfExperts(NestedWrappedModule):
d_input = 8
expert = _maybe_cuda(nn.Linear(d_expert, d_shared), self.move_to_cuda)
- self.num_expert_params = sum([p.numel() for p in expert.parameters()])
+ self.num_expert_params = sum(p.numel() for p in expert.parameters())
for p in expert.parameters():
p.expert = True # type: ignore[attr-defined]
diff --git a/torch/testing/_internal/distributed/rpc/examples/reinforcement_learning_rpc_test.py b/torch/testing/_internal/distributed/rpc/examples/reinforcement_learning_rpc_test.py
index c64f5cd64d..0549fe33a1 100644
--- a/torch/testing/_internal/distributed/rpc/examples/reinforcement_learning_rpc_test.py
+++ b/torch/testing/_internal/distributed/rpc/examples/reinforcement_learning_rpc_test.py
@@ -198,7 +198,7 @@ class Agent:
rewards.extend(self.rewards[ob_id])
# use the minimum observer reward to calculate the running reward
- min_reward = min([sum(self.rewards[ob_id]) for ob_id in self.rewards])
+ min_reward = min(sum(self.rewards[ob_id]) for ob_id in self.rewards)
self.running_reward = 0.05 * min_reward + (1 - 0.05) * self.running_reward
# clear saved probs and rewards
diff --git a/torch/utils/_sympy/solve.py b/torch/utils/_sympy/solve.py
index 4d1113bea8..6276c69629 100644
--- a/torch/utils/_sympy/solve.py
+++ b/torch/utils/_sympy/solve.py
@@ -102,7 +102,7 @@ def _try_isolate_lhs(
if isinstance(e, sympy.Rel):
# Move any constants in the left-hand side to the right-hand side.
lhs_not_thing = (
- sum([a for a in e.lhs.args if not a.has(thing)])
+ sum(a for a in e.lhs.args if not a.has(thing))
if isinstance(e.lhs, sympy.Add)
else 0
)
diff --git a/torch/utils/bottleneck/__main__.py b/torch/utils/bottleneck/__main__.py
index e08fe5fa78..4444211a0f 100644
--- a/torch/utils/bottleneck/__main__.py
+++ b/torch/utils/bottleneck/__main__.py
@@ -171,7 +171,7 @@ def parse_args():
def cpu_time_total(autograd_prof):
- return sum([event.cpu_time_total for event in autograd_prof.function_events])
+ return sum(event.cpu_time_total for event in autograd_prof.function_events)
def main():
diff --git a/torchgen/static_runtime/gen_static_runtime_ops.py b/torchgen/static_runtime/gen_static_runtime_ops.py
index 81a2ee7e96..737d296d9a 100644
--- a/torchgen/static_runtime/gen_static_runtime_ops.py
+++ b/torchgen/static_runtime/gen_static_runtime_ops.py
@@ -206,14 +206,12 @@ def main() -> None:
)
print("grouped native ops with out variant: %d" % len(native_functions_groups))
- supported_functions_num = sum(
- [len(groups) for groups in supported_functions_groups]
- )
+ supported_functions_num = sum(len(groups) for groups in supported_functions_groups)
print("generated functions groups with out variant: %d" % supported_functions_num)
print("\nview grouped native ops: %d" % len(native_functions_view_groups))
supported_view_functions_num = sum(
- [len(groups) for groups in supported_functions_view_groups]
+ len(groups) for groups in supported_functions_view_groups
)
print("generated functions view groups: %d" % supported_view_functions_num)
| 2.41.0 |
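The mechanical change applied across the files above is always the same: when the result is consumed immediately by sum/min/max, a generator expression does the job without materializing a temporary list. A stand-alone before/after illustration (not taken from the patch):

    values = range(100_000)

    # Before (flagged by ruff C419): builds the full list of squares first.
    total_before = sum([v * v for v in values])

    # After: the generator is consumed lazily, so no intermediate list is allocated.
    total_after = sum(v * v for v in values)

    assert total_before == total_after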
79f5b9a3926e155d118932eb5b7678436139341 | Sat, 13 Apr 2024 00:55:44 +0000 | [PATCH 0120/1000] Pass triton kernel info to record function (#123871) | Summary: This diff passes Triton kernel information, such as the kernel Python file, kernel type, grid, and stream, to record_function. With this information, the Execution Trace can capture the Triton kernel and replay it in PARAM. Test Plan: unit test buck2 test caffe2/test:profiler -- test_record_function_fast Differential Revision: D56021651 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123871 Approved by: https://github.com/sraikund16 | diff --git a/torch/_inductor/triton_heuristics.py b/torch/_inductor/triton_heuristics.py
index 5ce3d82d71..e4b392df36 100644
--- a/torch/_inductor/triton_heuristics.py
+++ b/torch/_inductor/triton_heuristics.py
@@ -147,6 +147,7 @@ class CachingAutotuner(KernelInterface):
size_hints=None,
inductor_meta=None, # metadata not relevant to triton
custom_kernel=False, # whether the kernel is inductor-generated or custom
+ filename: Optional[str] = None,
):
super().__init__()
@@ -176,7 +177,7 @@ class CachingAutotuner(KernelInterface):
for c in self.configs:
log.debug(c)
- self.launchers = []
+ self.launchers = [] # type: ignore[var-annotated]
self.lock = threading.Lock()
if os.getenv("TRITON_CACHE_DIR") is None:
os.environ["TRITON_CACHE_DIR"] = os.path.join(
@@ -189,6 +190,7 @@ class CachingAutotuner(KernelInterface):
self.coordesc_tuner = CoordescTuner(
is_mm=False, name=self.fn.__name__, size_hints=size_hints
)
+ self.filename = filename
def precompile(self, warm_cache_only_with_cc=None):
with self.lock:
@@ -733,8 +735,19 @@ class CachingAutotuner(KernelInterface):
# it is faster than entering and exiting a context manager, even if the context
# manager is a nullcontext.
if autograd_profiler._is_profiler_enabled:
+ # grid can be a tuple of ints or a string.
+ grid_info = (
+ grid if isinstance(grid, tuple) else getattr(grid, "grid_fn_str", None)
+ )
with torch._C._profiler._RecordFunctionFast(
- self.inductor_meta.get("kernel_name", "triton kernel"), args
+ self.inductor_meta.get("kernel_name", "triton kernel"),
+ args,
+ {
+ "kernel_file": self.filename,
+ "kernel_type": "triton",
+ "grid": grid_info,
+ "stream": stream,
+ },
):
return launcher(
*args,
@@ -1026,6 +1039,7 @@ def cached_autotune(
heuristic_type=heuristic_type,
size_hints=size_hints,
custom_kernel=custom_kernel,
+ filename=filename,
)
return CachingAutotuner(
fn,
@@ -1037,6 +1051,7 @@ def cached_autotune(
heuristic_type=heuristic_type,
size_hints=size_hints,
custom_kernel=custom_kernel,
+ filename=filename,
)
return decorator
@@ -1602,6 +1617,8 @@ def grid(*numels):
z_grid,
)
+ setattr(grid_fn, "grid_fn_str", f"grid({numels})") # noqa: B010
+
return grid_fn
@@ -1610,4 +1627,7 @@ def split_scan_grid(xnumel, rnumel):
assert meta.get("XBLOCK", 1) == 1
return (ceildiv(rnumel, meta.get("RBLOCK", 1)), xnumel, 1)
+ grid_fn_str = f"split_scan_grid({xnumel}, {rnumel})"
+ setattr(grid_fn, "grid_fn_str", grid_fn_str) # noqa: B010
+
return grid_fn | 2.41.0 |
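The `grid_fn_str` attribute set above is just a human-readable description attached to the grid closure, so the launcher can report something meaningful when `grid` is a callable rather than a tuple of ints. A simplified sketch of the pattern, using a hypothetical make_grid helper:

    def make_grid(*numels):
        def grid_fn(meta):
            # the real helper divides numels by the block sizes found in `meta`
            return numels
        grid_fn.grid_fn_str = f"grid({numels})"   # readable form for profiling
        return grid_fn

    grid = make_grid(64, 32)
    # Mirrors the launcher logic above when deciding what to record:
    grid_info = grid if isinstance(grid, tuple) else getattr(grid, "grid_fn_str", None)
    assert grid_info == "grid((64, 32))"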
7a45883ce8496e0e67f2f64a3587029aa61795e | Sat, 13 Apr 2024 00:57:00 +0000 | [PATCH 0121/1000] [Reland] [Distributed] [2/N] Fix clang-tidy warnings in torch/csrc/distributed/c10d (#123821) | Reland of #122892 with problematic changes reverted. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123821 Approved by: https://github.com/Skylion007 | diff --git a/torch/csrc/distributed/c10d/Functional.cpp b/torch/csrc/distributed/c10d/Functional.cpp
index e2829937c5..d3c4a9fe1d 100644
--- a/torch/csrc/distributed/c10d/Functional.cpp
+++ b/torch/csrc/distributed/c10d/Functional.cpp
@@ -1,5 +1,3 @@
-#include <shared_mutex>
-
#include <ATen/ATen.h>
#include <ATen/core/op_registration/op_registration.h>
#include <c10/core/DispatchKey.h>
@@ -8,6 +6,7 @@
#include <torch/csrc/distributed/c10d/GroupRegistry.hpp>
#include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
#include <torch/csrc/distributed/c10d/RankLocal.hpp>
+#include <utility>
namespace {
@@ -15,7 +14,7 @@ class WorkRegistry {
public:
void register_work(
const at::Tensor& tensor,
- c10::intrusive_ptr<c10d::Work> work) {
+ const c10::intrusive_ptr<c10d::Work>& work) {
const auto storage = tensor.storage().getWeakStorageImpl();
std::unique_lock lock(lock_);
auto [it, inserted] = registry_.emplace(storage, work);
@@ -51,8 +50,8 @@ class WorkRegistry {
"is invoked on all tensors returned from c10d_functional collective "
"ops before they are used.");
}
- for (auto it = registry_.begin(); it != registry_.end(); ++it) {
- it->second.release();
+ for (auto& it : registry_) {
+ it.second.release();
}
}
@@ -68,7 +67,7 @@ static WorkRegistry process_registry;
void register_work(
const at::Tensor& tensor,
- c10::intrusive_ptr<c10d::Work> work) {
+ const c10::intrusive_ptr<c10d::Work>& work) {
if (c10d::get_thread_isolation_mode()) {
c10d::RankLocal<WorkRegistry>::get().register_work(tensor, work);
} else {
@@ -168,6 +167,7 @@ std::vector<at::Tensor> all_gather_into_tensor_coalesced(
int64_t group_size,
std::string group_name) {
std::vector<at::Tensor> outputs;
+ outputs.reserve(inputs.size());
for (const auto& tensor : inputs) {
outputs.push_back(allocate_all_gather_output(tensor, group_size));
}
@@ -212,6 +212,7 @@ std::vector<at::Tensor> reduce_scatter_tensor_coalesced(
c10d::ReduceScatterOptions opts;
opts.reduceOp = to_reduce_op(reduce_op);
std::vector<at::Tensor> outputs;
+ outputs.reserve(inputs.size());
for (const auto& tensor : inputs) {
outputs.push_back(allocate_reduce_scatter_output(tensor, group_size));
}
@@ -241,8 +242,8 @@ at::Tensor all_to_all_single(
std::vector<int64_t> input_split_sizes,
std::string group_name) {
std::vector<int64_t> output_sizes = input.sizes().vec();
- output_sizes[0] =
- std::accumulate(output_split_sizes.begin(), output_split_sizes.end(), 0);
+ output_sizes[0] = std::accumulate(
+ output_split_sizes.begin(), output_split_sizes.end(), int64_t(0));
auto output = input.new_empty(output_sizes);
auto group = c10d::resolve_process_group(group_name);
diff --git a/torch/csrc/distributed/c10d/reducer.cpp b/torch/csrc/distributed/c10d/reducer.cpp
index b2983f06d4..dd90fadb11 100644
--- a/torch/csrc/distributed/c10d/reducer.cpp
+++ b/torch/csrc/distributed/c10d/reducer.cpp
@@ -20,6 +20,7 @@
#include <torch/csrc/autograd/utils/lambda_post_hook.h>
#include <torch/csrc/distributed/c10d/comm.hpp>
#include <torch/csrc/distributed/c10d/logger.hpp>
+#include <utility>
namespace c10d {
namespace {
@@ -89,7 +90,7 @@ std::vector<at::Tensor> extractTensors(const c10::IValue& result) {
Reducer::Reducer(
std::vector<at::Tensor> params,
std::vector<std::vector<size_t>> bucket_indices,
- std::vector<size_t> per_bucket_size_limits,
+ const std::vector<size_t>& per_bucket_size_limits,
c10::intrusive_ptr<c10d::ProcessGroup> process_group,
std::vector<bool> expect_sparse_gradients,
int64_t bucket_bytes_cap,
@@ -449,7 +450,7 @@ void Reducer::mark_variable_ready_sparse(size_t variable_index) {
if (sparse_metadata_) {
grad = grad.coalesce();
REDUCER_CHECK(
- param_names_.size() != 0, logger_, "No parameter names were found");
+ !param_names_.empty(), logger_, "No parameter names were found");
std::string& param_name = param_names_[variable_index];
auto iter = sparse_metadata_->find(param_name);
REDUCER_CHECK(
@@ -632,7 +633,7 @@ void Reducer::delay_all_reduce() {
}
void Reducer::set_logger(std::weak_ptr<c10d::Logger> logger) {
- logger_ = logger;
+ logger_ = std::move(logger);
}
// The function `autograd_hook` is called after the gradient for a
diff --git a/torch/csrc/distributed/c10d/reducer.hpp b/torch/csrc/distributed/c10d/reducer.hpp
index 43782204be..863aeecb4e 100644
--- a/torch/csrc/distributed/c10d/reducer.hpp
+++ b/torch/csrc/distributed/c10d/reducer.hpp
@@ -51,7 +51,7 @@ class TORCH_API Reducer {
explicit Reducer(
std::vector<at::Tensor> params,
std::vector<std::vector<size_t>> bucket_indices,
- std::vector<size_t> per_bucket_size_limits,
+ const std::vector<size_t>& per_bucket_size_limits,
c10::intrusive_ptr<c10d::ProcessGroup> process_group,
std::vector<bool> expect_sparse_gradients,
int64_t bucket_bytes_cap,
@@ -303,11 +303,9 @@ class TORCH_API Reducer {
using GradCallback = std::function<bool(at::Tensor&)>;
#ifndef _WIN32
static_assert(
- std::is_same<
+ std::is_same_v<
GradCallback,
- torch::distributed::autograd::DistAutogradContext::GradCallback>::
- value,
- "");
+ torch::distributed::autograd::DistAutogradContext::GradCallback>);
#endif
void runGradCallbackForVariable(at::Tensor& variable, GradCallback&& cb);
| 2.41.0 |
1654fd4b091345a3ea763562b5cb6bf4b2c7081 | Fri, 12 Apr 2024 15:41:33 -0700 | [PATCH 0122/1000] [PT2D][FSDP] skip FSDP hooks base on dynamo config (#123021) | unit test: `pytest test/distributed/_composable/fsdp/test_fully_shard_compile.py` For FSDP, we turn on/off compiling hooks base on `torch._dynamo.config.skip_fsdp_hooks` Pull Request resolved: https://github.com/pytorch/pytorch/pull/123021 Approved by: https://github.com/yf225, https://github.com/anijain2305 | diff --git a/test/distributed/_composable/fsdp/test_fully_shard_compile.py b/test/distributed/_composable/fsdp/test_fully_shard_compile.py
new file mode 100644
index 0000000000..8a87dfdd1d
--- /dev/null
+++ b/test/distributed/_composable/fsdp/test_fully_shard_compile.py
@@ -0,0 +1,64 @@
+# Owner(s): ["oncall: distributed"]
+
+
+import unittest
+
+import torch
+from torch.distributed._composable.fsdp import fully_shard
+from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
+from torch.testing._internal.common_fsdp import FSDPTest, MLP
+from torch.testing._internal.common_utils import run_tests
+from torch.utils._triton import has_triton
+
+
+class TestFullyShardCompileCompute(FSDPTest):
+ @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
+ @skip_if_lt_x_gpu(2)
+ def test_disable_compiling_hooks(self):
+ self.run_subtests(
+ {
+ "skip_fsdp_hooks": [False, True],
+ },
+ self._test_disable_compiling_hooks,
+ )
+
+ def _test_disable_compiling_hooks(
+ self,
+ skip_fsdp_hooks: bool,
+ ):
+ torch._dynamo.reset()
+ trace_rules_check_count = 0
+ HOOKS_FILE_NAME = "torch/distributed/_composable/fsdp/_fsdp_state.py"
+ HOOK_WRAPPER_NAME = "fsdp_hook_wrapper"
+
+ def patched_trace_rules_check(*args, **kwargs):
+ nonlocal trace_rules_check_count
+ f_code = args[0]
+ if (
+ hasattr(f_code, "co_filename")
+ and f_code.co_filename.endswith(HOOKS_FILE_NAME)
+ and f_code.co_name != HOOK_WRAPPER_NAME
+ ):
+ trace_rules_check_count += 1
+ return orig_trace_rules_check(*args, **kwargs)
+
+ original_skip_fsdp_hooks = torch._dynamo.config.skip_fsdp_hooks
+ orig_trace_rules_check = torch._dynamo.trace_rules.check
+ torch.distributed.barrier()
+ torch._dynamo.config.skip_fsdp_hooks = skip_fsdp_hooks
+ torch._dynamo.trace_rules.check = patched_trace_rules_check
+ model = MLP(4)
+ fully_shard(model)
+ model.compile()
+ model(torch.randn((4, 4), device="cuda"))
+ torch.distributed.barrier()
+ torch._dynamo.config.skip_fsdp_hooks = original_skip_fsdp_hooks
+ torch._dynamo.trace_rules.check = orig_trace_rules_check
+ if skip_fsdp_hooks:
+ self.assertEqual(trace_rules_check_count, 0)
+ else:
+ self.assertTrue(trace_rules_check_count > 0)
+
+
+if __name__ == "__main__":
+ run_tests()
diff --git a/torch/_dynamo/config.py b/torch/_dynamo/config.py
index e45aad8fa2..9482cfabcc 100644
--- a/torch/_dynamo/config.py
+++ b/torch/_dynamo/config.py
@@ -289,6 +289,8 @@ optimize_ddp_lazy_compile = False
# Whether to skip guarding on FSDP-managed modules
skip_fsdp_guards = True
+# Whether to apply torch._dynamo.disable() to per-param FSDP hooks
+skip_fsdp_hooks = False
# Make dynamo skip guarding on hooks on nn modules
# Note: unsafe: if your model actually has hooks and you remove them, or doesn't and you add them,
diff --git a/torch/distributed/_composable/fsdp/_fsdp_state.py b/torch/distributed/_composable/fsdp/_fsdp_state.py
index 087c61dd5f..547b8e8d9f 100644
--- a/torch/distributed/_composable/fsdp/_fsdp_state.py
+++ b/torch/distributed/_composable/fsdp/_fsdp_state.py
@@ -36,6 +36,17 @@ class FSDPStateContext:
self.is_last_backward: bool = True
+def disable_if_config_true(func):
+ @functools.wraps(func)
+ def fsdp_hook_wrapper(*args, **kwargs):
+ if torch._dynamo.config.skip_fsdp_hooks:
+ return torch._dynamo.disable(func, recursive=True)(*args, **kwargs)
+ else:
+ return func(*args, **kwargs)
+
+ return fsdp_hook_wrapper
+
+
class FSDPState(_State):
def __init__(self):
super().__init__()
@@ -142,6 +153,7 @@ class FSDPState(_State):
if module in module_to_fsdp_param_group:
module_to_fsdp_param_group[module]._module_fqn = module_name
+ @disable_if_config_true
def _pre_forward(
self, module: nn.Module, args: Tuple[Any, ...], kwargs: Dict[str, Any]
) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:
@@ -161,6 +173,7 @@ class FSDPState(_State):
args, kwargs = self._fsdp_param_group.pre_forward(module, args, kwargs)
return args, kwargs
+ @disable_if_config_true
def _post_forward(self, module: nn.Module, input: Any, output: Any) -> Any:
# When composing with module-hook-based activation checkpointing, the
# post-backward hook is responsible for the reshard | 2.41.0 |
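
A minimal, standalone sketch of the config-gated wrapper that this patch adds in `_fsdp_state.py`: the decorator re-checks `torch._dynamo.config.skip_fsdp_hooks` on every call and wraps the hook in `torch._dynamo.disable` only when the flag is set. The toy `pre_forward_hook` below is a hypothetical stand-in for the real per-param FSDP hooks.

```python
import functools

import torch


def disable_if_config_true(func):
    @functools.wraps(func)
    def fsdp_hook_wrapper(*args, **kwargs):
        # The flag is read per call, so flipping it later still takes effect.
        if torch._dynamo.config.skip_fsdp_hooks:
            return torch._dynamo.disable(func, recursive=True)(*args, **kwargs)
        return func(*args, **kwargs)

    return fsdp_hook_wrapper


@disable_if_config_true
def pre_forward_hook(x):  # hypothetical stand-in for FSDPState._pre_forward
    return x + 1


torch._dynamo.config.skip_fsdp_hooks = True
print(pre_forward_hook(torch.ones(2)))  # hook body runs outside any compiled graph
```

The new unit test exercises the same switch indirectly, by counting how often `torch._dynamo.trace_rules.check` sees frames coming from `_fsdp_state.py` other than `fsdp_hook_wrapper` itself.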
efe6e2fea41b875ad19b27980b937f2b7aa6435 | Thu, 11 Apr 2024 23:40:10 -0700 | [PATCH 0123/1000] [dynamo][3.12] Stop backend detection on the first RETURN_VALUE (#123878) | Pull Request resolved: https://github.com/pytorch/pytorch/pull/123878 Approved by: https://github.com/williamwen42 ghstack dependencies: #122943, #123877 | diff --git a/benchmarks/dynamo/ci_expected_accuracy/aot_eager_torchbench_inference.csv b/benchmarks/dynamo/ci_expected_accuracy/aot_eager_torchbench_inference.csv
index 155f1e54ff..49ad52dabe 100644
--- a/benchmarks/dynamo/ci_expected_accuracy/aot_eager_torchbench_inference.csv
+++ b/benchmarks/dynamo/ci_expected_accuracy/aot_eager_torchbench_inference.csv
@@ -14,7 +14,7 @@ Background_Matting,pass_due_to_skip,0
-DALLE2_pytorch,pass,11
+DALLE2_pytorch,pass,12
diff --git a/benchmarks/dynamo/ci_expected_accuracy/dynamic_aot_eager_torchbench_inference.csv b/benchmarks/dynamo/ci_expected_accuracy/dynamic_aot_eager_torchbench_inference.csv
index 529ad92504..5051583c0f 100644
--- a/benchmarks/dynamo/ci_expected_accuracy/dynamic_aot_eager_torchbench_inference.csv
+++ b/benchmarks/dynamo/ci_expected_accuracy/dynamic_aot_eager_torchbench_inference.csv
@@ -14,7 +14,7 @@ Background_Matting,pass_due_to_skip,0
-DALLE2_pytorch,pass,11
+DALLE2_pytorch,pass,12
diff --git a/benchmarks/dynamo/ci_expected_accuracy/dynamic_inductor_torchbench_inference.csv b/benchmarks/dynamo/ci_expected_accuracy/dynamic_inductor_torchbench_inference.csv
index 529ad92504..5051583c0f 100644
--- a/benchmarks/dynamo/ci_expected_accuracy/dynamic_inductor_torchbench_inference.csv
+++ b/benchmarks/dynamo/ci_expected_accuracy/dynamic_inductor_torchbench_inference.csv
@@ -14,7 +14,7 @@ Background_Matting,pass_due_to_skip,0
-DALLE2_pytorch,pass,11
+DALLE2_pytorch,pass,12
diff --git a/benchmarks/dynamo/ci_expected_accuracy/dynamo_eager_torchbench_inference.csv b/benchmarks/dynamo/ci_expected_accuracy/dynamo_eager_torchbench_inference.csv
index 155f1e54ff..49ad52dabe 100644
--- a/benchmarks/dynamo/ci_expected_accuracy/dynamo_eager_torchbench_inference.csv
+++ b/benchmarks/dynamo/ci_expected_accuracy/dynamo_eager_torchbench_inference.csv
@@ -14,7 +14,7 @@ Background_Matting,pass_due_to_skip,0
-DALLE2_pytorch,pass,11
+DALLE2_pytorch,pass,12
diff --git a/benchmarks/dynamo/ci_expected_accuracy/inductor_torchbench_inference.csv b/benchmarks/dynamo/ci_expected_accuracy/inductor_torchbench_inference.csv
index 361711ae8b..95b1036d3a 100644
--- a/benchmarks/dynamo/ci_expected_accuracy/inductor_torchbench_inference.csv
+++ b/benchmarks/dynamo/ci_expected_accuracy/inductor_torchbench_inference.csv
@@ -14,7 +14,7 @@ Background_Matting,pass_due_to_skip,0
-DALLE2_pytorch,pass,11
+DALLE2_pytorch,pass,12
diff --git a/torch/_dynamo/symbolic_convert.py b/torch/_dynamo/symbolic_convert.py
index ff97bfa973..b7520c87b3 100644
--- a/torch/_dynamo/symbolic_convert.py
+++ b/torch/_dynamo/symbolic_convert.py
@@ -306,7 +306,7 @@ def generic_jump(truth_fn: typing.Callable[[object], bool], push: bool):
if not self.should_compile_partial_graph():
unimplemented("should_compile_partial_graph=False")
# compile a partial subgraph prefix then jump into user code
- if self.has_backedge():
+ if self.maybe_has_backedge():
msg = (
"Skipping frame because there is a graph break in a for/while loop\n"
f"{self.frame_summary()}"
@@ -538,7 +538,7 @@ def break_graph_if_unsupported(*, push):
*frame_loc,
)
- if self.has_backedge():
+ if self.maybe_has_backedge():
msg = (
"Skipping frame because there is a graph break in a for/while loop\n"
f"{self.frame_summary()}"
@@ -657,10 +657,31 @@ class InstructionTranslatorBase(
"""
self.inconsistent_side_effects = True
- def has_backedge(self):
+ def maybe_has_backedge(self):
+ # This function employs a heuristic. It does not reliably detect a backedge.
+ # The heuristic is straightforward: starting from the current instruction and
+ # continuing to the end, if any jump instruction targets an instruction before
+ # the current one, there might be a backedge.
+
+ # Python 3.12 introduced changes to bytecode that group common paths in
+ # blockstacks (with or try...else) and allow for early returns. Consequently,
+ # there can be multiple RETURN_VALUE instructions. Another heuristic is to
+ # halt detection upon encountering the first RETURN_VALUE or RETURN_CONST.
+
+ # These heuristics can result in both false positives and negatives, but
+ # in either case, the Dynamo code remains valid. For false positives
+ # (where an edge is incorrectly marked as a backedge), Dynamo will
+ # perform a SkipFrame instead of potentially applying optimizations. For
+ # false negatives (where an edge that should be marked as a backedge
+ # isn't), multiple graphs may be generated if there's a break in the
+        # graph during a for loop. In general, it's better to have fewer false
+        # positives so that Dynamo does not skip the whole frame.
+
cur_offset = self.current_instruction.offset
assert self.instruction_pointer is not None
for inst in self.instructions[self.instruction_pointer :]:
+ if inst.opname in ("RETURN_VALUE", "RETURN_CONST"):
+ return False
if inst.opname in JUMP_OPNAMES:
jump_offset = inst.argval
if jump_offset < cur_offset: | 2.41.0 |
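
A toy illustration of the heuristic described in the new comment block; `Inst` and the small `JUMP_OPNAMES` set below are simplified stand-ins for Dynamo's real `Instruction` type and jump-opcode table, not the actual implementation.

```python
from collections import namedtuple

Inst = namedtuple("Inst", ["opname", "offset", "argval"])

# Illustrative subset only; Dynamo's JUMP_OPNAMES covers every jump opcode.
JUMP_OPNAMES = {"JUMP_BACKWARD", "POP_JUMP_IF_FALSE", "POP_JUMP_IF_TRUE"}


def maybe_has_backedge(instructions, pointer):
    cur_offset = instructions[pointer].offset
    for inst in instructions[pointer:]:
        # 3.12 bytecode may contain several returns; stop at the first one.
        if inst.opname in ("RETURN_VALUE", "RETURN_CONST"):
            return False
        if inst.opname in JUMP_OPNAMES and inst.argval < cur_offset:
            return True  # jump target lies before the current instruction
    return False


loop_body = [
    Inst("LOAD_FAST", 10, None),
    Inst("JUMP_BACKWARD", 12, 0),    # jumps back to offset 0 -> possible backedge
    Inst("RETURN_VALUE", 14, None),
]
early_return = [
    Inst("RETURN_VALUE", 10, None),  # detection stops here
    Inst("JUMP_BACKWARD", 12, 0),
]
print(maybe_has_backedge(loop_body, 0), maybe_has_backedge(early_return, 0))  # True False
```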
8afcd7b619aa9f5732cecc2ba1d012c93f23deb | Thu, 11 Apr 2024 23:40:11 -0700 | [PATCH 0124/1000] [dynamo][dict] Add UnspecializedNNModuleVariable to dict keys (#122812) | Pull Request resolved: https://github.com/pytorch/pytorch/pull/122812 Approved by: https://github.com/jansel ghstack dependencies: #122943, #123877, #123878 | diff --git a/test/dynamo/test_repros.py b/test/dynamo/test_repros.py
index 2b03261129..a51664a748 100644
--- a/test/dynamo/test_repros.py
+++ b/test/dynamo/test_repros.py
@@ -1301,9 +1301,9 @@ class ReproTests(torch._dynamo.test_case.TestCase):
self.assertTrue(same(opt_model(a, b, c, d), correct))
if torch._dynamo.config.assume_static_by_default:
- self.assertExpectedInline(cnt.frame_count, """2""")
+ self.assertExpectedInline(cnt.frame_count, """4""")
else:
- self.assertExpectedInline(cnt.frame_count, """3""")
+ self.assertExpectedInline(cnt.frame_count, """5""")
def test_hf_model_output(self):
ex = ModelOutput(a=torch.randn(10), b=torch.randn(10), c=torch.randn(10))
diff --git a/test/profiler/test_profiler.py b/test/profiler/test_profiler.py
index 66ebe31781..5f367063c0 100644
--- a/test/profiler/test_profiler.py
+++ b/test/profiler/test_profiler.py
@@ -67,6 +67,7 @@ from torch.testing._internal.common_utils import (
TEST_WITH_CROSSREF,
TEST_WITH_ROCM,
TestCase,
+ skipIfTorchDynamo,
)
Json = Dict[str, Any]
@@ -3281,6 +3282,7 @@ aten::mm""")
num_matched = len(pattern.matched_events())
self.assertEqual(num_matched, 1)
+ @skipIfTorchDynamo("pattern checks for aten::_zero op which might not be there with torch.compile'd graph")
def test_profiler_grad_not_set_to_none_pattern(self):
x = torch.ones((100, 100))
model = nn.Sequential(
diff --git a/torch/_dynamo/variables/builtin.py b/torch/_dynamo/variables/builtin.py
index 14b58c99cb..733d97d3ef 100644
--- a/torch/_dynamo/variables/builtin.py
+++ b/torch/_dynamo/variables/builtin.py
@@ -1473,6 +1473,24 @@ class BuiltinVariable(VariableTracker):
unimplemented("non-const getattr() name")
if tx.output.side_effects.is_attribute_mutation(obj):
+ if isinstance(obj, variables.UnspecializedNNModuleVariable):
+ if (
+ name
+ in (
+ "named_parameters",
+ "parameters",
+ "named_buffers",
+ "buffers",
+ "named_modules",
+ "modules",
+ )
+ and obj.is_state_mutated
+ and tx.output.side_effects.has_pending_mutation(obj)
+ ):
+ unimplemented(
+ f"pending mutation on nn module, so graph breaking at {name!r} call"
+ )
+
try:
# re-read a pending side effect?
return tx.output.side_effects.load_attr(obj, name)
diff --git a/torch/_dynamo/variables/dicts.py b/torch/_dynamo/variables/dicts.py
index 07df439021..6b86048c50 100644
--- a/torch/_dynamo/variables/dicts.py
+++ b/torch/_dynamo/variables/dicts.py
@@ -51,6 +51,7 @@ def is_hashable(x):
variables.SkipFunctionVariable,
variables.misc.NumpyVariable,
variables.NNModuleVariable,
+ variables.UnspecializedNNModuleVariable,
variables.MethodWrapperVariable,
variables.TorchInGraphFunctionVariable,
variables.TypingVariable,
@@ -90,6 +91,8 @@ class ConstDictVariable(VariableTracker):
x = tuple(Hashable(e).underlying_value for e in self.vt.items)
elif isinstance(self.vt, variables.NNModuleVariable):
return self.vt.module
+ elif isinstance(self.vt, variables.UnspecializedNNModuleVariable):
+ return self.vt.value
elif isinstance(self.vt, variables.UserFunctionVariable):
return self.vt.get_function()
else:
diff --git a/torch/_dynamo/variables/lists.py b/torch/_dynamo/variables/lists.py
index a23ebe05b6..33b2116123 100644
--- a/torch/_dynamo/variables/lists.py
+++ b/torch/_dynamo/variables/lists.py
@@ -264,6 +264,9 @@ class ListVariable(CommonListMethodsVariable):
def python_type(self):
return list
+ def __repr__(self):
+        return f"{self.__class__.__name__}(length={len(self.items)})"
+
def reconstruct(self, codegen):
codegen.foreach(self.items)
codegen.append_output(create_instruction("BUILD_LIST", arg=len(self.items)))
diff --git a/torch/_dynamo/variables/nn_module.py b/torch/_dynamo/variables/nn_module.py
index 7346b9af09..548bebb9bb 100644
--- a/torch/_dynamo/variables/nn_module.py
+++ b/torch/_dynamo/variables/nn_module.py
@@ -649,7 +649,11 @@ class NNModuleVariable(VariableTracker):
class UnspecializedNNModuleVariable(UserDefinedObjectVariable):
- _nonvar_fields = {"value_type", *UserDefinedObjectVariable._nonvar_fields}
+ _nonvar_fields = {
+ "value_type",
+ "is_state_mutated",
+ *UserDefinedObjectVariable._nonvar_fields,
+ }
"""
The above class will specialize on the id() of a module and place
@@ -675,6 +679,7 @@ class UnspecializedNNModuleVariable(UserDefinedObjectVariable):
kwargs["value_type"] = type(value)
super().__init__(value=value, **kwargs)
+ self.is_state_mutated = False
@staticmethod
@functools.lru_cache(None)
@@ -788,6 +793,44 @@ class UnspecializedNNModuleVariable(UserDefinedObjectVariable):
if id(method.__code__) in self._nn_module_method_ids():
unimplemented(f"UnspecializedNNModuleVariable missing {name}")
+ # "_parameters" in self.value.__dict__ checks that module is initialized
+ if name == "__setattr__" and "_parameters" in self.value.__dict__:
+            # Record if mutations happen on parameters/buffers/modules. The
+            # mutations on these are not tracked by the base class
+            # UserDefinedObject vt. This will be used later to graph break
+            # on seeing parameters() and family calls.
+ # TODO(anijain2305) - This might not be needed if we let Dynamo
+ # inline both getattr and setattr. In that case, it should see
+ # the lowest level dicts - _parameters and family and
+ # automatically track mutations on those. Investigate if that
+ # can be done.
+ attr_name = args[0].as_python_constant()
+ value = args[1]
+
+ # This is reverse engineered by looking at nn module __setattr__
+ # logic.
+ if (
+ isinstance(value, variables.TensorVariable)
+ and value.python_type() is torch.nn.Parameter
+ ) or attr_name in self.value.__dict__["_parameters"]:
+ # Handle parameters
+ self.is_state_mutated = True
+ elif attr_name in self.value.__dict__["_buffers"]:
+ # Handle buffers
+ self.is_state_mutated = True
+ elif (
+ isinstance(
+ value,
+ (
+ variables.NNModuleVariable,
+ variables.UnspecializedNNModuleVariable,
+ ),
+ )
+ or attr_name in self.value.__dict__["_modules"]
+ ):
+ # Handle submodules
+ self.is_state_mutated = True
+
return super().call_method(tx, name, args, kwargs)
| 2.41.0 |
da3e113ca9420a07dd2786ad524de30283326e2 | Fri, 12 Apr 2024 17:00:11 -0700 | [PATCH 0125/1000] [functional_collective] remove the logic that forces torch-xla to use legacy funcol (#123776) | After https://github.com/pytorch/xla/pull/6887, torch-xla now also uses the all_reduce from native funcol. So we can remove this logic. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123776 Approved by: https://github.com/wanchaol | diff --git a/torch/distributed/_functional_collectives_impl.py b/torch/distributed/_functional_collectives_impl.py
index 69c4715bd4..d628dff7c2 100644
--- a/torch/distributed/_functional_collectives_impl.py
+++ b/torch/distributed/_functional_collectives_impl.py
@@ -42,17 +42,8 @@ else:
def native_funcol_enabled():
global _use_native_funcol
if _use_native_funcol is None:
- try:
- # Disable native funcol when torch_xla is installed. This check
- # will be removed once torch_xla adopts the native_funcol IR.
- import torch_xla # noqa: F401
-
- _use_native_funcol = False
- except Exception:
- # When TORCH_DISABLE_NATIVE_FUNCOL is set, fallback to py funcol
- _use_native_funcol = (
- os.environ.get("TORCH_DISABLE_NATIVE_FUNCOL") != "1"
- )
+ # When TORCH_DISABLE_NATIVE_FUNCOL is set, fallback to py funcol
+ _use_native_funcol = os.environ.get("TORCH_DISABLE_NATIVE_FUNCOL") != "1"
if not _use_native_funcol:
warning_once(
logger, | 2.41.0 |
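
With the torch-xla special case gone, the gate reduces to a single cached environment-variable check; a standalone sketch of the resulting pattern (assuming the same `TORCH_DISABLE_NATIVE_FUNCOL` variable) looks like this:

```python
import os

_use_native_funcol = None


def native_funcol_enabled() -> bool:
    """Decide once on first use; fall back to legacy py funcol only when explicitly disabled."""
    global _use_native_funcol
    if _use_native_funcol is None:
        _use_native_funcol = os.environ.get("TORCH_DISABLE_NATIVE_FUNCOL") != "1"
    return _use_native_funcol


# e.g. TORCH_DISABLE_NATIVE_FUNCOL=1 python script.py -> False on first call, then cached
print(native_funcol_enabled())
```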
a2e1d8e4fdcf6d91a6874a1e1e323de93a1be4b | Fri, 12 Apr 2024 17:00:11 -0700 | [PATCH 0126/1000] [functional collective] change the Python APIs to only use the native funcol ops (#123777) | ## Summary After this PR, the functional collective Python APIs will stop honoring `TORCH_DISABLE_NATIVE_FUNCOL` and only use native funcol ops. Specifically, this PR: - Removed `use_native_funcol()`. - Removed the code path in the Python APIs when `use_native_funcol()` is `False`. - Changed the CI tests that run on both native funcol and legacy funcol through the Python API to only run with native funcol. ## Test Changes `test_functional_api.py` - Removed the tests where only one of output_split_sizes or input_split_sizes is specified. This behavior is unreliable and has been removed from the native funcol. - Removed `TestWaitiness` which tests an implementation detail of the legacy funcol. We have equivalent tests for native funcol in `test/distributed/test_c10d_functional_native.py` https://github.com/pytorch/pytorch/blob/b7fac76fc259394136bc77b3e39d5705919e5c4c/test/distributed/test_c10d_functional_native.py#L114-L116 `test/distributed/_tensor/test_dtensor.py` `test/distributed/_tensor/test_dtensor_compile.py` `test/distributed/test_device_mesh.py` `test/distributed/_tensor/experimental/test_tp_transform.py` `test/distributed/_tensor/test_matrix_ops.py` `test/distributed/test_inductor_collectives.py` - All these tests were previously run with both native funcol and legacy funcol. Changed to only run with native funcol. `test/distributed/test_c10d_functional_native.py` - Removed the `run_with_native_funcol` decorators. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123777 Approved by: https://github.com/wanchaol ghstack dependencies: #123776 | diff --git a/test/distributed/_composable/test_replicate_with_compiler.py b/test/distributed/_composable/test_replicate_with_compiler.py
index 9a7da14884..381610ef57 100644
--- a/test/distributed/_composable/test_replicate_with_compiler.py
+++ b/test/distributed/_composable/test_replicate_with_compiler.py
@@ -27,7 +27,6 @@ from torch.distributed.tensor.parallel import (
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
- run_with_native_funcol,
skip_if_lt_x_gpu,
skip_if_rocm,
)
@@ -279,7 +278,6 @@ class ReplicateTest(MultiProcessTestCase):
self.assertEqual(counters["inductor"]["ddp_buckets"], 3)
return code
- @run_with_native_funcol
def test_bucketing_coalesced_op(self):
torch._inductor.config._fuse_ddp_communication_passes = [
"fuse_ddp_with_coalesced_op",
@@ -312,7 +310,6 @@ class ReplicateTest(MultiProcessTestCase):
fc.run(code)
- @run_with_native_funcol
def test_bucketing_concat_op(self):
torch._inductor.config._fuse_ddp_communication_passes = [
"fuse_ddp_with_concat_op",
diff --git a/test/distributed/_tensor/experimental/test_tp_transform.py b/test/distributed/_tensor/experimental/test_tp_transform.py
index ebf6ee52bb..636870264f 100644
--- a/test/distributed/_tensor/experimental/test_tp_transform.py
+++ b/test/distributed/_tensor/experimental/test_tp_transform.py
@@ -11,13 +11,7 @@ from torch.distributed.tensor.parallel.style import (
ParallelStyle,
RowwiseParallel,
)
-from torch.testing._internal.common_distributed import (
- run_with_both_funcol_impls_with_arg,
-)
-from torch.testing._internal.common_utils import (
- instantiate_parametrized_tests,
- run_tests,
-)
+from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
@@ -58,7 +52,6 @@ class DummyModel(torch.nn.Module):
return self.bn(self.fc(x))
-@instantiate_parametrized_tests
class TensorParallelTest(DTensorTestBase):
def setUp(self) -> None:
super().setUp()
@@ -74,8 +67,7 @@ class TensorParallelTest(DTensorTestBase):
self.assertDictEqual(expected_ops_count, actual_ops_count)
@with_comms
- @run_with_both_funcol_impls_with_arg
- def test_tp_transform_with_uncovered_op(self, use_native_funcol):
+ def test_tp_transform_with_uncovered_op(self):
model = DummyModel().to(device=self.device_type)
inputs = (torch.randn(7, 3, requires_grad=False).to(device=self.device_type),)
with torch.no_grad():
@@ -96,26 +88,16 @@ class TensorParallelTest(DTensorTestBase):
tp_res = tp_model(*inputs)
self.assertEqual(res, tp_res)
# Expect all_gather to be inserted to distributed sharded fc resutls
- if use_native_funcol:
- self.assert_has_c10d_ops(
- tp_exported_program.graph_module,
- {
- "_c10d_functional.all_gather_into_tensor.default": 1,
- "_c10d_functional.wait_tensor.default": 1,
- },
- )
- else:
- self.assert_has_c10d_ops(
- tp_exported_program.graph_module,
- {
- "c10d_functional.all_gather_into_tensor.default": 1,
- "c10d_functional.wait_tensor.default": 1,
- },
- )
+ self.assert_has_c10d_ops(
+ tp_exported_program.graph_module,
+ {
+ "_c10d_functional.all_gather_into_tensor.default": 1,
+ "_c10d_functional.wait_tensor.default": 1,
+ },
+ )
@with_comms
- @run_with_both_funcol_impls_with_arg
- def test_tp_transform_e2e(self, use_native_funcol):
+ def test_tp_transform_e2e(self):
torch.manual_seed(0)
model = MLPListModule(2).to(device=self.device_type)
inputs = (torch.randn((10, 12)).to(device=self.device_type),)
@@ -144,26 +126,16 @@ class TensorParallelTest(DTensorTestBase):
tp_res = tp_model(*inputs)
self.assertEqual(res, tp_res)
# Expect all_reduce to be inserted at the end of each MLP
- if use_native_funcol:
- self.assert_has_c10d_ops(
- tp_exported_program.graph_module,
- {
- "_c10d_functional.all_reduce.default": 2,
- "_c10d_functional.wait_tensor.default": 2,
- },
- )
- else:
- self.assert_has_c10d_ops(
- tp_exported_program.graph_module,
- {
- "c10d_functional.all_reduce.default": 2,
- "c10d_functional.wait_tensor.default": 2,
- },
- )
+ self.assert_has_c10d_ops(
+ tp_exported_program.graph_module,
+ {
+ "_c10d_functional.all_reduce.default": 2,
+ "_c10d_functional.wait_tensor.default": 2,
+ },
+ )
@with_comms
- @run_with_both_funcol_impls_with_arg
- def test_tp_transform_no_bias(self, use_native_funcol):
+ def test_tp_transform_no_bias(self):
torch.manual_seed(0)
model = MLPListModule(1, bias=False).to(device=self.device_type)
inputs = (torch.randn((10, 12)).to(device=self.device_type),)
@@ -189,22 +161,13 @@ class TensorParallelTest(DTensorTestBase):
with torch.inference_mode():
tp_res = tp_model(*inputs)
self.assertEqual(res, tp_res)
- if use_native_funcol:
- self.assert_has_c10d_ops(
- tp_exported_program.graph_module,
- {
- "_c10d_functional.all_reduce.default": 1,
- "_c10d_functional.wait_tensor.default": 1,
- },
- )
- else:
- self.assert_has_c10d_ops(
- tp_exported_program.graph_module,
- {
- "c10d_functional.all_reduce.default": 1,
- "c10d_functional.wait_tensor.default": 1,
- },
- )
+ self.assert_has_c10d_ops(
+ tp_exported_program.graph_module,
+ {
+ "_c10d_functional.all_reduce.default": 1,
+ "_c10d_functional.wait_tensor.default": 1,
+ },
+ )
if __name__ == "__main__":
diff --git a/test/distributed/_tensor/test_dtensor.py b/test/distributed/_tensor/test_dtensor.py
index 4fb361915a..653dfcbb58 100644
--- a/test/distributed/_tensor/test_dtensor.py
+++ b/test/distributed/_tensor/test_dtensor.py
@@ -19,12 +19,8 @@ from torch.distributed.tensor.parallel import (
parallelize_module,
RowwiseParallel,
)
-from torch.testing._internal.common_distributed import run_with_both_funcol_impls
-from torch.testing._internal.common_utils import (
- instantiate_parametrized_tests,
- run_tests,
-)
+from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
@@ -52,10 +48,8 @@ class DummyMLP(torch.nn.Module):
self.net2.bias.fill_(1.2)
-@instantiate_parametrized_tests
class DTensorTest(DTensorTestBase):
@with_comms
- @run_with_both_funcol_impls
def test_dtensor_constructor(self):
device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
placements = [Shard(0)]
@@ -84,7 +78,6 @@ class DTensorTest(DTensorTestBase):
)
@with_comms
- @run_with_both_funcol_impls
def test_meta_dtensor(self):
device_mesh = self.build_device_mesh()
dist_specs = [[Shard(0)], [Replicate()]]
@@ -108,7 +101,6 @@ class DTensorTest(DTensorTestBase):
self.assertEqual(meta_dtensor.to_local(), value_tensor)
@with_comms
- @run_with_both_funcol_impls
def test_modules_w_meta_dtensor(self):
model = DummyMLP("meta")
device_mesh = self.build_device_mesh()
@@ -144,7 +136,6 @@ class DTensorTest(DTensorTestBase):
self.assertEqual(model_tp(inp), model_regular_tp(inp))
@with_comms
- @run_with_both_funcol_impls
def test_dtensor_stride(self):
device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
shard0_spec = [Shard(0)]
@@ -171,7 +162,6 @@ class DTensorTest(DTensorTestBase):
self.assertEqual(dist_tensor.stride(), global_stride)
@with_comms
- @run_with_both_funcol_impls
def test_from_local(self):
device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
placements = [Shard(0)]
@@ -209,7 +199,6 @@ class DTensorTest(DTensorTestBase):
self.assertEqual(local_tensor_with_grad.grad, expected_grad)
@with_comms
- @run_with_both_funcol_impls
def test_from_local_uneven_sharding(self):
mesh_shape = (self.world_size,)
device_mesh = init_device_mesh(self.device_type, mesh_shape)
@@ -236,7 +225,6 @@ class DTensorTest(DTensorTestBase):
self.assertEqual(dtensor.stride(), global_tensor.stride())
@with_comms
- @run_with_both_funcol_impls
def test_from_local_uneven_sharding_raise_error(self):
mesh_shape = (self.world_size,)
device_mesh = init_device_mesh(self.device_type, mesh_shape)
@@ -272,7 +260,6 @@ class DTensorTest(DTensorTestBase):
)
@with_comms
- @run_with_both_funcol_impls
def test_from_local_negative_dim(self):
device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
placements = [Shard(-1)]
@@ -332,7 +319,6 @@ class DTensorTest(DTensorTestBase):
self.assertEqual(sharded_tensor.grad.stride(), [1, 3 * self.world_size])
@with_comms
- @run_with_both_funcol_impls
def test_to_local_grad_hint(self):
device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
placements = (Shard(0),)
@@ -358,7 +344,6 @@ class DTensorTest(DTensorTestBase):
self.assertEqual(replica_grad, global_tensor * self.world_size)
@with_comms
- @run_with_both_funcol_impls
def test_full_tensor_sync(self):
device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
placements = (Shard(0),)
@@ -370,7 +355,6 @@ class DTensorTest(DTensorTestBase):
self.assertEqual(full_out, global_tensor)
@with_comms
- @run_with_both_funcol_impls
def test_full_tensor_grad_hint(self):
device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
placements = (Shard(0),)
@@ -384,7 +368,6 @@ class DTensorTest(DTensorTestBase):
self.assertEqual(replica_grad, global_tensor * self.world_size)
@with_comms
- @run_with_both_funcol_impls
def test_dtensor_new_empty_strided(self):
device_mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
local_tensor = torch.randn(8, 8, requires_grad=True, device=self.device_type)
@@ -408,7 +391,6 @@ class DTensorTest(DTensorTestBase):
)
@with_comms
- @run_with_both_funcol_impls
def test_dtensor_async_output(self):
# Tests that if the output of some dtensor operations isn't used in any compute,
# the output should be an AsyncCollectiveTensor (representing the fact that
@@ -451,7 +433,6 @@ class DTensorTest(DTensorTestBase):
self.assertEqual(sync_out.to_local(), x)
@with_comms
- @run_with_both_funcol_impls
def test_from_local_then_to_local(self):
# this test ensure end to end from torch.Tensor -> dist tensor -> torch.Tensor works
device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
@@ -485,7 +466,6 @@ class DTensorTest(DTensorTestBase):
self.assertEqual(local_tensor_with_grad.grad, expected_grad)
@with_comms
- @run_with_both_funcol_impls
def test_dtensor_spec_read_only_after_set(self):
device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
placements = [Shard(0)]
@@ -498,7 +478,6 @@ class DTensorTest(DTensorTestBase):
self.assertNotEqual(sharded_tensor.placements, placements)
@with_comms
- @run_with_both_funcol_impls
def test_dtensor_spec_hash(self):
device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
placements = [Shard(0)]
@@ -519,7 +498,6 @@ class DTensorTest(DTensorTestBase):
self.assertNotEqual(hash(sharded_tensor._spec), hash(replica_tensor._spec))
@with_comms
- @run_with_both_funcol_impls
def test_dtensor_properties(self):
device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
placements = [Shard(0)]
@@ -528,7 +506,6 @@ class DTensorTest(DTensorTestBase):
self.assertEqual(sharded_tensor.device.type, self.device_type)
@with_comms
- @run_with_both_funcol_impls
def test_dtensor_save_load(self):
import io
@@ -543,7 +520,6 @@ class DTensorTest(DTensorTestBase):
self.assertEqual(sharded_tensor, reloaded_st)
-@instantiate_parametrized_tests
class DTensorMeshTest(DTensorTestBase):
@property
def world_size(self):
@@ -556,7 +532,6 @@ class DTensorMeshTest(DTensorTestBase):
self.assertEqual(tensor, exp_out_of_mesh)
@with_comms
- @run_with_both_funcol_impls
def test_dtensor_device_mesh_device_conversion(self):
# construct a cuda device mesh
mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
@@ -570,7 +545,6 @@ class DTensorMeshTest(DTensorTestBase):
self.assertEqual(dist_tensor.to_local().device.type, self.device_type)
@with_comms
- @run_with_both_funcol_impls
def test_dtensor_api_device_mesh_context_manager(self):
with DeviceMesh(self.device_type, list(range(self.world_size))) as mesh:
placements = [Shard(0)]
@@ -610,7 +584,6 @@ class DTensorMeshTest(DTensorTestBase):
self.assertEqual(sharded_after_2d.to_local().shape, torch.Size([3, 3]))
@with_comms
- @run_with_both_funcol_impls
def test_dtensor_2d_mesh(self):
mesh_tensor = torch.arange(self.world_size).reshape(2, 4)
# construct a cuda device mesh
@@ -634,7 +607,6 @@ class DTensorMeshTest(DTensorTestBase):
self.assertEqual(dist_tensor.size(), torch.Size([3 * self.world_size, 3]))
@with_comms
- @run_with_both_funcol_impls
def test_device_mesh_nd(self):
# construct a cuda device mesh
mesh_tensor = torch.arange(self.world_size).reshape(2, 2, 2)
@@ -656,7 +628,6 @@ class DTensorMeshTest(DTensorTestBase):
self.assertEqual(dist_tensor.to_local().device.type, self.device_type)
@with_comms
- @run_with_both_funcol_impls
def test_dtensor_spec_local_shard_offset(self):
device_mesh = DeviceMesh(
self.device_type, torch.arange(self.world_size).reshape(2, 4)
@@ -696,7 +667,6 @@ class DTensorMeshTest(DTensorTestBase):
self.assertEqual(expected_shard_offsets, offset)
@with_comms
- @run_with_both_funcol_impls
def test_from_local_sub_mesh(self):
mesh = DeviceMesh(self.device_type, [0, 2])
local_tensor = torch.ones(3, 4)
@@ -724,7 +694,6 @@ class DTensorMeshTest(DTensorTestBase):
)
@with_comms
- @run_with_both_funcol_impls
def test_default_value_sub_mesh(self):
mesh = DeviceMesh(self.device_type, [0, 2])
@@ -763,7 +732,6 @@ class DTensorMeshTest(DTensorTestBase):
)
@with_comms
- @run_with_both_funcol_impls
def test_redistribute_sub_mesh(self):
mesh = DeviceMesh(self.device_type, [0, 2])
@@ -780,7 +748,6 @@ class DTensorMeshTest(DTensorTestBase):
)
@with_comms
- @run_with_both_funcol_impls
def test_implicit_replication(self):
mesh = init_device_mesh(self.device_type, (self.world_size,))
local_tensor1 = torch.ones(4, 3)
@@ -797,7 +764,6 @@ class DTensorMeshTest(DTensorTestBase):
self.assertEqual(local_shard, torch.ones(4, 3) + torch.ones(3))
-@instantiate_parametrized_tests
class TestDTensorPlacementTypes(DTensorTestBase):
@property
def world_size(self):
@@ -813,7 +779,6 @@ class TestDTensorPlacementTypes(DTensorTestBase):
return tensor
@with_comms
- @run_with_both_funcol_impls
def test_split_tensor_1D(self) -> None:
mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
shard_placement = Shard(0)
diff --git a/test/distributed/_tensor/test_dtensor_compile.py b/test/distributed/_tensor/test_dtensor_compile.py
index b336e1ae05..f9ad0278d7 100644
--- a/test/distributed/_tensor/test_dtensor_compile.py
+++ b/test/distributed/_tensor/test_dtensor_compile.py
@@ -33,11 +33,7 @@ from torch.distributed.tensor.parallel import (
PrepareModuleOutput,
RowwiseParallel,
)
-from torch.testing._internal.common_distributed import (
- run_with_both_funcol_impls,
- run_with_both_funcol_impls_with_arg,
- skip_if_lt_x_gpu,
-)
+from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
@@ -84,7 +80,6 @@ aot_eager_graph = aot_autograd(
)
-@instantiate_parametrized_tests
class TestDTensorCompile(torch._dynamo.test_case.TestCase):
def setUp(self):
super().setUp()
@@ -105,7 +100,6 @@ class TestDTensorCompile(torch._dynamo.test_case.TestCase):
def world_size(self) -> int:
return 2
- @run_with_both_funcol_impls
def test_placement_compile(self):
def fn(x):
a = 0
@@ -132,7 +126,6 @@ class TestDTensorCompile(torch._dynamo.test_case.TestCase):
compiled_out = compiled_fn(x)
self.assertEqual(opt_fn, compiled_out)
- @run_with_both_funcol_impls
def test_device_mesh_compile(self):
def fn(x):
# test size()
@@ -153,7 +146,6 @@ class TestDTensorCompile(torch._dynamo.test_case.TestCase):
compiled_out = compiled_fn(mesh)
self.assertEqual(opt_fn, compiled_out)
- @run_with_both_funcol_impls
def test_fakify_dtensor(self):
mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
@@ -168,7 +160,6 @@ class TestDTensorCompile(torch._dynamo.test_case.TestCase):
res = opt_fn(x)
self.assertEqual(res, ref)
- @run_with_both_funcol_impls
def test_dynamo_dtensor(self):
mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
@@ -183,7 +174,6 @@ class TestDTensorCompile(torch._dynamo.test_case.TestCase):
res = opt_fn(x)
self.assertEqual(res, ref)
- @run_with_both_funcol_impls
def test_dtensor_attribute_access_on_intermediate(self):
mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
@@ -219,7 +209,6 @@ class TestDTensorCompile(torch._dynamo.test_case.TestCase):
out = torch.compile(fn, backend="aot_eager", fullgraph=True)(x, y, z)
out.contiguous().sum().backward()
- @run_with_both_funcol_impls
def test_dynamo_dtensor_from_local(self):
mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
@@ -329,7 +318,6 @@ class TestDTensorCompile(torch._dynamo.test_case.TestCase):
out_dt = torch.matmul(tmp_dt, x_dt).permute(0, 2, 1)
out_dt.sum().backward()
- @run_with_both_funcol_impls
def test_dynamo_dtensor_from_local_redistribute(self):
mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
@@ -403,8 +391,7 @@ class TestDTensorCompile(torch._dynamo.test_case.TestCase):
# TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor
@patch.object(torch._inductor.config, "compile_threads", 1)
@patch.object(torch._inductor.config, "reorder_for_compute_comm_overlap", True)
- @run_with_both_funcol_impls_with_arg
- def test_tp_compile_comm_reordering(self, use_native_funcol):
+ def test_tp_compile_comm_reordering(self):
class FakeAttention(nn.Module):
def __init__(self):
super().__init__()
@@ -466,23 +453,13 @@ class TestDTensorCompile(torch._dynamo.test_case.TestCase):
self.assertEqual(cnt.frame_count, 1)
code = run_and_get_triton_code(compiled_model, inp)
- if use_native_funcol:
- FileCheck().check(
- "buf0 = torch.ops._c10d_functional.all_gather_into_tensor.default(primal"
- ).check("buf1 = torch.ops._c10d_functional.wait_tensor.default(buf0").check(
- "extern_kernels.mm(buf0,"
- ).run(
- code
- )
- else:
- # Check that `buf2` is correctly waited on before first use.
- # fmt: off
- FileCheck() \
- .check("buf1_work = dist.all_gather_into_tensor(buf1[0]") \
- .check("buf2 = buf1[0]") \
- .check("buf2 = _wait_tensor(buf2)") \
- .check("extern_kernels.mm(buf2,") \
- .run(code)
+ FileCheck().check(
+ "buf0 = torch.ops._c10d_functional.all_gather_into_tensor.default(primal"
+ ).check("buf1 = torch.ops._c10d_functional.wait_tensor.default(buf0").check(
+ "extern_kernels.mm(buf0,"
+ ).run(
+ code
+ )
@instantiate_parametrized_tests
@@ -493,7 +470,6 @@ class TestDTensorCompileE2E(DTensorTestBase):
@with_comms
@parametrize("is_seq_parallel", [True, False])
- @run_with_both_funcol_impls
def test_tp_compile_fullgraph(self, is_seq_parallel):
mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
@@ -554,7 +530,6 @@ class TestDTensorCompileE2E(DTensorTestBase):
@with_comms
@skip_if_lt_x_gpu(4)
- @run_with_both_funcol_impls
def test_2d_fsdp_tp_compile(self):
data_parallel_size = 2
model = SimpleModel(self.device_type)
@@ -606,7 +581,6 @@ class TestDTensorCompileE2E(DTensorTestBase):
@with_comms
@skip_if_lt_x_gpu(4)
- @run_with_both_funcol_impls
def test_2d_fsdp_tp_ac_compile(self):
dp_degree = 2
tp_degree = self.world_size // dp_degree
@@ -658,7 +632,6 @@ class TestDTensorCompileE2E(DTensorTestBase):
@with_comms
@skip_if_lt_x_gpu(4)
- @run_with_both_funcol_impls
def test_compile_dtensor_redistribute_backward(self):
mesh = DeviceMesh(device_type="cuda", mesh=torch.arange(self.world_size))
diff --git a/test/distributed/_tensor/test_matrix_ops.py b/test/distributed/_tensor/test_matrix_ops.py
index 0d83d12e95..b303157acf 100644
--- a/test/distributed/_tensor/test_matrix_ops.py
+++ b/test/distributed/_tensor/test_matrix_ops.py
@@ -14,11 +14,7 @@ from torch.distributed._tensor.placement_types import (
Replicate,
Shard,
)
-from torch.testing._internal.common_distributed import run_with_both_funcol_impls
-from torch.testing._internal.common_utils import (
- instantiate_parametrized_tests,
- run_tests,
-)
+from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_unless_torch_gpu,
@@ -26,7 +22,6 @@ from torch.testing._internal.distributed._tensor.common_dtensor import (
)
-@instantiate_parametrized_tests
class DistMatrixOpsTest(DTensorTestBase):
@with_comms
def test_addmm(self):
@@ -138,7 +133,6 @@ class DistMatrixOpsTest(DTensorTestBase):
self.assertEqual(tranposed_mat2.placements, shard_spec)
@with_comms
- @run_with_both_funcol_impls
def test_t_partial(self):
device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
diff --git a/test/distributed/test_c10d_functional_native.py b/test/distributed/test_c10d_functional_native.py
index f3be3ed765..99062d1bab 100644
--- a/test/distributed/test_c10d_functional_native.py
+++ b/test/distributed/test_c10d_functional_native.py
@@ -22,7 +22,6 @@ from torch.distributed._functional_collectives import (
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
- run_with_native_funcol,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
@@ -89,7 +88,6 @@ class TestWithNCCL(MultiProcessTestCase):
torch._C._distributed_c10d._register_process_group("default", dist.group.WORLD)
@skip_if_lt_x_gpu(2)
- @run_with_native_funcol
def test_all_reduce_single(self) -> None:
self._init_process_group()
@@ -116,7 +114,6 @@ class TestWithNCCL(MultiProcessTestCase):
assert output.completed
@skip_if_lt_x_gpu(2)
- @run_with_native_funcol
def test_all_reduce_single_(self) -> None:
self._init_process_group()
@@ -132,7 +129,6 @@ class TestWithNCCL(MultiProcessTestCase):
assert output.eq(expect).all()
@skip_if_lt_x_gpu(2)
- @run_with_native_funcol
def test_all_reduce_coalesced(self) -> None:
self._init_process_group()
@@ -162,7 +158,6 @@ class TestWithNCCL(MultiProcessTestCase):
assert output.completed
@skip_if_lt_x_gpu(2)
- @run_with_native_funcol
def test_all_reduce_coalesced_(self) -> None:
self._init_process_group()
@@ -181,7 +176,6 @@ class TestWithNCCL(MultiProcessTestCase):
assert output.eq(sum(self.ranks) / self.world_size * i).all()
@skip_if_lt_x_gpu(2)
- @run_with_native_funcol
def test_all_gather_into_tensor_single(self) -> None:
self._init_process_group()
@@ -213,7 +207,6 @@ class TestWithNCCL(MultiProcessTestCase):
assert output.completed
@skip_if_lt_x_gpu(2)
- @run_with_native_funcol
def test_all_gather_into_tensor_coalesced(self) -> None:
self._init_process_group()
@@ -250,7 +243,6 @@ class TestWithNCCL(MultiProcessTestCase):
assert output.completed
@skip_if_lt_x_gpu(2)
- @run_with_native_funcol
def test_reduce_scatter_tensor_single(self) -> None:
self._init_process_group()
@@ -277,7 +269,6 @@ class TestWithNCCL(MultiProcessTestCase):
assert output.completed
@skip_if_lt_x_gpu(2)
- @run_with_native_funcol
def test_reduce_scatter_tensor_coalesced(self) -> None:
self._init_process_group()
@@ -305,7 +296,6 @@ class TestWithNCCL(MultiProcessTestCase):
assert output.completed
@skip_if_lt_x_gpu(2)
- @run_with_native_funcol
def test_all_to_all_single(self) -> None:
self._init_process_group()
torch.cuda.set_device(self.device)
@@ -341,7 +331,6 @@ class TestWithNCCL(MultiProcessTestCase):
assert output.completed
@skip_if_lt_x_gpu(2)
- @run_with_native_funcol
def test_broadcast(self) -> None:
self._init_process_group()
@@ -368,7 +357,6 @@ class TestWithNCCL(MultiProcessTestCase):
assert output.completed
@skip_if_lt_x_gpu(2)
- @run_with_native_funcol
def test_unwaited(self) -> None:
# Verify that the process can terminate gracefully
# even with unwaited tensors
@@ -384,7 +372,6 @@ class TestWithNCCL(MultiProcessTestCase):
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@skip_if_lt_x_gpu(2)
@fresh_inductor_cache()
- @run_with_native_funcol
def test_threading(self):
self._init_process_group()
device = torch.device(f"cuda:{self.rank}")
@@ -448,7 +435,6 @@ class CompileTest(TestCase):
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@fresh_inductor_cache()
- @run_with_native_funcol
def test_inductor_all_reduce_single(self):
def func(arg: torch.Tensor) -> torch.Tensor:
buf0 = arg + 42
@@ -485,7 +471,6 @@ class CompileTest(TestCase):
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@fresh_inductor_cache()
- @run_with_native_funcol
def test_inductor_all_reduce_coalesced(self):
def func(args: List[torch.Tensor]) -> torch.Tensor:
bufs = [arg + 42 for arg in args]
@@ -531,7 +516,6 @@ class CompileTest(TestCase):
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@fresh_inductor_cache()
- @run_with_native_funcol
def test_inductor_inplace_op_on_view(self):
def func(arg: torch.Tensor) -> torch.Tensor:
buf0 = (arg + 10)[:2]
@@ -559,7 +543,6 @@ class CompileTest(TestCase):
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@fresh_inductor_cache()
- @run_with_native_funcol
def test_inductor_reuse_buffer_after_inplace_collective(self):
def func(arg: torch.Tensor) -> torch.Tensor:
# Expect allocation
@@ -594,7 +577,6 @@ class CompileTest(TestCase):
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@fresh_inductor_cache()
- @run_with_native_funcol
def test_inductor_all_gather_into_tensor_single(self):
def func(arg: torch.Tensor) -> torch.Tensor:
ag0 = funcol.all_gather_tensor(arg, 0, "0")
@@ -621,7 +603,6 @@ class CompileTest(TestCase):
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@fresh_inductor_cache()
- @run_with_native_funcol
def test_inductor_all_gather_into_tensor_coalesced(self):
def func(args: List[torch.Tensor]) -> torch.Tensor:
ag0 = funcol.all_gather_into_tensor_coalesced(args, "0")
@@ -656,7 +637,6 @@ class CompileTest(TestCase):
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@fresh_inductor_cache()
- @run_with_native_funcol
def test_inductor_reduce_scatter_tensor_single(self):
def func(arg: torch.Tensor) -> torch.Tensor:
rs0 = funcol.reduce_scatter_tensor(arg, "avg", 0, "0")
@@ -683,7 +663,6 @@ class CompileTest(TestCase):
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@fresh_inductor_cache()
- @run_with_native_funcol
def test_inductor_reduce_scatter_tensor_coalesced(self):
def func(args: List[torch.Tensor]) -> torch.Tensor:
rs0 = funcol.reduce_scatter_tensor_coalesced(
@@ -720,7 +699,6 @@ class CompileTest(TestCase):
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@fresh_inductor_cache()
- @run_with_native_funcol
def test_inductor_all_to_all_single(self):
def _tolist_with_constrain_as_size(tensor):
lst = tensor.tolist()
@@ -769,7 +747,6 @@ class CompileTest(TestCase):
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@fresh_inductor_cache()
- @run_with_native_funcol
def test_inductor_broadcast(self):
def func(arg: torch.Tensor) -> torch.Tensor:
buf0 = arg + 42
@@ -806,7 +783,6 @@ class CompileTest(TestCase):
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@fresh_inductor_cache()
- @run_with_native_funcol
def test_ranks_and_tag(self):
def func(arg: torch.Tensor) -> torch.Tensor:
buf0 = arg + 42
diff --git a/test/distributed/test_device_mesh.py b/test/distributed/test_device_mesh.py
index fbfc783e5e..a98916a922 100644
--- a/test/distributed/test_device_mesh.py
+++ b/test/distributed/test_device_mesh.py
@@ -22,11 +22,7 @@ from torch.distributed.distributed_c10d import (
is_nccl_available,
ProcessGroup,
)
-from torch.testing._internal.common_distributed import run_with_both_funcol_impls
-from torch.testing._internal.common_utils import (
- instantiate_parametrized_tests,
- run_tests,
-)
+from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_unless_torch_gpu,
@@ -54,13 +50,11 @@ def _set_env_var(addr="localhost", port="25364", world_size=1, rank=0):
os.environ["RANK"] = f"{rank}"
-@instantiate_parametrized_tests
class DeviceMeshTest(DTensorTestBase):
@property
def world_size(self):
return 4
- @run_with_both_funcol_impls
def test_init_process_group(self):
device_type = _get_device_type(self.world_size)
mesh_tensor = torch.arange(4).reshape(2, 2)
@@ -78,7 +72,6 @@ class DeviceMeshTest(DTensorTestBase):
device_mesh = DeviceMesh(self.device_type, mesh)
@with_comms
- @run_with_both_funcol_impls
def test_get_group(self):
mesh_shape = (2, self.world_size // 2)
mesh_2d = init_device_mesh(
@@ -99,7 +92,6 @@ class DeviceMeshTest(DTensorTestBase):
self.assertEqual(mesh_2d.get_group("tp"), tp_mesh.get_group())
@with_comms
- @run_with_both_funcol_impls
def test_get_local_rank_raises_exception(self):
mesh_shape = (2, self.world_size // 2)
mesh_2d = init_device_mesh(
@@ -113,7 +105,6 @@ class DeviceMeshTest(DTensorTestBase):
local_rank = mesh_2d.get_local_rank()
@with_comms
- @run_with_both_funcol_impls
def test_get_local_rank(self):
mesh_shape = (2, self.world_size // 2)
mesh_2d = init_device_mesh(
@@ -128,7 +119,6 @@ class DeviceMeshTest(DTensorTestBase):
self.assertEqual(tp_mesh.get_local_rank(), mesh_2d.get_local_rank("tp"))
@with_comms
- @run_with_both_funcol_impls
def test_device_mesh_2d(self):
mesh_tensor = torch.arange(4).reshape(2, 2)
# construct a cuda device mesh
@@ -153,7 +143,6 @@ class DeviceMeshTest(DTensorTestBase):
)
self.assertEqual(global_ranks, current_rank_expected_group_ranks)
- @run_with_both_funcol_impls
def test_fake_pg_device_mesh(self):
fake_store = FakeStore()
init_process_group("fake", store=fake_store, rank=0, world_size=self.world_size)
@@ -173,7 +162,6 @@ class DeviceMeshTestNDim(DTensorTestBase):
return 8
@with_comms
- @run_with_both_funcol_impls
def test_device_mesh_nd(self):
# construct a cuda device mesh
mesh_tensor = torch.arange(8).reshape(2, 2, 2)
@@ -197,7 +185,6 @@ class DeviceMeshTestNDim(DTensorTestBase):
self.assertEqual(global_ranks, ranks.tolist())
@with_comms
- @run_with_both_funcol_impls
def test_device_mesh_hash(self):
mesh_tensor_2d = torch.arange(8).reshape(4, 2)
mesh = DeviceMesh(self.device_type, mesh_tensor_2d)
@@ -215,7 +202,6 @@ class InitDeviceMeshTest(DTensorTestBase):
return 8
@with_comms
- @run_with_both_funcol_impls
def test_init_device_mesh(self):
mesh_shape = (2, 4)
ref_mesh = DeviceMesh(self.device_type, torch.arange(8).view(mesh_shape))
@@ -233,7 +219,6 @@ class InitDeviceMeshTest(DTensorTestBase):
self.assertEqual(mesh_2d, ref_mesh)
@with_comms
- @run_with_both_funcol_impls
def test_raises_duplicate_mesh_dim_names(self):
with self.assertRaisesRegex(
RuntimeError,
@@ -246,7 +231,6 @@ class InitDeviceMeshTest(DTensorTestBase):
)
@with_comms
- @run_with_both_funcol_impls
def test_raises_mesh_shape_mesh_dim_names_mismatch(self):
with self.assertRaisesRegex(
RuntimeError,
@@ -259,21 +243,18 @@ class InitDeviceMeshTest(DTensorTestBase):
)
-@instantiate_parametrized_tests
class TestDeviceMeshGetItem(DTensorTestBase):
@property
def world_size(self):
return 8
@with_comms
- @run_with_both_funcol_impls
def test_raises_no_mesh_dim_found(self):
with self.assertRaisesRegex(KeyError, "No `mesh_dim_names` found."):
mesh = init_device_mesh(self.device_type, (2, 4))
child_mesh = mesh["DP"]
@with_comms
- @run_with_both_funcol_impls
def test_raises_invalid_mesh_dim_name(self):
child_mesh_dim_name = "PP"
with self.assertRaisesRegex(
@@ -286,7 +267,6 @@ class TestDeviceMeshGetItem(DTensorTestBase):
child_mesh = mesh[child_mesh_dim_name]
@with_comms
- @run_with_both_funcol_impls
def test_get_item(self):
mesh_shape = (2, 4)
mesh_dim_names = ("DP", "TP")
@@ -310,7 +290,6 @@ class TestDeviceMeshGetItem(DTensorTestBase):
self.assertEqual(mesh_2d["DP"].mesh, pg_ranks_by_dim_name["DP"][dp_group_idx])
@with_comms
- @run_with_both_funcol_impls
def test_get_item_1d(self):
mesh = init_device_mesh(self.device_type, (8,), mesh_dim_names=("dp",))
# Make sure slicing out 1D mesh from a 1D mesh works.
@@ -322,7 +301,6 @@ class TestDeviceMeshGetItem(DTensorTestBase):
dp_mesh = mesh["dim0"]
@with_comms
- @run_with_both_funcol_impls
def test_cache_and_reuse_submesh_slice_result(self):
mesh = init_device_mesh(self.device_type, (2, 4), mesh_dim_names=("dp", "tp"))
@@ -340,10 +318,8 @@ class TestDeviceMeshGetItem(DTensorTestBase):
self.assertTrue(_world.group_count > ref_pg_count)
-@instantiate_parametrized_tests
class TestMeshEnv(DTensorTestBase):
@with_comms
- @run_with_both_funcol_impls
def test_get_parent_mesh(self):
mesh_shape = (2, self.world_size // 2)
mesh_dim_names = ("DP", "TP")
@@ -363,7 +339,6 @@ class TestMeshEnv(DTensorTestBase):
self.assertEqual(_mesh_resources.get_parent_mesh(mesh_1_3), None)
@with_comms
- @run_with_both_funcol_impls
def test_get_parent_mesh_dim_exist(self):
mesh_shape = (2, self.world_size // 2)
mesh_dim_names = ("DP", "TP")
@@ -375,7 +350,6 @@ class TestMeshEnv(DTensorTestBase):
self.assertEqual(_mesh_resources.get_parent_mesh_dim(mesh_2d["TP"]), 1)
@with_comms
- @run_with_both_funcol_impls
def test_get_parent_mesh_dim_not_exist(self):
mesh_shape = (self.world_size,)
mesh = init_device_mesh(self.device_type, mesh_shape)
@@ -383,7 +357,6 @@ class TestMeshEnv(DTensorTestBase):
self.assertEqual(_mesh_resources.get_parent_mesh_dim(mesh), None)
@with_comms
- @run_with_both_funcol_impls
def test_get_mesh_dim_by_name(self):
mesh_shape = (2, self.world_size // 2)
mesh_dim_names = ("DP", "TP")
@@ -395,14 +368,12 @@ class TestMeshEnv(DTensorTestBase):
self.assertEqual(_mesh_resources.get_mesh_dim_by_name(mesh_2d, "TP"), 1)
-@instantiate_parametrized_tests
class DeviceMeshCollectiveTest(DTensorTestBase):
@property
def world_size(self):
return 8
@with_comms
- @run_with_both_funcol_impls
def test_broadcast_1d(self):
mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
local_tensor = torch.ones(3, 3, device=self.device_type) * self.rank
@@ -410,7 +381,6 @@ class DeviceMeshCollectiveTest(DTensorTestBase):
self.assertEqual(local_tensor, torch.zeros(3, 3))
@with_comms
- @run_with_both_funcol_impls
def test_scatter_1d(self):
mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
scatter_tensor_shape = [3, 3, 3]
@@ -429,7 +399,6 @@ class DeviceMeshCollectiveTest(DTensorTestBase):
self.assertEqual(recv_tensor, splitted_list[mesh.get_rank()])
@with_comms
- @run_with_both_funcol_impls
def test_scatter_uneven(self):
device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
my_rank = device_mesh.get_rank()
@@ -475,7 +444,6 @@ class DeviceMeshCollectiveTest(DTensorTestBase):
self.assertEqual(scattered_tensor, tensor_splitted_list[my_rank])
@with_comms
- @run_with_both_funcol_impls
def test_all_gather_uneven(self):
device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
my_rank = device_mesh.get_rank()
@@ -514,7 +482,6 @@ class DeviceMeshCollectiveTest(DTensorTestBase):
self.assertEqual(all_gathered_tensor, tensor_to_split)
@with_comms
- @run_with_both_funcol_impls
def test_reduce_scatter_contiguous(self):
device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
my_rank = device_mesh.get_rank()
@@ -557,7 +524,6 @@ class DeviceMeshCollectiveTest(DTensorTestBase):
self.assertEqual(new_tensor_local, expected_tensor)
@with_comms
- @run_with_both_funcol_impls
def test_reduce_scatter_uneven(self):
device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
my_rank = device_mesh.get_rank()
@@ -620,7 +586,6 @@ class DeviceMeshCollectiveTest(DTensorTestBase):
)
@with_comms
- @run_with_both_funcol_impls
def test_broadcast_nd(self):
mesh_tensor = torch.arange(8).reshape(2, 2, 2)
mesh = DeviceMesh(self.device_type, mesh_tensor)
@@ -639,7 +604,6 @@ class DeviceMeshCollectiveTest(DTensorTestBase):
self.assertEqual(cloned_local_tensor, torch.ones(3, 3) * res_num)
@with_comms
- @run_with_both_funcol_impls
def test_scatter_nd(self):
mesh_tensor = torch.arange(8).reshape(2, 2, 2)
mesh = DeviceMesh(self.device_type, mesh_tensor)
@@ -662,7 +626,6 @@ class DeviceMeshCollectiveTest(DTensorTestBase):
self.assertEqual(received_tensor, torch.ones(3, 3) * self.rank)
@with_comms
- @run_with_both_funcol_impls
def test_all_to_all_1d(self):
# transpose on a 2D tensor distributed over N nodes:
mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
@@ -690,7 +653,6 @@ class DeviceMeshCollectiveTest(DTensorTestBase):
self.assertEqual(output_tensor, expected_tensor)
@with_comms
- @run_with_both_funcol_impls
def test_all_to_all_nd(self):
mesh_tensor = torch.arange(8).reshape(2, 2, 2)
mesh = DeviceMesh(self.device_type, mesh_tensor)
diff --git a/test/distributed/test_functional_api.py b/test/distributed/test_functional_api.py
index c1cc1e89e0..491da6551f 100644
--- a/test/distributed/test_functional_api.py
+++ b/test/distributed/test_functional_api.py
@@ -3,13 +3,11 @@
import os
import sys
import unittest
-import weakref
from functools import partial, wraps
import torch
import torch.distributed as dist
import torch.distributed._functional_collectives as ft_c
-import torch.distributed._functional_collectives_impl as ft_c_impl
import torch.distributed._tensor as dt
import torch.distributed.distributed_c10d as c10d
@@ -28,9 +26,6 @@ from torch.testing._internal.common_distributed import (
MultiThreadedTestCase,
TEST_SKIPS,
requires_nccl,
- run_with_both_funcol_impls,
- run_with_both_funcol_impls_with_arg,
- run_with_legacy_funcol,
)
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
@@ -237,7 +232,6 @@ class TestTraceableCollectives(MultiThreadedTestCase):
self._spawn_threads()
@parametrize("device", ["cpu", "cuda"])
- @run_with_both_funcol_impls
def test_broadcast(self, device):
if device == "cuda":
if torch.cuda.device_count() < self.world_size:
@@ -254,7 +248,6 @@ class TestTraceableCollectives(MultiThreadedTestCase):
self.assertEqual(res, torch.ones([4], device=device))
@parametrize("device", ["cpu", "cuda"])
- @run_with_both_funcol_impls
def test_all_reduce_eager(self, device):
if device == "cuda":
if torch.cuda.device_count() < self.world_size:
@@ -272,7 +265,6 @@ class TestTraceableCollectives(MultiThreadedTestCase):
self.assertEqual(res2, torch.tensor([2, 2, 2, 2], dtype=torch.float))
@parametrize("device", ["cpu", "cuda"])
- @run_with_both_funcol_impls
def test_all_reduce_coalesced_eager(self, device):
if device == "cuda":
if torch.cuda.device_count() < self.world_size:
@@ -288,7 +280,6 @@ class TestTraceableCollectives(MultiThreadedTestCase):
self.assertEqual(res[1], t1 * 4)
@parametrize("device", ["cpu", "cuda"])
- @run_with_both_funcol_impls
def test_all_gather_tensor(self, device):
if device == "cuda":
if torch.cuda.device_count() < self.world_size:
@@ -311,7 +302,6 @@ class TestTraceableCollectives(MultiThreadedTestCase):
self.assertEqual(gathered_tensor, torch.ones(output_size))
@parametrize("device", ["cpu", "cuda"])
- @run_with_both_funcol_impls
def test_all_gather_into_tensor_coalesced(self, device):
if device == "cuda":
if torch.cuda.device_count() < self.world_size:
@@ -329,7 +319,6 @@ class TestTraceableCollectives(MultiThreadedTestCase):
)
@parametrize("device", ["cpu", "cuda"])
- @run_with_both_funcol_impls
def test_reduce_scatter_tensor(self, device):
if device == "cuda":
if torch.cuda.device_count() < self.world_size:
@@ -354,7 +343,6 @@ class TestTraceableCollectives(MultiThreadedTestCase):
self.assertEqual(rs_tensor, torch.ones(input_size) * res_num)
@parametrize("device", ["cpu", "cuda"])
- @run_with_both_funcol_impls
def test_reduce_scatter_into_tensor_coalesced(self, device):
if device == "cuda":
if torch.cuda.device_count() < self.world_size:
@@ -372,17 +360,13 @@ class TestTraceableCollectives(MultiThreadedTestCase):
self.assertEqual(torch.tensor([8], device=device), res[1])
-@instantiate_parametrized_tests
class TestMetaCollectives(TestCase):
- @run_with_both_funcol_impls_with_arg
- def test_all_reduce(self, use_native_funcol):
+ def test_all_reduce(self):
x = torch.rand((2, 3, 4), device="meta")
- group = "0" if use_native_funcol else [1]
- out = ft_c.all_reduce(x, "sum", group)
+ out = ft_c.all_reduce(x, "sum", "0")
self.assertEqual(x.size(), out.size())
-@instantiate_parametrized_tests
class TestGradCollectives(MultiThreadedTestCase):
@property
def world_size(self):
@@ -392,17 +376,14 @@ class TestGradCollectives(MultiThreadedTestCase):
super().setUp()
self._spawn_threads()
- @run_with_both_funcol_impls_with_arg
- def test_all_reduce(self, use_native_funcol):
+ def test_all_reduce(self):
x = torch.rand([4], requires_grad=True)
y = torch.rand([4], requires_grad=True)
- group = "0" if use_native_funcol else [0, 1]
- out = ft_c.all_reduce(x, "sum", group)
+ out = ft_c.all_reduce(x, "sum", dist.group.WORLD)
(out + y).sum().backward()
self.assertIsNone(x.grad)
-@instantiate_parametrized_tests
class TestMakeFx(MultiThreadedTestCase):
@property
def world_size(self):
@@ -419,12 +400,10 @@ class TestMakeFx(MultiThreadedTestCase):
torch.fx._symbolic_trace._is_fx_tracing_flag = False
self.assertFalse(torch.fx._symbolic_trace.is_fx_tracing())
- @run_with_both_funcol_impls_with_arg
- def test_all_reduce_tracing(self, use_native_funcol):
+ def test_all_reduce_tracing(self):
def allred(input):
- group = "0" if use_native_funcol else [0, 1]
- return ft_c.all_reduce(input, "sum", group=group) + 1
+ return ft_c.all_reduce(input, "sum", group=dist.group.WORLD) + 1
graph = make_fx(allred)(torch.rand(4))
FileCheck().check("all_reduce").check("wait_tensor").run(str(graph.graph))
@@ -474,7 +453,6 @@ def with_comms(func=None):
return wrapper
-@instantiate_parametrized_tests
class TestCollectivesWithNCCL(MultiProcessTestCase):
def setUp(self):
super().setUp()
@@ -513,7 +491,6 @@ class TestCollectivesWithNCCL(MultiProcessTestCase):
@requires_nccl()
@with_comms()
- @run_with_both_funcol_impls
def test_all_gather_into_tensor_coalesced(self):
exit_if_lt_x_gpu(self.world_size)
@@ -529,7 +506,6 @@ class TestCollectivesWithNCCL(MultiProcessTestCase):
self.assertEqual(torch.ones([4 * dist.get_world_size()]) + 1, res[1])
@with_comms()
- @run_with_both_funcol_impls
def test_all_to_all_single(self):
device = "cuda" if BACKEND == dist.Backend.NCCL else "cpu"
mesh = dt.DeviceMesh(device, torch.arange(self.world_size))
@@ -548,7 +524,6 @@ class TestCollectivesWithNCCL(MultiProcessTestCase):
self.assertEqual(y, expected)
@with_comms()
- @run_with_both_funcol_impls
def test_all_to_all_single_1d_input(self):
device = "cuda" if BACKEND == dist.Backend.NCCL else "cpu"
mesh = dt.DeviceMesh(device, torch.arange(self.world_size))
@@ -567,43 +542,6 @@ class TestCollectivesWithNCCL(MultiProcessTestCase):
self.assertEqual(y, expected)
@with_comms()
- @run_with_legacy_funcol # native funcol doesn't support none sizes
- def test_all_to_all_single_output_split_sizes_none(self):
- device = "cuda" if BACKEND == dist.Backend.NCCL else "cpu"
- mesh = dt.DeviceMesh(device, torch.arange(self.world_size))
- rank = dist.get_rank()
-
- input_split_sizes = [1] * self.world_size
- x = torch.ones(self.world_size, self.world_size, device=device) * (rank + 1)
- y = ft_c.all_to_all_single(
- x, output_split_sizes=None, input_split_sizes=input_split_sizes, group=mesh
- )
- expected = []
- for idx, tensor in enumerate(torch.chunk(x, self.world_size)):
- expected.append(torch.full_like(tensor, (idx + 1)))
- expected = torch.cat(expected)
- self.assertEqual(y, expected)
-
- @with_comms()
- @run_with_legacy_funcol # native funcol doesn't support none sizes
- def test_all_to_all_single_input_split_sizes_none(self):
- device = "cuda" if BACKEND == dist.Backend.NCCL else "cpu"
- mesh = dt.DeviceMesh(device, torch.arange(self.world_size))
- rank = dist.get_rank()
-
- output_split_sizes = [1] * self.world_size
- x = torch.ones(self.world_size, self.world_size, device=device) * (rank + 1)
- y = ft_c.all_to_all_single(
- x, output_split_sizes=output_split_sizes, input_split_sizes=None, group=mesh
- )
- expected = []
- for idx, tensor in enumerate(torch.chunk(x, self.world_size)):
- expected.append(torch.full_like(tensor, (idx + 1)))
- expected = torch.cat(expected)
- self.assertEqual(y, expected)
-
- @with_comms()
- @run_with_legacy_funcol # native funcol doesn't support none sizes
def test_all_to_all_single_split_sizes_none(self):
device = "cuda" if BACKEND == dist.Backend.NCCL else "cpu"
mesh = dt.DeviceMesh(device, torch.arange(self.world_size))
@@ -622,7 +560,6 @@ class TestCollectivesWithNCCL(MultiProcessTestCase):
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@requires_nccl()
@with_comms()
- @run_with_both_funcol_impls
def test_tracing(self):
def allreduce(t, pg):
return ft_c.all_reduce(t, "sum", pg)
@@ -631,7 +568,6 @@ class TestCollectivesWithNCCL(MultiProcessTestCase):
compiled_allreduce(torch.randn(8, device=self.device), self.process_group)
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
- @run_with_both_funcol_impls
def test_tracing_with_fakepg(self):
exit_if_lt_x_gpu(self.world_size)
@@ -648,7 +584,6 @@ class TestCollectivesWithNCCL(MultiProcessTestCase):
allreduce(torch.randn(8, device=self.device), pg=dist.group.WORLD)
-@instantiate_parametrized_tests
class TestNCCLCollectivesWithWorldSize4(TestCollectivesWithNCCL):
@property
@@ -657,7 +592,6 @@ class TestNCCLCollectivesWithWorldSize4(TestCollectivesWithNCCL):
@requires_nccl()
@with_comms()
- @run_with_both_funcol_impls
def test_permute_tensor_with_sub_group(self):
exit_if_lt_x_gpu(self.world_size)
@@ -694,101 +628,6 @@ class TestNCCLCollectivesWithWorldSize4(TestCollectivesWithNCCL):
)
-
-class TestOpWaitiness(MultiProcessTestCase):
- @property
- def world_size(self):
- return 1
-
- def setUp(self):
- super().setUp()
- self._spawn_processes()
-
- def _init_process_group(self):
- from torch.testing._internal.distributed.fake_pg import FakeStore
- dist.init_process_group(
- backend="fake",
- world_size=self.world_size,
- rank=self.rank,
- store=FakeStore(),
- )
-
- @run_with_legacy_funcol # impl specific
- def test_wait_reduce_outstanding_work_count(self):
- self._init_process_group()
- self.assertEqual(0, ft_c_impl._outstanding_wait_count())
-
- tensor = torch.ones([4])
- res = ft_c.all_reduce(tensor, "sum", [0])
- self.assertEqual(1, ft_c_impl._outstanding_wait_count())
- self.assertTrue(ft_c_impl._tensor_needs_wait(res))
-
- res.trigger_wait()
- self.assertEqual(0, ft_c_impl._outstanding_wait_count())
- self.assertFalse(ft_c_impl._tensor_needs_wait(res))
-
- @run_with_legacy_funcol # impl specific
- def test_add_triggers_wait(self):
- self._init_process_group()
- self.assertEqual(0, ft_c_impl._outstanding_wait_count())
-
- tensor = torch.ones([4])
- res = ft_c.all_reduce(tensor, "sum", [0])
- self.assertEqual(1, ft_c_impl._outstanding_wait_count())
- self.assertTrue(ft_c_impl._tensor_needs_wait(res))
-
- foo = res + torch.ones([4])
- self.assertEqual(0, ft_c_impl._outstanding_wait_count())
- self.assertFalse(ft_c_impl._tensor_needs_wait(res))
- self.assertFalse(isinstance(foo, ft_c.AsyncCollectiveTensor))
-
- @run_with_legacy_funcol # impl specific
- def test_view_does_not_trigger_wait(self):
- self._init_process_group()
- self.assertEqual(0, ft_c_impl._outstanding_wait_count())
-
- tensor = torch.ones([4])
- res = ft_c.all_reduce(tensor, "sum", [0])
- self.assertEqual(1, ft_c_impl._outstanding_wait_count())
- self.assertTrue(ft_c_impl._tensor_needs_wait(res))
-
- foo = res.view([2, 2])
- self.assertEqual(1, ft_c_impl._outstanding_wait_count())
- self.assertTrue(ft_c_impl._tensor_needs_wait(res))
- self.assertTrue(ft_c_impl._tensor_needs_wait(foo))
- self.assertTrue(isinstance(foo, ft_c.AsyncCollectiveTensor))
-
- foo.trigger_wait()
- self.assertEqual(0, ft_c_impl._outstanding_wait_count())
-
- self.assertEqual(foo.tolist(), [[1.0, 1.0], [1.0, 1.0]])
-
- @run_with_legacy_funcol # impl specific
- def test_dead_wrapper_triggers_wait(self):
- self._init_process_group()
- self.assertEqual(0, ft_c_impl._outstanding_wait_count())
-
- tensor = torch.ones([4])
- res = ft_c.all_reduce(tensor, "sum", [0])
-
- wr = weakref.ref(res)
- self.assertTrue(wr() is not None)
- res = None
- self.assertTrue(wr() is None)
- self.assertEqual(0, ft_c_impl._outstanding_wait_count())
-
- @run_with_legacy_funcol # impl specific
- def test_dead_wrapper_plus_view(self):
- self._init_process_group()
- self.assertEqual(0, ft_c_impl._outstanding_wait_count())
-
- tensor = torch.ones([4])
- res = ft_c.all_reduce(tensor, "sum", [0])
- res = res.view([2, 2])
- self.assertEqual(1, ft_c_impl._outstanding_wait_count())
- res = None
- self.assertEqual(0, ft_c_impl._outstanding_wait_count())
-
@instantiate_parametrized_tests
class TestFunctionalAutograd(MultiThreadedTestCase):
def setUp(self):
diff --git a/test/distributed/test_inductor_collectives.py b/test/distributed/test_inductor_collectives.py
index af3d7ddd25..5662ae964c 100644
--- a/test/distributed/test_inductor_collectives.py
+++ b/test/distributed/test_inductor_collectives.py
@@ -17,9 +17,6 @@ from torch.testing._internal.common_distributed import (
DynamoDistributedMultiProcTestCase,
_dynamo_dist_per_rank_init,
requires_nccl,
- run_with_legacy_funcol,
- run_with_both_funcol_impls,
- run_with_both_funcol_impls_with_arg,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
@@ -60,7 +57,6 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
@skip_if_lt_x_gpu(2)
# TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor
@patch.object(torch._inductor.config, "compile_threads", 1)
- @run_with_legacy_funcol
def test_broadcast_inductor(self):
"""
Testing if broadcast works correctly when using inductor
@@ -97,7 +93,6 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
@skip_if_lt_x_gpu(2)
# TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor
@patch.object(torch._inductor.config, "compile_threads", 1)
- @run_with_legacy_funcol
def test_allreduce_inductor(self):
"""
This is matmul/cat/allreduce is a pattern we aim to optimize.
@@ -140,7 +135,6 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
@skip_if_lt_x_gpu(2)
# TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor
@patch.object(torch._inductor.config, "compile_threads", 1)
- @run_with_legacy_funcol
def test_eager_allreduce_inductor_wait(self):
def eager_func(a, b, c, d, *, tag, ranks, group_size):
@@ -180,7 +174,6 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
@skip_if_lt_x_gpu(2)
# TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor
@patch.object(torch._inductor.config, "compile_threads", 1)
- @run_with_legacy_funcol
def test_inductor_allreduce_eager_wait(self):
def inductor_func(a, b, c, d, *, tag, ranks, group_size):
@@ -219,7 +212,6 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
@patch.object(torch._inductor.config, "allow_buffer_reuse", True)
# TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor
@patch.object(torch._inductor.config, "compile_threads", 1)
- @run_with_legacy_funcol
def test_allreduce_input_buffer_reuse(self):
def func(a, *, tag, ranks, group_size):
ar = _functional_collectives.all_reduce(a, "sum", ranks, tag)
@@ -239,7 +231,6 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
@skip_if_lt_x_gpu(2)
# TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor
@patch.object(torch._inductor.config, "compile_threads", 1)
- @run_with_legacy_funcol
def test_permute_tensor(self):
def func(tensor, src_dst_pairs, *, tag, ranks, group_size):
return _functional_collectives.permute_tensor(tensor, src_dst_pairs, ranks, tag)
@@ -269,7 +260,6 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
@patch.object(torch._inductor.config, "allow_buffer_reuse", True)
# TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor
@patch.object(torch._inductor.config, "compile_threads", 1)
- @run_with_legacy_funcol
def test_allgather_output_buffer_reuse(self):
class Model(torch.nn.Module):
def __init__(self, *args, **kwargs) -> None:
@@ -295,7 +285,6 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
@skip_if_lt_x_gpu(2)
# TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor
@patch.object(torch._inductor.config, "compile_threads", 1)
- @run_with_legacy_funcol
def test_allgather_contiguous_input(self):
class Model(torch.nn.Module):
def __init__(self, *args, **kwargs) -> None:
@@ -322,7 +311,6 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
@skip_if_lt_x_gpu(2)
# TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor
@patch.object(torch._inductor.config, "compile_threads", 1)
- @run_with_legacy_funcol
def test_allgather_into_tensor_inductor(self):
"""
This is matmul/cat/allreduce is a pattern we aim to optimize.
@@ -355,7 +343,6 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
@skip_if_lt_x_gpu(2)
# TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor
@patch.object(torch._inductor.config, "compile_threads", 1)
- @run_with_legacy_funcol
def test_reduce_scatter_tensor_inductor(self):
def example(a, b, *, tag, ranks, group_size):
c = torch.matmul(a, b)
@@ -386,7 +373,6 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
@patch.object(torch._dynamo.config, "capture_scalar_outputs", True)
# TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor
@patch.object(torch._inductor.config, "compile_threads", 1)
- @run_with_legacy_funcol
def test_all_to_all_single_inductor(self):
def example(inp, input_split_sizes_tensor, output_split_sizes_tensor, *, tag, ranks, group_size):
input_split_sizes = _tolist_with_constrain_as_size(input_split_sizes_tensor)
@@ -472,7 +458,6 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
@patch.object(torch._dynamo.config, "capture_scalar_outputs", True)
# TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor
@patch.object(torch._inductor.config, "compile_threads", 1)
- @run_with_legacy_funcol
def test_all_to_all_single_inductor_input_split_sizes_none(self):
def example(inp, output_split_sizes_tensor, *, tag, ranks, group_size):
output_split_sizes = _tolist_with_constrain_as_size(output_split_sizes_tensor)
@@ -514,7 +499,6 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
@skip_if_lt_x_gpu(2)
# TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor
@patch.object(torch._inductor.config, "compile_threads", 1)
- @run_with_legacy_funcol
def test_all_to_all_single_inductor_split_sizes_none(self):
def example(inp, *, tag, ranks, group_size):
a2a = torch.ops.c10d_functional.all_to_all_single(
@@ -560,7 +544,6 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@torch._inductor.config.patch(debug=True)
- @run_with_legacy_funcol # impl specific
def test_inductor_single_op(self):
def func(inp, *, tag, ranks, group_size):
@@ -589,7 +572,6 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@torch._inductor.config.patch(debug=True)
- @run_with_legacy_funcol # impl specific
def test_inductor_steal_buffer(self):
"""
it's ok and optimal if inductor allreduce mutates the buffer of an intermediate
@@ -627,7 +609,6 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@torch._inductor.config.patch({"debug": True, "triton.descriptive_names": False})
- @run_with_legacy_funcol # impl specific
def test_inductor_doesnt_mutate_shared(self):
"""
make sure that an intermediate that's going to be reuse isn't mutated unless copied
@@ -665,14 +646,10 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
correct = func(inputs, **self.get_world_trs())
self.assertTrue(same(out, correct))
- @run_with_both_funcol_impls_with_arg
- def test_dynamo_trace_allreduce(self, use_native_funcol):
+ def test_dynamo_trace_allreduce(self):
def func(inp):
- if use_native_funcol:
- ar = _functional_collectives.all_reduce(inp, "sum", "0")
- else:
- ar = _functional_collectives.all_reduce(inp, "sum", [0], "")
+ ar = _functional_collectives.all_reduce(inp, "sum", "0")
return ar
inputs = torch.ones(4, 4, device="cuda")
@@ -686,14 +663,10 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
self.assertEqual(counter.op_count, 2)
self.assertTrue(same(out, correct))
- @run_with_both_funcol_impls_with_arg
- def test_dynamo_trace_all_gather_tensor(self, use_native_funcol):
+ def test_dynamo_trace_all_gather_tensor(self):
def func(inp):
- if use_native_funcol:
- ar = _functional_collectives.all_gather_tensor(inp, 0, "0")
- else:
- ar = _functional_collectives.all_gather_tensor(inp, 0, [0], "")
+ ar = _functional_collectives.all_gather_tensor(inp, 0, "0")
return ar
inputs = torch.ones(4, 4, device="cuda")
@@ -707,7 +680,6 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
self.assertEqual(counter.op_count, 2)
self.assertTrue(same(out, correct))
- @run_with_both_funcol_impls
def test_dynamo_trace_all_gather_tensor_pg(self):
def func(inp, *, pg):
@@ -725,7 +697,6 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
self.assertEqual(counter.op_count, 2)
self.assertTrue(same(out, correct))
- @run_with_both_funcol_impls
def test_dynamo_rewrite_dist_all_gather(self):
def func(inp, out, *, pg):
@@ -751,7 +722,6 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
assert counter.op_count == 3
assert same(outputs, correct_outputs)
- @run_with_both_funcol_impls
def test_dynamo_rewrite_dist_all_gather_list(self):
def func(inp, out, *, pg):
@@ -774,7 +744,6 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
assert counter.frame_count == 1
assert same(outputs, correct_outputs)
- @run_with_both_funcol_impls
def test_dynamo_rewrite_dist_all_gather_args_match(self):
# Duplicated most of the structure from test_dynamo_rewrite_dist_all_gather
# except uses kwargs to ensure rewrite has matching arg names
@@ -802,7 +771,6 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
assert counter.op_count == 3
assert same(outputs, correct_outputs)
- @run_with_both_funcol_impls
def test_dynamo_rewrite_dist_reduce_scatter(self):
def func(inp, out, *, pg):
@@ -828,7 +796,6 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
assert counter.op_count == 3
assert same(outputs, correct_outputs)
- @run_with_both_funcol_impls
@parametrize(
"pg_mode",
[
@@ -878,7 +845,6 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
assert counter.op_count == 3
assert same(inputs_compiled, inputs_eager)
- @run_with_both_funcol_impls
def test_dynamo_rewrite_dist_all_to_all_single(self):
def func(output, input, pg):
@@ -902,7 +868,6 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
assert counter.frame_count == 1
assert same(output_compiled, output_eager)
- @run_with_both_funcol_impls
@parametrize(
"reduce_op",
[
@@ -940,7 +905,6 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
)
compiled(*inputs)
- @run_with_both_funcol_impls
@parametrize(
"source", [
"GroupMember.WORLD",
@@ -979,7 +943,6 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
compiled(input)
- @run_with_both_funcol_impls
def test_dynamo_support_collective_op_with_async_op_False(self):
def func(inp, out, *, pg):
@@ -1006,7 +969,6 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
assert counter.op_count == 3
assert same(outputs, correct_outputs)
- @run_with_both_funcol_impls
def test_dynamo_graphbreaks_unsupported_async_op(self):
def func(inp, out, *, pg):
@@ -1032,7 +994,6 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
assert counter.op_count == 0
assert same(outputs, correct_outputs)
- @run_with_both_funcol_impls
def test_dynamo_pg_var(self):
def func(inp, *, pg):
x = pg.rank() + 1 % pg.size()
@@ -1049,14 +1010,10 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
assert counter.op_count == 1
assert same(outputs, correct_outputs)
- @run_with_both_funcol_impls_with_arg
- def test_dynamo_trace_reduce_scatter_tensor(self, use_native_funcol):
+ def test_dynamo_trace_reduce_scatter_tensor(self):
def func(inp):
- if use_native_funcol:
- ar = _functional_collectives.reduce_scatter_tensor(inp, "sum", 0, "0")
- else:
- ar = _functional_collectives.reduce_scatter_tensor(inp, "sum", 0, [0], "")
+ ar = _functional_collectives.reduce_scatter_tensor(inp, "sum", 0, "0")
return ar
inputs = torch.ones(4, 4, device="cuda")
@@ -1070,7 +1027,6 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
self.assertEqual(counter.op_count, 2)
self.assertTrue(same(out, correct))
- @run_with_both_funcol_impls
def test_dynamo_trace_allgather_coalesced(self):
def func(inp, *, tag, ranks, group_size):
ar = torch.ops.c10d_functional.all_gather_into_tensor_coalesced(inp, tag, ranks, group_size)
@@ -1086,18 +1042,14 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
assert same(out, correct)
- @run_with_both_funcol_impls_with_arg
- def test_backwards(self, use_native_funcol):
+ def test_backwards(self):
"""
It's probably not that common to need backwards support for collectives.
However, I wanted to at least see if it was possible to support it as a design goal.
"""
def func(inp):
- if use_native_funcol:
- ar = _functional_collectives.all_reduce(inp, "sum", "0")
- else:
- ar = _functional_collectives.all_reduce(inp, "sum", [0], "")
+ ar = _functional_collectives.all_reduce(inp, "sum", "0")
return ar
input = torch.ones(4, 4, device="cuda", requires_grad=True)
@@ -1120,7 +1072,6 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@torch._inductor.config.patch({"debug": True, "triton.descriptive_names": False})
- @run_with_legacy_funcol # impl specific
def test_inductor_all_gather_coalesced(self):
"""
make sure that an intermediate that's going to be reuse isn't mutated unless copied
@@ -1167,7 +1118,6 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
@torch._inductor.config.patch({"debug": True, "triton.descriptive_names": False})
- @run_with_legacy_funcol # impl specific
def test_inductor_reduce_scatter_coalesced(self):
"""
make sure that an intermediate that's going to be reuse isn't mutated unless copied
diff --git a/torch/distributed/_functional_collectives.py b/torch/distributed/_functional_collectives.py
index 0479a78035..175e74464a 100644
--- a/torch/distributed/_functional_collectives.py
+++ b/torch/distributed/_functional_collectives.py
@@ -10,10 +10,7 @@ from torch.distributed.device_mesh import DeviceMesh
from torch.fx.experimental.proxy_tensor import get_innermost_proxy_mode
from . import _functional_collectives_impl as fun_col_impl
-from ._functional_collectives_impl import ( # noqa: F401
- _register_tensor_wrapper,
- native_funcol_enabled,
-)
+from ._functional_collectives_impl import _register_tensor_wrapper # noqa: F401
try:
from torch.utils._cxx_pytree import tree_map_only
@@ -140,10 +137,7 @@ def wait_tensor(tensor):
Waiting follows device semantics, which means blocking on CPU and synchronizing streams on CUDA.
"""
- if native_funcol_enabled():
- return torch.ops._c10d_functional.wait_tensor(tensor) # type: ignore[attr-defined]
- else:
- return torch.ops.c10d_functional.wait_tensor(tensor) # type: ignore[attr-defined]
+ return torch.ops._c10d_functional.wait_tensor(tensor) # type: ignore[attr-defined]
def broadcast(self: torch.Tensor, src: int, group: RANK_TYPES, tag: str = ""):
@@ -155,14 +149,8 @@ def broadcast(self: torch.Tensor, src: int, group: RANK_TYPES, tag: str = ""):
group (ProcessGroup or List[int]): The process group to work on.
tag (str, optional): A unique identifier for the collective. Default: empty string
"""
- if native_funcol_enabled():
- group_name = _resolve_group_name(group, tag)
- tensor = torch.ops._c10d_functional.broadcast(self, src, group_name)
- else:
- tag, rankset, group_size = _expand_group(group, tag)
- tensor = torch.ops.c10d_functional.broadcast(
- self, src, tag, rankset, group_size
- )
+ group_name = _resolve_group_name(group, tag)
+ tensor = torch.ops._c10d_functional.broadcast(self, src, group_name)
return _maybe_wrap_tensor(tensor)
@@ -183,20 +171,8 @@ def all_reduce(self: torch.Tensor, reduceOp: str, group: RANK_TYPES, tag: str =
:: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover
that information and perform collective algebraic optimization. Use other forms of input for that.
"""
- if native_funcol_enabled():
- group_name = _resolve_group_name(group, tag)
- tensor = torch.ops._c10d_functional.all_reduce(
- self, reduceOp.lower(), group_name
- )
- else:
- tag, rankset, group_size = _expand_group(group, tag)
- tensor = torch.ops.c10d_functional.all_reduce( # type: ignore[attr-defined]
- self,
- reduceOp,
- tag,
- rankset,
- group_size,
- )
+ group_name = _resolve_group_name(group, tag)
+ tensor = torch.ops._c10d_functional.all_reduce(self, reduceOp.lower(), group_name)
return _maybe_wrap_tensor(tensor)
@@ -223,20 +199,11 @@ def all_gather_tensor(
that information and perform collective algebraic optimization. Use other forms of input for that.
"""
assert self.is_contiguous()
- if native_funcol_enabled():
- group_name = _resolve_group_name(group, tag)
- group_size = c10d._get_group_size_by_name(group_name)
- tensor = torch.ops._c10d_functional.all_gather_into_tensor(
- self, group_size, group_name
- )
- else:
- tag, rankset, group_size = _expand_group(group, tag)
- tensor = torch.ops.c10d_functional.all_gather_into_tensor( # type: ignore[attr-defined]
- self,
- tag,
- rankset,
- group_size,
- )
+ group_name = _resolve_group_name(group, tag)
+ group_size = c10d._get_group_size_by_name(group_name)
+ tensor = torch.ops._c10d_functional.all_gather_into_tensor(
+ self, group_size, group_name
+ )
res = _maybe_wrap_tensor(tensor)
# TODO this should be done inside AsyncCollectiveTensor to delay the wait() call
if gather_dim != 0:
@@ -270,11 +237,8 @@ def reduce_scatter_tensor(
:: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover
that information and perform collective algebraic optimization. Use other forms of input for that.
"""
- if native_funcol_enabled():
- group_name = _resolve_group_name(group, tag)
- group_size = c10d._get_group_size_by_name(group_name)
- else:
- tag, rankset, group_size = _expand_group(group, tag)
+ group_name = _resolve_group_name(group, tag)
+ group_size = c10d._get_group_size_by_name(group_name)
assert (
self.size(scatter_dim) % group_size == 0
@@ -283,21 +247,12 @@ def reduce_scatter_tensor(
tensor_list = torch.chunk(self, group_size, dim=scatter_dim)
self = torch.cat(tensor_list)
- if native_funcol_enabled():
- tensor = torch.ops._c10d_functional.reduce_scatter_tensor(
- self,
- reduceOp.lower(),
- group_size,
- group_name, # type: ignore[possibly-undefined]
- )
- else:
- tensor = torch.ops.c10d_functional.reduce_scatter_tensor( # type: ignore[attr-defined]
- self,
- reduceOp,
- tag,
- rankset, # type: ignore[possibly-undefined]
- group_size,
- )
+ tensor = torch.ops._c10d_functional.reduce_scatter_tensor(
+ self,
+ reduceOp.lower(),
+ group_size,
+ group_name, # type: ignore[possibly-undefined]
+ )
res = _maybe_wrap_tensor(tensor)
return res
@@ -321,22 +276,12 @@ def all_reduce_coalesced(
:: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover
that information and perform collective algebraic optimization. Use other forms of input for that.
"""
- if native_funcol_enabled():
- group_name = _resolve_group_name(group, tag)
- tensor_list = torch.ops._c10d_functional.all_reduce_coalesced( # type: ignore[attr-defined]
- self,
- reduceOp.lower(),
- group_name,
- )
- else:
- tag, rankset, group_size = _expand_group(group, tag)
- tensor_list = torch.ops.c10d_functional.all_reduce_coalesced( # type: ignore[attr-defined]
- self,
- reduceOp,
- tag,
- rankset,
- group_size,
- )
+ group_name = _resolve_group_name(group, tag)
+ tensor_list = torch.ops._c10d_functional.all_reduce_coalesced( # type: ignore[attr-defined]
+ self,
+ reduceOp.lower(),
+ group_name,
+ )
return list(map(_maybe_wrap_tensor, tensor_list))
@@ -359,22 +304,13 @@ def all_gather_into_tensor_coalesced(
:: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover
that information and perform collective algebraic optimization. Use other forms of input for that.
"""
- if native_funcol_enabled():
- group_name = _resolve_group_name(group, tag)
- group_size = c10d._get_group_size_by_name(group_name)
- tensor_list = torch.ops._c10d_functional.all_gather_into_tensor_coalesced( # type: ignore[attr-defined]
- self,
- group_size,
- group_name,
- )
- else:
- tag, rankset, group_size = _expand_group(group, tag)
- tensor_list = torch.ops.c10d_functional.all_gather_into_tensor_coalesced( # type: ignore[attr-defined]
- self,
- tag,
- rankset,
- group_size,
- )
+ group_name = _resolve_group_name(group, tag)
+ group_size = c10d._get_group_size_by_name(group_name)
+ tensor_list = torch.ops._c10d_functional.all_gather_into_tensor_coalesced( # type: ignore[attr-defined]
+ self,
+ group_size,
+ group_name,
+ )
return list(map(_maybe_wrap_tensor, tensor_list))
@@ -400,11 +336,8 @@ def reduce_scatter_tensor_coalesced(
:: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover
that information and perform collective algebraic optimization. Use other forms of input for that.
"""
- if native_funcol_enabled():
- group_name = _resolve_group_name(group, tag)
- group_size = c10d._get_group_size_by_name(group_name)
- else:
- tag, rankset, group_size = _expand_group(group, tag)
+ group_name = _resolve_group_name(group, tag)
+ group_size = c10d._get_group_size_by_name(group_name)
assert len(scatter_dim) == len(inputs)
for idx, (dim, tensor) in enumerate(zip(scatter_dim, inputs)):
@@ -415,21 +348,12 @@ def reduce_scatter_tensor_coalesced(
tensor_list = torch.chunk(tensor, group_size, dim=dim)
inputs[idx] = torch.cat(tensor_list)
- if native_funcol_enabled():
- tensor_list = torch.ops._c10d_functional.reduce_scatter_tensor_coalesced( # type: ignore[attr-defined]
- inputs,
- reduceOp.lower(),
- group_size,
- group_name, # type: ignore[possibly-undefined]
- )
- else:
- tensor_list = torch.ops.c10d_functional.reduce_scatter_tensor_coalesced( # type: ignore[attr-defined]
- inputs,
- reduceOp,
- tag,
- rankset, # type: ignore[possibly-undefined]
- group_size,
- )
+ tensor_list = torch.ops._c10d_functional.reduce_scatter_tensor_coalesced( # type: ignore[attr-defined]
+ inputs,
+ reduceOp.lower(),
+ group_size,
+ group_name, # type: ignore[possibly-undefined]
+ )
return list(map(_maybe_wrap_tensor, tensor_list))
@@ -475,32 +399,21 @@ def all_to_all_single(
assert all(
isinstance(size, (int, torch.SymInt)) for size in input_split_sizes
), input_split_sizes
- if native_funcol_enabled():
- group_name = _resolve_group_name(group, tag)
- group_size = c10d._get_group_size_by_name(group_name)
- if output_split_sizes is None or input_split_sizes is None:
- assert output_split_sizes is None and input_split_sizes is None, (
- "output_split_sizes and input_split_sizes must either be "
- "specified together or both set to None"
- )
- output_split_sizes = [self.shape[0] // group_size] * group_size
- input_split_sizes = output_split_sizes
- tensor = torch.ops._c10d_functional.all_to_all_single( # type: ignore[attr-defined]
- self,
- output_split_sizes,
- input_split_sizes,
- group_name,
- )
- else:
- tag, rankset, group_size = _expand_group(group, tag)
- tensor = torch.ops.c10d_functional.all_to_all_single( # type: ignore[attr-defined]
- self,
- output_split_sizes,
- input_split_sizes,
- tag,
- rankset,
- group_size,
+ group_name = _resolve_group_name(group, tag)
+ group_size = c10d._get_group_size_by_name(group_name)
+ if output_split_sizes is None or input_split_sizes is None:
+ assert output_split_sizes is None and input_split_sizes is None, (
+ "output_split_sizes and input_split_sizes must either be "
+ "specified together or both set to None"
)
+ output_split_sizes = [self.shape[0] // group_size] * group_size
+ input_split_sizes = output_split_sizes
+ tensor = torch.ops._c10d_functional.all_to_all_single( # type: ignore[attr-defined]
+ self,
+ output_split_sizes,
+ input_split_sizes,
+ group_name,
+ )
return _maybe_wrap_tensor(tensor)
@@ -832,7 +745,6 @@ def _maybe_wrap_tensor(self) -> torch.Tensor:
if _are_we_tracing():
return wait_tensor(self)
res = AsyncCollectiveTensor(self)
- _register_tensor_wrapper(res)
return cast(torch.Tensor, res)
diff --git a/torch/distributed/_functional_collectives_impl.py b/torch/distributed/_functional_collectives_impl.py
index d628dff7c2..308b317f52 100644
--- a/torch/distributed/_functional_collectives_impl.py
+++ b/torch/distributed/_functional_collectives_impl.py
@@ -1,5 +1,4 @@
import logging
-import os
import warnings
import weakref
from typing import cast, Dict, List, Optional
@@ -7,7 +6,6 @@ from typing import cast, Dict, List, Optional
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
-from torch._logging import warning_once
"""
Moved eager kernel implementations to a separate file partly for readability and partly as it is currently
@@ -30,33 +28,6 @@ logger = logging.getLogger(__name__)
_use_native_funcol: Optional[bool] = None
-if torch._running_with_deploy():
-
- def native_funcol_enabled():
- return False
-
-else:
- from torch._dynamo import assume_constant_result
-
- @assume_constant_result
- def native_funcol_enabled():
- global _use_native_funcol
- if _use_native_funcol is None:
- # When TORCH_DISABLE_NATIVE_FUNCOL is set, fallback to py funcol
- _use_native_funcol = os.environ.get("TORCH_DISABLE_NATIVE_FUNCOL") != "1"
- if not _use_native_funcol:
- warning_once(
- logger,
- "The legacy backend for functional collective (selected via "
- "setting TORCH_DISABLE_NATIVE_FUNCOL) has been deprecated. "
- "There won't be active support for it and it will be removed "
- "before 2.14. Please switch to the new (a.k.a native) backend "
- "soon.",
- )
-
- return _use_native_funcol
-
-
data_ptr_to_work: Dict[int, "_WaitRegistration"] = dict()
work_version = 0
@@ -124,9 +95,6 @@ def _wait_reg_dec(ptr, wait_reg):
def _register_tensor_wrapper(tensor) -> None:
- if native_funcol_enabled():
- # Tensor storage -> work mapping is maintained in C++
- return
global data_ptr_to_work
# FIXME: This is almost definitely a bug.
diff --git a/torch/testing/_internal/common_distributed.py b/torch/testing/_internal/common_distributed.py
index 1ae4066aad..c9f621a90c 100644
--- a/torch/testing/_internal/common_distributed.py
+++ b/torch/testing/_internal/common_distributed.py
@@ -39,10 +39,6 @@ from torch.testing._internal.common_utils import (
TEST_WITH_TSAN,
TestCase,
)
-from torch.testing._internal.common_utils import (
- parametrize,
- subtest,
-)
from torch.testing._internal.distributed.multi_threaded_pg import (
_install_threaded_pg,
_uninstall_threaded_pg,
@@ -1299,57 +1295,3 @@ class DynamoDistributedMultiProcTestCase(MultiProcessTestCase):
self.rank = rank
self.file_name = file_name
self.run_test(test_name, parent_pipe)
-
-
-# NOTE [test parametrization utils for native funcol migration]
-#
-# Between the time we switch to the native funcol by default and the time when
-# we are confident that we can remove the legacy implementation, we want to
-# ensure that the legacy funcol remains covered by unit tests. This is to
-# prepare for any potential (but unlikely) reverts. The following utilities
-# help achieve this goal.
-#
-# run_with_{native,legacy}_funcol - mark a test to run with only
-# {native,legacy} funcol. These decorators are for impl specific tests (e.g.
-# verifying generated code with FileCheck).
-#
-# run_with_both_funcol_impls - parametrize a test to run with both legacy and
-# native funcol.
-#
-# run_with_both_funcol_impls_with_arg - same as run_with_both_funcol_impls, but
-# passes `enable_native_funcol` to the test so impl specific checks can be
-# carried out.
-def with_native_funcol(use_native_funcol: bool, remove_arg: bool):
- import torch.distributed._functional_collectives_impl as funcol_impl
-
- def decorator(fn):
- def inner(*args, **kwargs):
- if remove_arg:
- del kwargs["use_native_funcol"]
- with patch.object(funcol_impl, '_use_native_funcol', new=use_native_funcol):
- return fn(*args, **kwargs)
-
- return inner
-
- return decorator
-
-
-run_with_native_funcol = with_native_funcol(True, remove_arg=False)
-run_with_legacy_funcol = with_native_funcol(False, remove_arg=False)
-
-
-run_with_both_funcol_impls = parametrize(
- "use_native_funcol",
- [
- subtest(True, decorators=[with_native_funcol(True, remove_arg=True)]),
- subtest(False, decorators=[with_native_funcol(False, remove_arg=True)]),
- ]
-)
-
-run_with_both_funcol_impls_with_arg = parametrize(
- "use_native_funcol",
- [
- subtest(True, decorators=[with_native_funcol(True, remove_arg=False)]),
- subtest(False, decorators=[with_native_funcol(False, remove_arg=False)]),
- ]
-) | 2.41.0 |
1b8363f409c5b2b7a16d0a58e04dc4a8e6c5e8d | Sat, 13 Apr 2024 03:19:10 +0000 | [PATCH 0127/1000] [inductor] Remove unused local variable. (#120227) | Pull Request resolved: https://github.com/pytorch/pytorch/pull/120227 Approved by: https://github.com/Skylion007 | diff --git a/torch/_inductor/comm_analysis.py b/torch/_inductor/comm_analysis.py
index 6ff48e5dc6..de3f631b0e 100644
--- a/torch/_inductor/comm_analysis.py
+++ b/torch/_inductor/comm_analysis.py
@@ -61,7 +61,6 @@ def get_collective_type(node: ir.IRNode) -> NCCL_COLL:
def get_collective_input_size_bytes(node: ir.IRNode) -> int:
sz_bytes = 0
for inp in node.inputs: # type: ignore[attr-defined]
- shape = inp.layout.size
numel = sympy_product(inp.layout.size)
if isinstance(numel, sympy.Integer):
# For ease of testing
@@ -239,7 +238,6 @@ def estimate_nccl_collective_runtime(node: ir.IRNode) -> float:
# =============== latency computation ===============
intraHw = NCCL_HW.NVLINK
- hw = intraHw if nNodes == 1 else NCCL_HW.NET
if coll == NCCL_COLL.ALL_REDUCE:
if nNodes > 1: | 2.41.0 |
216068559b723b795d63723dd32fc0b82243bdf | Sat, 13 Apr 2024 03:31:56 +0000 | [PATCH 0128/1000] Enable UFMT on test/test_ops* (#123935) | Part of https://github.com/pytorch/pytorch/issues/123062 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123935 Approved by: https://github.com/ezyang | diff --git a/.lintrunner.toml b/.lintrunner.toml
index a0a74c86ad..4073f1bbff 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -1476,10 +1476,6 @@ exclude_patterns = [
'test/test_nvfuser_dynamo.py',
'test/test_nvfuser_frontend.py',
'test/test_openmp.py',
- 'test/test_ops.py',
- 'test/test_ops_fwd_gradients.py',
- 'test/test_ops_gradients.py',
- 'test/test_ops_jit.py',
'test/test_optim.py',
'test/test_out_dtype_op.py',
'test/test_overrides.py',
diff --git a/test/test_ops.py b/test/test_ops.py
index 34462961df..3be010f83f 100644
--- a/test/test_ops.py
+++ b/test/test_ops.py
@@ -1,89 +1,86 @@
# Owner(s): ["module: unknown"]
+import contextlib
import copy
-from collections.abc import Sequence
-from functools import partial
-import warnings
-import unittest
import inspect
import itertools
-import torch
-import contextlib
-import re
import os
+import re
+import unittest
+import warnings
from collections import defaultdict
+from collections.abc import Sequence
+from functools import partial
from importlib import import_module
-from torch.utils._pytree import tree_map
from typing import Dict, List
+
+import torch
+
+import torch._prims as prims
+
+import torch.utils._pytree as pytree
+from torch._prims.context import TorchRefsMode
+from torch._prims_common.wrappers import _maybe_remove_out_wrapper
+from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
+from torch._subclasses.fake_utils import outputs_alias_inputs
from torch.testing import make_tensor
+
+from torch.testing._internal import composite_compliance, opinfo
+from torch.testing._internal.common_device_type import (
+ deviceCountAtLeast,
+ instantiate_device_type_tests,
+ onlyCPU,
+ onlyCUDA,
+ onlyNativeDeviceTypes,
+ OpDTypes,
+ ops,
+ skipMeta,
+)
from torch.testing._internal.common_dtype import (
- floating_and_complex_types_and,
all_types_and_complex_and,
+ floating_and_complex_types_and,
integral_types_and,
)
+from torch.testing._internal.common_methods_invocations import (
+ BinaryUfuncInfo,
+ op_db,
+ ops_and_refs,
+ python_ref_db,
+ ReductionOpInfo,
+ ReductionPythonRefInfo,
+ skip,
+ skipOps,
+ SpectralFuncInfo,
+ UnaryUfuncInfo,
+ xfail,
+)
from torch.testing._internal.common_utils import (
- TestCase,
- is_iterable_of_tensors,
- run_tests,
- IS_SANDCASTLE,
clone_input_helper,
+ first_sample,
IS_CI,
+ IS_FBCODE,
+ is_iterable_of_tensors,
+ IS_SANDCASTLE,
+ IS_WINDOWS,
+ noncontiguous_like,
+ parametrize,
+ run_tests,
set_default_dtype,
+ skipIfTorchInductor,
+ slowTest,
suppress_warnings,
- noncontiguous_like,
TEST_WITH_ASAN,
TEST_WITH_ROCM,
TEST_WITH_TORCHDYNAMO,
TEST_WITH_TORCHINDUCTOR,
TEST_WITH_UBSAN,
- IS_WINDOWS,
- IS_FBCODE,
- first_sample,
- parametrize,
- skipIfTorchInductor,
- slowTest,
+ TestCase,
unMarkDynamoStrictTest,
)
-from torch.testing._internal.common_methods_invocations import (
- op_db,
- UnaryUfuncInfo,
- ReductionOpInfo,
- ReductionPythonRefInfo,
- SpectralFuncInfo,
- ops_and_refs,
- python_ref_db,
- BinaryUfuncInfo,
- xfail,
- skip,
- skipOps
-)
-from torch.testing._internal.common_device_type import (
- deviceCountAtLeast,
- instantiate_device_type_tests,
- ops,
- onlyCUDA,
- onlyCPU,
- onlyNativeDeviceTypes,
- OpDTypes,
- skipMeta,
-)
-from torch._subclasses.fake_tensor import (
- FakeTensor,
- FakeTensorMode,
-)
-from torch._subclasses.fake_utils import outputs_alias_inputs
-
-import torch._prims as prims
-from torch._prims.context import TorchRefsMode
-from torch._prims_common.wrappers import _maybe_remove_out_wrapper
-
-from torch.testing._internal import opinfo
-from torch.testing._internal import composite_compliance
-
-import torch.utils._pytree as pytree
from torch.utils._python_dispatch import TorchDispatchMode
+from torch.utils._pytree import tree_map
assert torch.get_default_dtype() == torch.float32
@@ -108,16 +105,21 @@ _ref_test_ops = tuple(
)
)
+
def reduction_dtype_filter(op):
- if (not isinstance(op, ReductionPythonRefInfo) or not op.supports_out
- or torch.int16 not in op.dtypes):
+ if (
+ not isinstance(op, ReductionPythonRefInfo)
+ or not op.supports_out
+ or torch.int16 not in op.dtypes
+ ):
return False
argspec = inspect.getfullargspec(op.op)
- if 'dtype' not in argspec.kwonlyargs:
+ if "dtype" not in argspec.kwonlyargs:
return False
return True
+
# Create a list of operators that are a subset of _ref_test_ops but don't have a
# numpy ref to compare them too, If both CPU and CUDA are compared to numpy
# then they do not need to be compared to each other
@@ -125,6 +127,7 @@ _ops_and_refs_with_no_numpy_ref = [op for op in ops_and_refs if op.ref is None]
aten = torch.ops.aten
+
# Tests that apply to all operators and aren't related to any particular
# system
@unMarkDynamoStrictTest
@@ -171,7 +174,6 @@ class TestCommon(TestCase):
)
def test_pointwise_tag_coverage(self):
-
pytorch_dir = os.path.abspath(__file__ + "/../../")
files = [
"aten/src/ATen/native/UnaryOps.cpp",
@@ -230,7 +232,7 @@ class TestCommon(TestCase):
lines = f.read()
matches = regex.findall(lines)
for match in matches:
- kernel = match[len("DEFINE_DISPATCH("):-len("_stub")]
+ kernel = match[len("DEFINE_DISPATCH(") : -len("_stub")]
# no op definition for it, but defined with DEFINE_DISPATCH ?
if kernel == "trigamma":
@@ -265,10 +267,12 @@ class TestCommon(TestCase):
@ops(_ref_test_ops, allowed_dtypes=(torch.float64, torch.long, torch.complex128))
def test_numpy_ref(self, device, dtype, op):
if (
- TEST_WITH_TORCHINDUCTOR and
- op.formatted_name in ('signal_windows_exponential', 'signal_windows_bartlett') and
- dtype == torch.float64 and 'cuda' in device
- ): # noqa: E121
+ TEST_WITH_TORCHINDUCTOR
+ and op.formatted_name
+ in ("signal_windows_exponential", "signal_windows_bartlett")
+ and dtype == torch.float64
+ and "cuda" in device
+ ): # noqa: E121
raise unittest.SkipTest("XXX: raises tensor-likes are not close.")
# Sets the default dtype to NumPy's default dtype of double
@@ -284,10 +288,9 @@ class TestCommon(TestCase):
@slowTest
@ops(_ops_and_refs_with_no_numpy_ref, dtypes=OpDTypes.any_common_cpu_cuda_one)
def test_compare_cpu(self, device, dtype, op):
-
def to_cpu(arg):
if isinstance(arg, torch.Tensor):
- return arg.to(device='cpu')
+ return arg.to(device="cpu")
return arg
samples = op.reference_inputs(device, dtype)
@@ -336,7 +339,9 @@ class TestCommon(TestCase):
meta_sample = sample.transform(_to_tensormeta)
try:
with mode:
- meta_result = op(meta_sample.input, *meta_sample.args, **meta_sample.kwargs)
+ meta_result = op(
+ meta_sample.input, *meta_sample.args, **meta_sample.kwargs
+ )
except torch._subclasses.fake_tensor.UnsupportedFakeTensorException:
continue
except torch._subclasses.fake_tensor.DataDependentOutputException:
@@ -346,12 +351,16 @@ class TestCommon(TestCase):
if isinstance(result, torch.Tensor):
self.assertTrue(isinstance(meta_result, FakeTensor))
- prims.utils.compare_tensor_meta(result, meta_result, check_conj=op.op not in CHECK_CONJ_SKIPS)
+ prims.utils.compare_tensor_meta(
+ result, meta_result, check_conj=op.op not in CHECK_CONJ_SKIPS
+ )
elif isinstance(result, Sequence):
for a, b in zip(result, meta_result):
if isinstance(a, torch.Tensor) or isinstance(b, torch.Tensor):
self.assertTrue(isinstance(b, FakeTensor))
- prims.utils.compare_tensor_meta(a, b, check_conj=op.op not in CHECK_CONJ_SKIPS)
+ prims.utils.compare_tensor_meta(
+ a, b, check_conj=op.op not in CHECK_CONJ_SKIPS
+ )
def _ref_test_helper(
self,
@@ -367,22 +376,27 @@ class TestCommon(TestCase):
# NOTE: this test works by comparing the reference
ex = None
for sample in op.reference_inputs(device, dtype, requires_grad=False):
- if isinstance(sample.input, torch.Tensor) and sample.input.numel() == 0 and skip_zero_numel:
+ if (
+ isinstance(sample.input, torch.Tensor)
+ and sample.input.numel() == 0
+ and skip_zero_numel
+ ):
continue
- if isinstance(sample.input, torch.Tensor) and sample.input.ndim == 0 and skip_zero_dim:
+ if (
+ isinstance(sample.input, torch.Tensor)
+ and sample.input.ndim == 0
+ and skip_zero_dim
+ ):
continue
- if (
- skip_bfloat
- and (
- (
- isinstance(sample.input, torch.Tensor)
- and sample.input.dtype == torch.bfloat16
- )
- or any(
- isinstance(arg, torch.Tensor) and arg.dtype == torch.bfloat16
- for arg in sample.args
- )
+ if skip_bfloat and (
+ (
+ isinstance(sample.input, torch.Tensor)
+ and sample.input.dtype == torch.bfloat16
+ )
+ or any(
+ isinstance(arg, torch.Tensor) and arg.dtype == torch.bfloat16
+ for arg in sample.args
)
):
continue
@@ -390,12 +404,19 @@ class TestCommon(TestCase):
ref_result = op(sample.input, *sample.args, **sample.kwargs)
torch_result = op.torch_opinfo(sample.input, *sample.args, **sample.kwargs)
- for a, b in zip(pytree.tree_leaves(ref_result), pytree.tree_leaves(torch_result)):
+ for a, b in zip(
+ pytree.tree_leaves(ref_result), pytree.tree_leaves(torch_result)
+ ):
if isinstance(a, torch.Tensor) or isinstance(b, torch.Tensor):
prims.utils.compare_tensor_meta(a, b)
- if getattr(op, 'validate_view_consistency', True) and not skip_view_consistency:
- msg = (f"The torch implementation {'returns' if b._is_view() else 'does not return'} "
- f"a view, while the reference {'does' if a._is_view() else 'does not'}")
+ if (
+ getattr(op, "validate_view_consistency", True)
+ and not skip_view_consistency
+ ):
+ msg = (
+ f"The torch implementation {'returns' if b._is_view() else 'does not return'} "
+ f"a view, while the reference {'does' if a._is_view() else 'does not'}"
+ )
self.assertEqual(a._is_view(), b._is_view(), msg)
# Computes the dtype the more precise computatino would occur in
@@ -427,7 +448,6 @@ class TestCommon(TestCase):
ex = e
-
# Goes to next sample if these results are close
if not ex:
continue
@@ -442,7 +462,9 @@ class TestCommon(TestCase):
return x
precise_sample = sample.transform(_make_precise)
- precise_result = op.torch_opinfo(precise_sample.input, *precise_sample.args, **precise_sample.kwargs)
+ precise_result = op.torch_opinfo(
+ precise_sample.input, *precise_sample.args, **precise_sample.kwargs
+ )
def _distance(a, b):
# Special-cases boolean comparisons
@@ -450,24 +472,34 @@ class TestCommon(TestCase):
assert b.dtype is torch.bool
return (a ^ b).sum()
- same = (a == b)
- if prims.utils.is_float_dtype(a.dtype) or prims.utils.is_complex_dtype(a.dtype):
- same = torch.logical_or(same, torch.logical_and(torch.isnan(a), torch.isnan(b)))
+ same = a == b
+ if prims.utils.is_float_dtype(a.dtype) or prims.utils.is_complex_dtype(
+ a.dtype
+ ):
+ same = torch.logical_or(
+ same, torch.logical_and(torch.isnan(a), torch.isnan(b))
+ )
actual_error = torch.where(same, 0, torch.abs(a - b)).sum()
return actual_error
ref_distance = 0
- for a, b in zip(pytree.tree_leaves(ref_result), pytree.tree_leaves(precise_result)):
+ for a, b in zip(
+ pytree.tree_leaves(ref_result), pytree.tree_leaves(precise_result)
+ ):
ref_distance = ref_distance + _distance(a, b)
torch_distance = 0
- for a, b in zip(pytree.tree_leaves(torch_result), pytree.tree_leaves(precise_result)):
+ for a, b in zip(
+ pytree.tree_leaves(torch_result), pytree.tree_leaves(precise_result)
+ ):
torch_distance = torch_distance + _distance(a, b)
# TODO: consider adding some tolerance to this comparison
- msg = f"Reference result was farther ({ref_distance}) from the precise " \
- f"computation than the torch result was ({torch_distance})!"
+ msg = (
+ f"Reference result was farther ({ref_distance}) from the precise "
+ f"computation than the torch result was ({torch_distance})!"
+ )
self.assertTrue(ref_distance <= torch_distance, msg=msg)
# Reports numerical accuracy discrepancies
@@ -485,7 +517,11 @@ class TestCommon(TestCase):
# In this test, primTorch refs call into the refs namespace
# For example, a ref with torch.foo in it will calls refs.foo instead
# Direct calls to refs and prims are not affected
- if TEST_WITH_ROCM and (op.name == "_refs.fft.ihfftn" or op.name == "_refs.fft.ihfft2") and dtype == torch.float16:
+ if (
+ TEST_WITH_ROCM
+ and (op.name == "_refs.fft.ihfftn" or op.name == "_refs.fft.ihfft2")
+ and dtype == torch.float16
+ ):
self.skipTest("Skipped on ROCm")
self._ref_test_helper(lambda: TorchRefsMode(strict=True), device, dtype, op)
@@ -506,10 +542,19 @@ class TestCommon(TestCase):
@unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN")
@onlyCUDA
@ops(python_ref_db)
- @parametrize('executor', ['aten',])
+ @parametrize(
+ "executor",
+ [
+ "aten",
+ ],
+ )
@skipIfTorchInductor("Takes too long for inductor")
def test_python_ref_executor(self, device, dtype, op, executor):
- if TEST_WITH_ROCM and (op.name == "_refs.fft.ihfftn" or op.name == "_refs.fft.ihfft2") and dtype == torch.float16:
+ if (
+ TEST_WITH_ROCM
+ and (op.name == "_refs.fft.ihfftn" or op.name == "_refs.fft.ihfft2")
+ and dtype == torch.float16
+ ):
self.skipTest("Skipped on ROCm")
# skip zero-dim tensors for some composites of reduction operations and view
skip_zero_dim_ops = [
@@ -521,8 +566,10 @@ class TestCommon(TestCase):
"ops.nvprims.view",
]
- from torch._prims.executor import make_traced
from copy import copy
+
+ from torch._prims.executor import make_traced
+
op = copy(op)
op.op = partial(make_traced(op.op), executor=executor)
self._ref_test_helper(
@@ -545,8 +592,20 @@ class TestCommon(TestCase):
@skipMeta
@onlyNativeDeviceTypes
- @ops([op for op in op_db if op.error_inputs_sparse_func is not None], dtypes=OpDTypes.none)
- @parametrize("layout", (torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc, torch.sparse_coo))
+ @ops(
+ [op for op in op_db if op.error_inputs_sparse_func is not None],
+ dtypes=OpDTypes.none,
+ )
+ @parametrize(
+ "layout",
+ (
+ torch.sparse_csr,
+ torch.sparse_csc,
+ torch.sparse_bsr,
+ torch.sparse_bsc,
+ torch.sparse_coo,
+ ),
+ )
def test_errors_sparse(self, device, op, layout):
for ei in op.error_inputs_sparse(device, layout):
si = ei.sample_input
@@ -556,7 +615,10 @@ class TestCommon(TestCase):
@skipMeta
@onlyNativeDeviceTypes
- @ops([op for op in python_ref_db if op.error_inputs_func is not None], dtypes=OpDTypes.none)
+ @ops(
+ [op for op in python_ref_db if op.error_inputs_func is not None],
+ dtypes=OpDTypes.none,
+ )
@skipIfTorchInductor("Takes too long for inductor")
def test_python_ref_errors(self, device, op):
mode = FakeTensorMode()
@@ -900,7 +962,6 @@ class TestCommon(TestCase):
# for non-integer types fills with NaN
return torch.full_like(t, float("nan"))
-
_compare_out(_case_zero_transform)
# Case 1: out= with the correct shape, dtype, and device,
@@ -940,7 +1001,6 @@ class TestCommon(TestCase):
elif torch.cuda.is_available():
wrong_device = "cuda"
-
factory_fn_msg = (
"\n\nNOTE: If your op is a factory function (i.e., it accepts TensorOptions) you should mark its "
"OpInfo with `is_factory_function=True`."
@@ -1000,9 +1060,13 @@ class TestCommon(TestCase):
op_out(out=out)
@ops(
- [op for op in op_db if op.supports_out and (op.supports_autograd or op.is_factory_function)],
+ [
+ op
+ for op in op_db
+ if op.supports_out and (op.supports_autograd or op.is_factory_function)
+ ],
dtypes=OpDTypes.supported,
- allowed_dtypes=[torch.float, torch.cfloat]
+ allowed_dtypes=[torch.float, torch.cfloat],
)
def test_out_requires_grad_error(self, device, dtype, op):
sample = first_sample(self, op.sample_inputs(device, dtype))
@@ -1045,24 +1109,32 @@ class TestCommon(TestCase):
self.assertFalse(expectFail)
except RuntimeError as err:
self.assertEqual(
- str(err), "dtype argument and out dtype must match in reduction")
+ str(err), "dtype argument and out dtype must match in reduction"
+ )
self.assertTrue(expectFail)
return out
+
samples = op.sample_inputs(device, dtype)
for sample in samples:
- if 'dtype' not in sample.kwargs:
+ if "dtype" not in sample.kwargs:
helper(False, False, op, sample.input, *sample.args, **sample.kwargs)
helper(True, False, op, sample.input, *sample.args, **sample.kwargs)
- sample.kwargs['dtype'] = torch.int16
+ sample.kwargs["dtype"] = torch.int16
helper(False, False, op, sample.input, *sample.args, **sample.kwargs)
helper(True, True, op, sample.input, *sample.args, **sample.kwargs)
- sample.kwargs['dtype'] = torch.int32
+ sample.kwargs["dtype"] = torch.int32
helper(False, False, op, sample.input, *sample.args, **sample.kwargs)
helper(True, False, op, sample.input, *sample.args, **sample.kwargs)
else:
helper(False, False, op, sample.input, *sample.args, **sample.kwargs)
- helper(True, sample.kwargs['dtype'] != torch.int32, op, sample.input,
- *sample.args, **sample.kwargs)
+ helper(
+ True,
+ sample.kwargs["dtype"] != torch.int32,
+ op,
+ sample.input,
+ *sample.args,
+ **sample.kwargs,
+ )
# Tests that the forward and backward passes of operations produce the
# same values for the cross-product of op variants (method, inplace)
@@ -1076,7 +1148,6 @@ class TestCommon(TestCase):
operator = op.operator_variant
inplace_operator = op.inplace_operator_variant
-
# list of all inplace ops: inplace variant + alias inplace variants if exist
inplace_ops = [inplace, inplace_operator]
variants = [method, inplace, operator, inplace_operator]
@@ -1257,8 +1328,11 @@ class TestCommon(TestCase):
actual = op(sample.input, *sample.args, **sample.kwargs)
# sample.transform applies the lambda to torch.Tensor and torch.dtype.
# However, we only want to apply it to Tensors with dtype `torch.complex32`..
- transformed_sample = sample.transform(lambda x: x.to(torch.complex64) if isinstance(
- x, torch.Tensor) and x.dtype is torch.complex32 else x)
+ transformed_sample = sample.transform(
+ lambda x: x.to(torch.complex64)
+ if isinstance(x, torch.Tensor) and x.dtype is torch.complex32
+ else x
+ )
expected = op(
transformed_sample.input,
*transformed_sample.args,
@@ -1267,8 +1341,12 @@ class TestCommon(TestCase):
# Since range of chalf is much less compared to cfloat,
# we get `inf`s easily (eg. with `pow`, `exp`),
# so we cast `cfloat` back to `chalf`.
- expected = tree_map(lambda x: x.to(torch.complex32) if isinstance(
- x, torch.Tensor) and x.dtype is torch.complex64 else x, expected)
+ expected = tree_map(
+ lambda x: x.to(torch.complex32)
+ if isinstance(x, torch.Tensor) and x.dtype is torch.complex64
+ else x,
+ expected,
+ )
# `exact_dtype` is False because for ops like real, imag
# we get different dtypes for `actual` and `expected`
@@ -1276,7 +1354,6 @@ class TestCommon(TestCase):
# `cfloat` input -> `float` output
self.assertEqual(actual, expected, exact_dtype=False)
-
@ops(op_db, allowed_dtypes=(torch.bool,))
@unittest.skipIf(TEST_WITH_UBSAN, "Test uses undefined behavior")
def test_non_standard_bool_values(self, device, dtype, op):
@@ -1286,7 +1363,9 @@ class TestCommon(TestCase):
return x
# Map False -> 0 and True -> Random value in [2, 255]
- true_vals = torch.randint(2, 255, x.shape, dtype=torch.uint8, device=x.device)
+ true_vals = torch.randint(
+ 2, 255, x.shape, dtype=torch.uint8, device=x.device
+ )
false_vals = torch.zeros((), dtype=torch.uint8, device=x.device)
x_int = torch.where(x, true_vals, false_vals)
@@ -1377,8 +1456,11 @@ class TestCommon(TestCase):
return False
- requires_grad = _tensor_requires_grad(sample.input) \
- or _tensor_requires_grad(sample.args) or _tensor_requires_grad(sample.kwargs)
+ requires_grad = (
+ _tensor_requires_grad(sample.input)
+ or _tensor_requires_grad(sample.args)
+ or _tensor_requires_grad(sample.kwargs)
+ )
if not requires_grad:
continue
@@ -1494,7 +1576,9 @@ class TestCommon(TestCase):
)
)
- all_claimed_but_unsupported = set.union(claimed_but_unsupported_backward, claimed_but_unsupported_forward)
+ all_claimed_but_unsupported = set.union(
+ claimed_but_unsupported_backward, claimed_but_unsupported_forward
+ )
if all_claimed_but_unsupported:
msg += "Unexpected failures raised the following errors:\n"
for dtype in all_claimed_but_unsupported:
@@ -1505,7 +1589,10 @@ class TestCommon(TestCase):
# Validates that each OpInfo that sets promotes_int_to_float=True does as it says
@skipMeta
@onlyNativeDeviceTypes
- @ops((op for op in op_db if op.promotes_int_to_float), allowed_dtypes=integral_types_and(torch.bool))
+ @ops(
+ (op for op in op_db if op.promotes_int_to_float),
+ allowed_dtypes=integral_types_and(torch.bool),
+ )
def test_promotes_int_to_float(self, device, dtype, op):
for sample in op.sample_inputs(device, dtype):
output = op(sample.input, *sample.args, **sample.kwargs)
@@ -1531,7 +1618,9 @@ class TestCompositeCompliance(TestCase):
args = [sample.input] + list(sample.args)
kwargs = sample.kwargs
composite_compliance.check_with_mode(op, args, kwargs, self.assertEqual)
- composite_compliance.check_all_permutations(op, args, kwargs, self.assertEqual)
+ composite_compliance.check_all_permutations(
+ op, args, kwargs, self.assertEqual
+ )
@unittest.skipIf(
IS_FBCODE or IS_SANDCASTLE, "__torch_dispatch__ does not work in fbcode"
@@ -1546,9 +1635,13 @@ class TestCompositeCompliance(TestCase):
# We pass assertEqual so that decorators like `toleranceOverride`
# actually work (otherwise they silently do nothing!)
composite_compliance.check_backward_formula(
- op.get_op(), args, kwargs,
+ op.get_op(),
+ args,
+ kwargs,
sample.output_process_fn_grad,
- op.gradcheck_wrapper, self.assertEqual)
+ op.gradcheck_wrapper,
+ self.assertEqual,
+ )
@unittest.skipIf(
IS_FBCODE or IS_SANDCASTLE, "__torch_dispatch__ does not work in fbcode"
@@ -1569,7 +1662,8 @@ class TestCompositeCompliance(TestCase):
# We pass assertEqual so that decorators like `toleranceOverride`
# actually work (otherwise they silently do nothing!)
composite_compliance.check_forward_ad_formula(
- op.get_op(), args, kwargs, op.gradcheck_wrapper, self.assertEqual)
+ op.get_op(), args, kwargs, op.gradcheck_wrapper, self.assertEqual
+ )
@ops(op_db, allowed_dtypes=(torch.float,))
def test_cow_input(self, device, dtype, op):
@@ -1582,22 +1676,25 @@ class TestCompositeCompliance(TestCase):
return (allow_list is not None) and (idx_or_kw in allow_list)
def check_cow_input(
- arg,
- arg_copy,
- idx_or_kw,
- backward_or_forward='forward',
- supports_cow_input_no_materialize=op.supports_cow_input_no_materialize_forward,
- allow_list=op.allow_cow_input_materialize_forward):
-
+ arg,
+ arg_copy,
+ idx_or_kw,
+ backward_or_forward="forward",
+ supports_cow_input_no_materialize=op.supports_cow_input_no_materialize_forward,
+ allow_list=op.allow_cow_input_materialize_forward,
+ ):
arg_name = (
- (f"Argument {idx_or_kw}" if isinstance(idx_or_kw, int) else f"Keyword argument '{idx_or_kw}'")
- + f" during {backward_or_forward} call"
- )
+ f"Argument {idx_or_kw}"
+ if isinstance(idx_or_kw, int)
+ else f"Keyword argument '{idx_or_kw}'"
+ ) + f" during {backward_or_forward} call"
if is_strided_tensor(arg):
is_cow = torch._C._is_cow_tensor(arg)
- if supports_cow_input_no_materialize and not check_ignore_materialize(idx_or_kw, allow_list):
+ if supports_cow_input_no_materialize and not check_ignore_materialize(
+ idx_or_kw, allow_list
+ ):
self.assertTrue(
is_cow,
msg=(
@@ -1605,7 +1702,9 @@ class TestCompositeCompliance(TestCase):
f"Either set `supports_cow_input_no_materialize_{backward_or_forward}=False` "
"in this operation's OpInfo, add the arg to the OpInfo's "
f"`allow_cow_input_materialize_{backward_or_forward}` list, or change the "
- "implementation to avoid materialization."))
+ "implementation to avoid materialization."
+ ),
+ )
if is_cow:
self.assertTrue(
@@ -1613,7 +1712,8 @@ class TestCompositeCompliance(TestCase):
msg=(
f"{arg_name} avoided materialization, "
"but the operation mutated its data."
- ))
+ ),
+ )
for sample in samples:
args_raw = [sample.input] + list(sample.args)
@@ -1661,19 +1761,31 @@ class TestCompositeCompliance(TestCase):
# Call backward op if it is supported. This part of the test is
# based on `composite_compliance.check_backward_formula`
- if op.supports_autograd and len(leaf_tensors) > 0 and not op.skip_cow_input_backward:
+ if (
+ op.supports_autograd
+ and len(leaf_tensors) > 0
+ and not op.skip_cow_input_backward
+ ):
if sample.output_process_fn_grad is not None:
results_raw = sample.output_process_fn_grad(results_raw)
leaf_results = pytree.tree_leaves(results_raw)
- results = [r for r in leaf_results if isinstance(r, torch.Tensor) and r.requires_grad]
+ results = [
+ r
+ for r in leaf_results
+ if isinstance(r, torch.Tensor) and r.requires_grad
+ ]
- all_results_strided = all(is_strided_tensor(result) for result in results)
+ all_results_strided = all(
+ is_strided_tensor(result) for result in results
+ )
# Only test backward if the results are strided tensors
if all_results_strided:
output_grads_raw = [
- torch.ones(r.shape, device=r.device, dtype=r.dtype) for r in results]
+ torch.ones(r.shape, device=r.device, dtype=r.dtype)
+ for r in results
+ ]
output_grads_copy = []
output_grads = []
@@ -1687,7 +1799,8 @@ class TestCompositeCompliance(TestCase):
leaf_tensors,
output_grads,
allow_unused=True,
- retain_graph=True)
+ retain_graph=True,
+ )
# Check that COW inputs remain COW after the backward op is executed
for idx, arg in enumerate(args):
@@ -1695,20 +1808,21 @@ class TestCompositeCompliance(TestCase):
arg,
args_copy[idx],
idx,
- backward_or_forward='backward',
+ backward_or_forward="backward",
supports_cow_input_no_materialize=op.supports_cow_input_no_materialize_backward,
- allow_list=op.allow_cow_input_materialize_backward)
+ allow_list=op.allow_cow_input_materialize_backward,
+ )
# Check that COW inputs remain COW after the backward op is executed
for idx, output_grad in enumerate(output_grads):
check_cow_input(
output_grad,
output_grads_copy[idx],
- f'output grad {idx}',
- backward_or_forward='backward',
+ f"output grad {idx}",
+ backward_or_forward="backward",
supports_cow_input_no_materialize=op.supports_cow_input_no_materialize_backward,
- allow_list=op.allow_cow_input_materialize_backward)
-
+ allow_list=op.allow_cow_input_materialize_backward,
+ )
@ops(op_db, allowed_dtypes=(torch.float,))
def test_view_replay(self, device, dtype, op):
@@ -1731,9 +1845,9 @@ class TestCompositeCompliance(TestCase):
# forward and reverse views via a functioning view_func() / rev_view_func().
for out in outs:
if not (
- isinstance(out, torch.Tensor) and
- out._is_view() and
- out._base is inp
+ isinstance(out, torch.Tensor)
+ and out._is_view()
+ and out._base is inp
):
continue
@@ -1940,6 +2054,7 @@ class TestMathBits(TestCase):
torch.is_complex,
)
+
# input strides and size may have been altered due to the result of an inplace op
def check_inplace_view(func, input, rs, input_size, input_strides):
if func is None:
@@ -1951,7 +2066,7 @@ def check_inplace_view(func, input, rs, input_size, input_strides):
unequal_strides = rs.stride() != input_strides
# resize_ should probably have inplace_view tag. Not adding the tag since it
# breaks some codegen logic
- if (unequal_size or unequal_strides):
+ if unequal_size or unequal_strides:
if isinstance(func, torch._ops.OpOverloadPacket):
func = func.default
# Reference: https://github.com/pytorch/pytorch/issues/78759
@@ -1959,6 +2074,7 @@ def check_inplace_view(func, input, rs, input_size, input_strides):
# TODO: use self.assertIn when we have separate tests for each tag
assert torch.Tag.inplace_view in func.tags
+
# A mode that when enabled runs correctness checks to ensure
# that operators have expected tags based on their input and
# output tensor properties
@@ -1973,6 +2089,7 @@ class TestTagsMode(TorchDispatchMode):
rs = func(*args, **kwargs)
return rs
+
# Test to verify the correctness for tags in `tags.yaml`, also available for access through `torch.Tags`
@unMarkDynamoStrictTest
class TestTags(TestCase):
@@ -1993,6 +2110,7 @@ class TestTags(TestCase):
opoverloadpacket = getattr(torch.ops.aten, aten_name, None)
check_inplace_view(opoverloadpacket, input, rs, old_size, old_stride)
+
class TestSelfKwarg(TestCase):
def test_self_kwargs(self):
"""Verify that we can call the aten ops with all kwargs even if the
@@ -2001,160 +2119,171 @@ class TestSelfKwarg(TestCase):
torch.ops.aten.reshape.default(self=torch.rand(1, 2), shape=[2])
torch.ops.aten.min.default(self=torch.rand(100))
+
@unMarkDynamoStrictTest
class TestRefsOpsInfo(TestCase):
-
- import_paths = ["_refs", "_refs.special", "_refs.nn.functional", "_refs.fft", "_refs._conversions"]
- module_alls = [(path, import_module(f"torch.{path}").__all__) for path in import_paths]
- ref_ops_names = tuple(itertools.chain.from_iterable(
- [f"{path}.{op}" for op in module_all] for path, module_all in module_alls))
+ import_paths = [
+ "_refs",
+ "_refs.special",
+ "_refs.nn.functional",
+ "_refs.fft",
+ "_refs._conversions",
+ ]
+ module_alls = [
+ (path, import_module(f"torch.{path}").__all__) for path in import_paths
+ ]
+ ref_ops_names = tuple(
+ itertools.chain.from_iterable(
+ [f"{path}.{op}" for op in module_all] for path, module_all in module_alls
+ )
+ )
ref_db_names = {ref_op.name for ref_op in python_ref_db}
# TODO: References that do not have an entry in python_ref_db
skip_ref_ops = {
- '_refs.alias',
- '_refs.bitwise_right_shift',
- '_refs.copy_to',
- '_refs.empty_permuted',
- '_refs.empty_strided',
- '_refs.equal',
- '_refs.full',
- '_refs.full_like',
- '_refs.is_complex',
- '_refs.to',
- '_refs.mvlgamma',
- '_refs.ones',
- '_refs.ones_like',
- '_refs.special.expit',
- '_refs.std_var',
- '_refs.swap_axes',
- '_refs.uniform',
- '_refs.scalar_tensor',
- '_refs.trunc_divide',
- '_refs.zero',
- '_refs.zeros',
- '_refs.zeros_like',
- '_refs.rfloordiv',
- '_refs.rtruediv',
- '_refs.rpow',
+ "_refs.alias",
+ "_refs.bitwise_right_shift",
+ "_refs.copy_to",
+ "_refs.empty_permuted",
+ "_refs.empty_strided",
+ "_refs.equal",
+ "_refs.full",
+ "_refs.full_like",
+ "_refs.is_complex",
+ "_refs.to",
+ "_refs.mvlgamma",
+ "_refs.ones",
+ "_refs.ones_like",
+ "_refs.special.expit",
+ "_refs.std_var",
+ "_refs.swap_axes",
+ "_refs.uniform",
+ "_refs.scalar_tensor",
+ "_refs.trunc_divide",
+ "_refs.zero",
+ "_refs.zeros",
+ "_refs.zeros_like",
+ "_refs.rfloordiv",
+ "_refs.rtruediv",
+ "_refs.rpow",
# These should be tested with their out-of-place counterparts
- '_refs.index_add_',
- '_refs.index_copy_',
- '_refs.index_fill_',
- '_refs.native_group_norm',
+ "_refs.index_add_",
+ "_refs.index_copy_",
+ "_refs.index_fill_",
+ "_refs.native_group_norm",
}
not_in_decomp_table = {
# duplicated in _decomp and _refs
- '_refs.nn.functional.group_norm',
- '_refs.nn.functional.mse_loss',
- '_refs.floor_divide',
+ "_refs.nn.functional.group_norm",
+ "_refs.nn.functional.mse_loss",
+ "_refs.floor_divide",
# duplicated as refs do not have decent support for advanced indexing
- '_refs.index_copy',
- '_refs.index_copy_',
- '_refs.index_add',
- '_refs.index_add_',
+ "_refs.index_copy",
+ "_refs.index_copy_",
+ "_refs.index_add",
+ "_refs.index_add_",
# these are not aten ops?
- '_refs._conversions.bfloat16',
- '_refs._conversions.bool',
- '_refs._conversions.byte',
- '_refs._conversions.char',
- '_refs._conversions.double',
- '_refs._conversions.float',
- '_refs._conversions.half',
- '_refs._conversions.int',
- '_refs._conversions.long',
- '_refs._conversions.short',
- '_refs._conversions.chalf',
- '_refs._conversions.cfloat',
- '_refs._conversions.cdouble',
- '_refs.broadcast_shapes',
- '_refs.broadcast_tensors',
- '_refs.mvlgamma',
- '_refs.nn.functional.layer_norm',
- '_refs.nn.functional.tanhshrink',
- '_refs.nn.functional.triplet_margin_loss',
- '_refs.rfloordiv',
- '_refs.rtruediv',
- '_refs.rpow',
+ "_refs._conversions.bfloat16",
+ "_refs._conversions.bool",
+ "_refs._conversions.byte",
+ "_refs._conversions.char",
+ "_refs._conversions.double",
+ "_refs._conversions.float",
+ "_refs._conversions.half",
+ "_refs._conversions.int",
+ "_refs._conversions.long",
+ "_refs._conversions.short",
+ "_refs._conversions.chalf",
+ "_refs._conversions.cfloat",
+ "_refs._conversions.cdouble",
+ "_refs.broadcast_shapes",
+ "_refs.broadcast_tensors",
+ "_refs.mvlgamma",
+ "_refs.nn.functional.layer_norm",
+ "_refs.nn.functional.tanhshrink",
+ "_refs.nn.functional.triplet_margin_loss",
+ "_refs.rfloordiv",
+ "_refs.rtruediv",
+ "_refs.rpow",
# CompositeImplicitAutograd
- '_refs.allclose',
- '_refs.atleast_1d',
- '_refs.atleast_2d',
- '_refs.atleast_3d',
- '_refs.broadcast_to',
- '_refs.chunk',
- '_refs.column_stack',
- '_refs.contiguous',
- '_refs.dsplit',
- '_refs.dstack',
- '_refs.fill',
- '_refs.fill_',
- '_refs.flatten',
- '_refs.fliplr',
- '_refs.flipud',
- '_refs.float_power',
- '_refs.hsplit',
- '_refs.hstack',
- '_refs.isclose',
- '_refs.isfinite',
- '_refs.isreal',
- '_refs.istft',
- '_refs.log_softmax',
- '_refs.movedim',
- '_refs.narrow',
- '_refs.nn.functional.dropout',
- '_refs.nn.functional.l1_loss',
- '_refs.nn.functional.smooth_l1_loss',
- '_refs.nn.functional.log_softmax',
- '_refs.nn.functional.poisson_nll_loss',
- '_refs.nn.functional.softmax',
- '_refs.nn.functional.softmin',
- '_refs.positive',
- '_refs.ravel',
- '_refs.reshape',
- '_refs.softmax',
- '_refs.special.expit',
- '_refs.special.log_softmax',
- '_refs.special.softmax',
- '_refs.square',
- '_refs.stft',
- '_refs.T',
- '_refs.take_along_dim',
- '_refs.tensor_split',
- '_refs.to',
- '_refs.true_divide',
- '_refs.trunc_divide',
- '_refs.vsplit',
- '_refs.vstack',
- '_refs.linalg.matrix_norm',
- '_refs.linalg.norm',
- '_refs.linalg.svd',
- '_refs.linalg.svdvals',
- '_refs.unflatten',
- '_refs.sum_to_size',
+ "_refs.allclose",
+ "_refs.atleast_1d",
+ "_refs.atleast_2d",
+ "_refs.atleast_3d",
+ "_refs.broadcast_to",
+ "_refs.chunk",
+ "_refs.column_stack",
+ "_refs.contiguous",
+ "_refs.dsplit",
+ "_refs.dstack",
+ "_refs.fill",
+ "_refs.fill_",
+ "_refs.flatten",
+ "_refs.fliplr",
+ "_refs.flipud",
+ "_refs.float_power",
+ "_refs.hsplit",
+ "_refs.hstack",
+ "_refs.isclose",
+ "_refs.isfinite",
+ "_refs.isreal",
+ "_refs.istft",
+ "_refs.log_softmax",
+ "_refs.movedim",
+ "_refs.narrow",
+ "_refs.nn.functional.dropout",
+ "_refs.nn.functional.l1_loss",
+ "_refs.nn.functional.smooth_l1_loss",
+ "_refs.nn.functional.log_softmax",
+ "_refs.nn.functional.poisson_nll_loss",
+ "_refs.nn.functional.softmax",
+ "_refs.nn.functional.softmin",
+ "_refs.positive",
+ "_refs.ravel",
+ "_refs.reshape",
+ "_refs.softmax",
+ "_refs.special.expit",
+ "_refs.special.log_softmax",
+ "_refs.special.softmax",
+ "_refs.square",
+ "_refs.stft",
+ "_refs.T",
+ "_refs.take_along_dim",
+ "_refs.tensor_split",
+ "_refs.to",
+ "_refs.true_divide",
+ "_refs.trunc_divide",
+ "_refs.vsplit",
+ "_refs.vstack",
+ "_refs.linalg.matrix_norm",
+ "_refs.linalg.norm",
+ "_refs.linalg.svd",
+ "_refs.linalg.svdvals",
+ "_refs.unflatten",
+ "_refs.sum_to_size",
# ref implementation missing kwargs
- '_refs.full_like', # missing "layout"
- '_refs.scalar_tensor', # missing "layout"
+ "_refs.full_like", # missing "layout"
+ "_refs.scalar_tensor", # missing "layout"
# other
- '_refs.block_diag', # only refs._block_diag_iterable is in decomposition table
- '_refs.empty', # intentional; direct empty is faster and has less guards
- '_refs.empty_permuted', # intentional; direct empty is faster and has less guards
- '_refs.expand_as',
- '_refs.as_strided', # _prims._as_strided_meta: "reduce() of empty sequence with no initial value"
- '_refs.copy_to', # torch._C._jit_get_operation: No such operator aten::copy_to
- '_refs.equal', # 'bool' object has no attribute 'dtype'
- '_refs.conj', # Calls _prims.conj
- '_refs.real',
- '_refs.imag',
- '_refs.reshape_as',
- '_refs.view_as',
- '_refs.view_as_complex', # TorchInductor does not support complex at the moment.
+ "_refs.block_diag", # only refs._block_diag_iterable is in decomposition table
+ "_refs.empty", # intentional; direct empty is faster and has less guards
+ "_refs.empty_permuted", # intentional; direct empty is faster and has less guards
+ "_refs.expand_as",
+ "_refs.as_strided", # _prims._as_strided_meta: "reduce() of empty sequence with no initial value"
+ "_refs.copy_to", # torch._C._jit_get_operation: No such operator aten::copy_to
+ "_refs.equal", # 'bool' object has no attribute 'dtype'
+ "_refs.conj", # Calls _prims.conj
+ "_refs.real",
+ "_refs.imag",
+ "_refs.reshape_as",
+ "_refs.view_as",
+ "_refs.view_as_complex", # TorchInductor does not support complex at the moment.
# the decompositions for these ops are slightly different
# because of out handling
- '_refs.var_mean',
- '_refs.std_mean',
- '_refs.native_layer_norm',
+ "_refs.var_mean",
+ "_refs.std_mean",
+ "_refs.native_layer_norm",
}
@parametrize("op", ref_ops_names)
@@ -2163,7 +2292,11 @@ class TestRefsOpsInfo(TestCase):
if op in self.skip_ref_ops:
raise unittest.SkipTest(f"{op} does not have an entry in python_ref_db")
elif inplace:
- self.assertNotIn(op, self.ref_db_names, msg=f"{op} is an in-place operation and should not have an OpInfo")
+ self.assertNotIn(
+ op,
+ self.ref_db_names,
+ msg=f"{op} is an in-place operation and should not have an OpInfo",
+ )
else:
# Intentionally don't use assertIn to avoid printing the
# (very large) container
@@ -2171,17 +2304,23 @@ class TestRefsOpsInfo(TestCase):
@parametrize("op", ref_ops_names)
def test_refs_are_in_decomp_table(self, op):
- path = op.split('.')
- module_path = '.'.join(path[:-1])
+ path = op.split(".")
+ module_path = ".".join(path[:-1])
op_name = path[-1]
op_impl = getattr(import_module(f"torch.{module_path}"), op_name)
if op in self.not_in_decomp_table:
- self.assertNotIn(op_impl, torch._decomp.decomposition_table.values(),
- f"Unexpectedly found {op} in torch._decomp.decomposition_table.values()")
+ self.assertNotIn(
+ op_impl,
+ torch._decomp.decomposition_table.values(),
+ f"Unexpectedly found {op} in torch._decomp.decomposition_table.values()",
+ )
else:
- self.assertIn(op_impl, torch._decomp.decomposition_table.values(),
- f"Did not find {op} in torch._decomp.decomposition_table.values()")
+ self.assertIn(
+ op_impl,
+ torch._decomp.decomposition_table.values(),
+ f"Did not find {op} in torch._decomp.decomposition_table.values()",
+ )
fake_skips = (
@@ -2248,9 +2387,7 @@ data_dependent_op_tests = (
"allclose",
)
-aliasing_failures = (
- "histogramdd",
-)
+aliasing_failures = ("histogramdd",)
fake_backward_skips = {
"linalg.cond",
@@ -2267,7 +2404,7 @@ fake_backward_skips = {
fake_backward_xfails = {skip(s) for s in fake_backward_skips} | {
xfail("fft.ihfftn"), # Mismatch in aten._conj_physical.default
xfail("fft.ihfft2"), # Mismatch in aten._conj_physical.default
- skip('nn.functional.ctc_loss'),
+ skip("nn.functional.ctc_loss"),
}
fake_autocast_backward_xfails = {
@@ -2276,9 +2413,10 @@ fake_autocast_backward_xfails = {
skip("linalg.pinv"),
skip("linalg.pinv", "hermitian"),
skip("linalg.pinv", "singular"),
- skip('pinverse'),
+ skip("pinverse"),
}
+
@unMarkDynamoStrictTest
class TestFakeTensor(TestCase):
def setUp(self):
@@ -2327,7 +2465,6 @@ class TestFakeTensor(TestCase):
with mode:
res_fake = op(input, *args, **kwargs)
-
for fake_out, real_out in zip(
pytree.tree_leaves(res_fake), pytree.tree_leaves(res)
):
@@ -2345,18 +2482,28 @@ class TestFakeTensor(TestCase):
prims.utils.compare_tensor_meta(fake_out, real_out, True)
if name not in aliasing_failures:
- fake_aliasing = outputs_alias_inputs((input, args, kwargs), res_fake)
- real_aliasing = outputs_alias_inputs((sample.input, sample, args, sample.kwargs), res)
+ fake_aliasing = outputs_alias_inputs(
+ (input, args, kwargs), res_fake
+ )
+ real_aliasing = outputs_alias_inputs(
+ (sample.input, sample, args, sample.kwargs), res
+ )
self.assertEqual(fake_aliasing, real_aliasing)
- self.assertTrue(name not in dynamic_output_op_tests and name not in data_dependent_op_tests)
+ self.assertTrue(
+ name not in dynamic_output_op_tests
+ and name not in data_dependent_op_tests
+ )
except torch._subclasses.fake_tensor.UnsupportedFakeTensorException:
pass
except torch._subclasses.fake_tensor.UnsupportedOperatorException:
pass
except torch._subclasses.fake_tensor.DynamicOutputShapeException:
- self.assertTrue(name in dynamic_output_op_tests or name in sometimes_dynamic_output_op_test)
+ self.assertTrue(
+ name in dynamic_output_op_tests
+ or name in sometimes_dynamic_output_op_test
+ )
except torch._subclasses.fake_tensor.DataDependentOutputException:
self.assertTrue(name in data_dependent_op_tests)
@@ -2421,7 +2568,9 @@ class TestFakeTensor(TestCase):
def test_fake_autocast(self, device, dtype, op):
if op.name in fake_autocast_device_skips[device]:
self.skipTest("Skip failing test")
- context = torch.cuda.amp.autocast if device == "cuda" else torch.cpu.amp.autocast
+ context = (
+ torch.cuda.amp.autocast if device == "cuda" else torch.cpu.amp.autocast
+ )
self._test_fake_helper(device, dtype, op, context)
def _test_fake_crossref_helper(self, device, dtype, op, context):
@@ -2441,24 +2590,37 @@ class TestFakeTensor(TestCase):
# TODO: enable check_aliasing, batch norm fails
try:
- with torch._subclasses.CrossRefFakeMode(ignore_op_fn=lambda fn: fn in common_skip_ops, check_aliasing=True):
- with warnings.catch_warnings(), context(), torch.autograd.set_multithreading_enabled(False):
+ with torch._subclasses.CrossRefFakeMode(
+ ignore_op_fn=lambda fn: fn in common_skip_ops, check_aliasing=True
+ ):
+ with warnings.catch_warnings(), context(), torch.autograd.set_multithreading_enabled(
+ False
+ ):
composite_compliance.compute_expected_grads(
- op.get_op(), args, kwargs,
+ op.get_op(),
+ args,
+ kwargs,
sample.output_process_fn_grad,
- op.gradcheck_wrapper)
+ op.gradcheck_wrapper,
+ )
except torch._subclasses.fake_tensor.UnsupportedOperatorException:
pass
@onlyCUDA
@ops([op for op in op_db if op.supports_autograd], allowed_dtypes=(torch.float,))
- @skipOps('TestFakeTensor', 'test_fake_crossref_backward_no_amp', fake_backward_xfails)
+ @skipOps(
+ "TestFakeTensor", "test_fake_crossref_backward_no_amp", fake_backward_xfails
+ )
def test_fake_crossref_backward_no_amp(self, device, dtype, op):
self._test_fake_crossref_helper(device, dtype, op, contextlib.nullcontext)
@onlyCUDA
@ops([op for op in op_db if op.supports_autograd], allowed_dtypes=(torch.float,))
- @skipOps('TestFakeTensor', 'test_fake_crossref_backward_amp', fake_backward_xfails | fake_autocast_backward_xfails)
+ @skipOps(
+ "TestFakeTensor",
+ "test_fake_crossref_backward_amp",
+ fake_backward_xfails | fake_autocast_backward_xfails,
+ )
def test_fake_crossref_backward_amp(self, device, dtype, op):
self._test_fake_crossref_helper(device, dtype, op, torch.cuda.amp.autocast)
@@ -2467,7 +2629,7 @@ class TestFakeTensor(TestCase):
samples = op.sample_inputs(device, dtype)
for sample in samples:
kwargs = sample.kwargs.copy()
- kwargs['layout'] = torch.strided
+ kwargs["layout"] = torch.strided
strided_result = op(sample.input, *sample.args, **kwargs)
self.assertEqual(strided_result.layout, torch.strided)
diff --git a/test/test_ops_fwd_gradients.py b/test/test_ops_fwd_gradients.py
index 30748aa001..e54db321db 100644
--- a/test/test_ops_fwd_gradients.py
+++ b/test/test_ops_fwd_gradients.py
@@ -1,16 +1,25 @@
# Owner(s): ["module: unknown"]
-from functools import partial
import platform
+from functools import partial
from unittest import skipIf as skipif
+
import torch
+from torch.testing._internal.common_device_type import (
+ instantiate_device_type_tests,
+ OpDTypes,
+ ops,
+)
+from torch.testing._internal.common_methods_invocations import op_db
-from torch.testing._internal.common_utils import unMarkDynamoStrictTest
from torch.testing._internal.common_utils import (
- TestGradients, run_tests, skipIfTorchInductor, IS_MACOS, TestCase)
-from torch.testing._internal.common_methods_invocations import op_db
-from torch.testing._internal.common_device_type import \
- (instantiate_device_type_tests, ops, OpDTypes)
+ IS_MACOS,
+ run_tests,
+ skipIfTorchInductor,
+ TestCase,
+ TestGradients,
+ unMarkDynamoStrictTest,
+)
# TODO: mitigate flaky issue on macOS https://github.com/pytorch/pytorch/issues/66033
# AFAIK, c10::ThreadPool looks correct in the way it uses condition_variable wait. The
@@ -19,8 +28,10 @@ if IS_MACOS:
torch.set_num_threads(1)
# gradcheck requires double precision
-_gradcheck_ops = partial(ops, dtypes=OpDTypes.supported,
- allowed_dtypes=[torch.double, torch.cdouble])
+_gradcheck_ops = partial(
+ ops, dtypes=OpDTypes.supported, allowed_dtypes=[torch.double, torch.cdouble]
+)
+
@unMarkDynamoStrictTest
class TestFwdGradients(TestGradients):
@@ -33,31 +44,46 @@ class TestFwdGradients(TestGradients):
self._check_helper(device, dtype, op, op.get_op(), "fwgrad_bwgrad")
else:
err_msg = r"Trying to use forward AD with .* that does not support it"
- hint_msg = ("Running forward-over-backward gradgrad for an OP that has does not support it did not "
- "raise any error. If your op supports forward AD, you should set supports_fwgrad_bwgrad=True.")
+ hint_msg = (
+ "Running forward-over-backward gradgrad for an OP that has does not support it did not "
+ "raise any error. If your op supports forward AD, you should set supports_fwgrad_bwgrad=True."
+ )
with self.assertRaisesRegex(NotImplementedError, err_msg, msg=hint_msg):
self._check_helper(device, dtype, op, op.get_op(), "fwgrad_bwgrad")
-
def _forward_grad_helper(self, device, dtype, op, variant, is_inplace):
# TODO: clean up how attributes are passed to gradcheck from OpInfos
def call_grad_test_helper():
- check_batched_forward_grad = ((op.check_batched_forward_grad and not is_inplace) or
- (op.check_inplace_batched_forward_grad and is_inplace))
- self._grad_test_helper(device, dtype, op, variant, check_forward_ad=True, check_backward_ad=False,
- check_batched_grad=False, check_batched_forward_grad=check_batched_forward_grad)
+ check_batched_forward_grad = (
+ op.check_batched_forward_grad and not is_inplace
+ ) or (op.check_inplace_batched_forward_grad and is_inplace)
+ self._grad_test_helper(
+ device,
+ dtype,
+ op,
+ variant,
+ check_forward_ad=True,
+ check_backward_ad=False,
+ check_batched_grad=False,
+ check_batched_forward_grad=check_batched_forward_grad,
+ )
+
if op.supports_forward_ad:
call_grad_test_helper()
else:
err_msg = r"Trying to use forward AD with .* that does not support it"
- hint_msg = ("Running forward AD for an OP that has does not support it did not "
- "raise any error. If your op supports forward AD, you should set supports_forward_ad=True")
+ hint_msg = (
+ "Running forward AD for an OP that has does not support it did not "
+ "raise any error. If your op supports forward AD, you should set supports_forward_ad=True"
+ )
with self.assertRaisesRegex(NotImplementedError, err_msg, msg=hint_msg):
call_grad_test_helper()
@_gradcheck_ops(op_db)
- @skipif(platform.machine() == "s390x",
- reason="Different precision of openblas functions: https://github.com/OpenMathLib/OpenBLAS/issues/4194")
+ @skipif(
+ platform.machine() == "s390x",
+ reason="Different precision of openblas functions: https://github.com/OpenMathLib/OpenBLAS/issues/4194",
+ )
def test_forward_mode_AD(self, device, dtype, op):
self._skip_helper(op, device, dtype)
@@ -71,10 +97,13 @@ class TestFwdGradients(TestGradients):
if not op.inplace_variant or not op.supports_inplace_autograd:
self.skipTest("Skipped! Operation does not support inplace autograd.")
- self._forward_grad_helper(device, dtype, op, self._get_safe_inplace(op.get_inplace()), is_inplace=True)
+ self._forward_grad_helper(
+ device, dtype, op, self._get_safe_inplace(op.get_inplace()), is_inplace=True
+ )
+
instantiate_device_type_tests(TestFwdGradients, globals())
-if __name__ == '__main__':
+if __name__ == "__main__":
TestCase._default_dtype_check_enabled = True
run_tests()
diff --git a/test/test_ops_gradients.py b/test/test_ops_gradients.py
index a867140efe..a78112ec0d 100644
--- a/test/test_ops_gradients.py
+++ b/test/test_ops_gradients.py
@@ -1,19 +1,29 @@
# Owner(s): ["module: unknown"]
from functools import partial
-import torch
-from torch.testing._internal.common_utils import TestGradients, run_tests, TestCase
+import torch
+from torch.testing._internal.common_device_type import (
+ instantiate_device_type_tests,
+ OpDTypes,
+ ops,
+)
from torch.testing._internal.common_methods_invocations import op_db
+
+from torch.testing._internal.common_utils import (
+ run_tests,
+ TestCase,
+ TestGradients,
+ unMarkDynamoStrictTest,
+)
from torch.testing._internal.custom_op_db import custom_op_db
from torch.testing._internal.hop_db import hop_db
-from torch.testing._internal.common_device_type import \
- (instantiate_device_type_tests, ops, OpDTypes)
-from torch.testing._internal.common_utils import unMarkDynamoStrictTest
# gradcheck requires double precision
-_gradcheck_ops = partial(ops, dtypes=OpDTypes.supported,
- allowed_dtypes=[torch.double, torch.cdouble])
+_gradcheck_ops = partial(
+ ops, dtypes=OpDTypes.supported, allowed_dtypes=[torch.double, torch.cdouble]
+)
+
@unMarkDynamoStrictTest
class TestBwdGradients(TestGradients):
@@ -49,16 +59,20 @@ class TestBwdGradients(TestGradients):
result = inplace(sample)
result.sum().backward()
else:
- self._grad_test_helper(device, dtype, op, self._get_safe_inplace(op.get_inplace()))
+ self._grad_test_helper(
+ device, dtype, op, self._get_safe_inplace(op.get_inplace())
+ )
# Test that gradients of gradients are computed correctly
@_gradcheck_ops(op_db + hop_db + custom_op_db)
def test_fn_gradgrad(self, device, dtype, op):
self._skip_helper(op, device, dtype)
if not op.supports_gradgrad:
- self.skipTest("Op claims it doesn't support gradgrad. This is not verified.")
+ self.skipTest(
+ "Op claims it doesn't support gradgrad. This is not verified."
+ )
else:
- self._check_helper(device, dtype, op, op.get_op(), 'bwgrad_bwgrad')
+ self._check_helper(device, dtype, op, op.get_op(), "bwgrad_bwgrad")
# Test that gradients of gradients are properly raising
@_gradcheck_ops(op_db + custom_op_db)
@@ -69,7 +83,7 @@ class TestBwdGradients(TestGradients):
err_msg = r"derivative for .* is not implemented"
with self.assertRaisesRegex(RuntimeError, err_msg):
- self._check_helper(device, dtype, op, op.get_op(), 'bwgrad_bwgrad')
+ self._check_helper(device, dtype, op, op.get_op(), "bwgrad_bwgrad")
# Method gradgrad (and grad, see above) tests are disabled since they're
# costly and redundant with function gradgrad (and grad) tests
@@ -83,11 +97,13 @@ class TestBwdGradients(TestGradients):
self._skip_helper(op, device, dtype)
if not op.inplace_variant or not op.supports_inplace_autograd:
self.skipTest("Skipped! Operation does not support inplace autograd.")
- self._check_helper(device, dtype, op, self._get_safe_inplace(op.get_inplace()), "bwgrad_bwgrad")
+ self._check_helper(
+ device, dtype, op, self._get_safe_inplace(op.get_inplace()), "bwgrad_bwgrad"
+ )
instantiate_device_type_tests(TestBwdGradients, globals())
-if __name__ == '__main__':
+if __name__ == "__main__":
TestCase._default_dtype_check_enabled = True
run_tests()
diff --git a/test/test_ops_jit.py b/test/test_ops_jit.py
index 758f6d47df..05ae05b94f 100644
--- a/test/test_ops_jit.py
+++ b/test/test_ops_jit.py
@@ -6,20 +6,39 @@ from textwrap import dedent
import torch
from torch.testing import FileCheck
-from torch.testing._internal.common_utils import \
- (run_tests, IS_SANDCASTLE, clone_input_helper, first_sample, TestCase)
+from torch.testing._internal.common_device_type import (
+ instantiate_device_type_tests,
+ OpDTypes,
+ ops,
+)
+from torch.testing._internal.common_jit import (
+ check_against_reference,
+ JitCommonTestCase,
+)
from torch.testing._internal.common_methods_invocations import op_db
-from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes
-from torch.testing._internal.common_jit import JitCommonTestCase, check_against_reference
-from torch.testing._internal.jit_metaprogramming_utils import create_script_fn, create_traced_fn, check_alias_annotation
-from torch.testing._internal.jit_utils import disable_autodiff_subgraph_inlining, is_lambda
-from torch.testing._internal.common_utils import unMarkDynamoStrictTest
+from torch.testing._internal.common_utils import (
+ clone_input_helper,
+ first_sample,
+ IS_SANDCASTLE,
+ run_tests,
+ TestCase,
+ unMarkDynamoStrictTest,
+)
+from torch.testing._internal.jit_metaprogramming_utils import (
+ check_alias_annotation,
+ create_script_fn,
+ create_traced_fn,
+)
+from torch.testing._internal.jit_utils import (
+ disable_autodiff_subgraph_inlining,
+ is_lambda,
+)
# variant testing is only done with torch.float and torch.cfloat to avoid
# excessive test times and maximize signal to noise ratio
-_variant_ops = partial(ops, dtypes=OpDTypes.supported,
- allowed_dtypes=(torch.float, torch.cfloat))
-
+_variant_ops = partial(
+ ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float, torch.cfloat)
+)
# Tests operators for consistency between JIT and eager, also checks
@@ -37,17 +56,25 @@ class TestJit(JitCommonTestCase):
# TODO WARNING: inplace x {traced, scripted} not currently tested
@_variant_ops(op_db)
def test_variant_consistency_jit(self, device, dtype, op):
- _requires_grad = (dtype in op.supported_backward_dtypes(torch.device(device).type))
+ _requires_grad = dtype in op.supported_backward_dtypes(
+ torch.device(device).type
+ )
include_conjugated_inputs = op.test_conjugated_samples and dtype.is_complex
- samples = op.sample_inputs(device, dtype, requires_grad=_requires_grad, include_conjugated_inputs=include_conjugated_inputs)
+ samples = op.sample_inputs(
+ device,
+ dtype,
+ requires_grad=_requires_grad,
+ include_conjugated_inputs=include_conjugated_inputs,
+ )
# Acquires variants to test
func = op.get_op()
method = op.get_method()
variants = {
# TODO: inplace tests currently fail, fix and add inplace variant
- 'function': func, 'method': method,
+ "function": func,
+ "method": method,
}
# scripting strips the torch.ops prefix from these operators
@@ -57,13 +84,12 @@ class TestJit(JitCommonTestCase):
self.skipTest("variant consistency doesn't work on torch.ops")
# TODO: find better way to standardize on op registration itself..
- has_fake_function = op.name in ["resize_", 'resize_as_']
+ has_fake_function = op.name in ["resize_", "resize_as_"]
if has_fake_function:
- variants = {'method': getattr(torch.Tensor, op.name)}
+ variants = {"method": getattr(torch.Tensor, op.name)}
samples = op.sample_inputs(device, dtype, requires_grad=False)
-
tested = False
for sample in samples:
# Test traced and scripted consistency
@@ -80,22 +106,30 @@ class TestJit(JitCommonTestCase):
tested = True
try:
- self.indiv_variant_test_jit(device, dtype, op, sample, func_type, variant, has_fake_function)
+ self.indiv_variant_test_jit(
+ device, dtype, op, sample, func_type, variant, has_fake_function
+ )
except Exception as e:
- variant_error_info = dedent(f"""
+ variant_error_info = dedent(
+ f"""
Error testing {op.name} {func_type} variant
with dtype: {dtype}
with inputs {sample}:
- """)
+ """
+ )
raise Exception(variant_error_info) from e
assert tested, "JIT Test does not execute any logic"
- def indiv_variant_test_jit(self, device, dtype, op, sample, func_type, variant, has_fake_function):
- _requires_grad = (dtype in op.supported_backward_dtypes(torch.device(device).type))
+ def indiv_variant_test_jit(
+ self, device, dtype, op, sample, func_type, variant, has_fake_function
+ ):
+ _requires_grad = dtype in op.supported_backward_dtypes(
+ torch.device(device).type
+ )
support_script = op.supports_scripting
# Create accessor for script function variant
- name = op.name + '_' if func_type == 'inplace' else op.name
+ name = op.name + "_" if func_type == "inplace" else op.name
# run with disable_autodiff_subgraph_inlining(True) to test
# autodiff support. Context manager forces the graph to contain
@@ -112,16 +146,23 @@ class TestJit(JitCommonTestCase):
return output
def get_sample():
- return clone_input_helper(sample.input) if op.name[-1] == '_' else sample.input
+ return (
+ clone_input_helper(sample.input)
+ if op.name[-1] == "_"
+ else sample.input
+ )
if support_script:
- check_against_reference(self,
- script_fn,
- op.get_op(),
- out_fn,
- (get_sample(),) + sample.args,
- sample.kwargs,
- no_grad=not _requires_grad, no_gradgrad=not op.supports_gradgrad)
+ check_against_reference(
+ self,
+ script_fn,
+ op.get_op(),
+ out_fn,
+ (get_sample(),) + sample.args,
+ sample.kwargs,
+ no_grad=not _requires_grad,
+ no_gradgrad=not op.supports_gradgrad,
+ )
# Check traced forward, grad, and grad grad
# TODO: fix tracing here
@@ -131,13 +172,16 @@ class TestJit(JitCommonTestCase):
if supports_tracing:
traced_fn = create_traced_fn(self, variant)
- check_against_reference(self,
- traced_fn,
- op.get_op(),
- out_fn,
- (get_sample(),) + sample.args,
- sample.kwargs,
- no_grad=not _requires_grad, no_gradgrad=not op.supports_gradgrad)
+ check_against_reference(
+ self,
+ traced_fn,
+ op.get_op(),
+ out_fn,
+ (get_sample(),) + sample.args,
+ sample.kwargs,
+ no_grad=not _requires_grad,
+ no_gradgrad=not op.supports_gradgrad,
+ )
# Check alias annotation schema for correctness (make
# sure inputs that aren't supposed to be modified aren't)
@@ -146,8 +190,13 @@ class TestJit(JitCommonTestCase):
if dtype == torch.float32:
# TODO: no reason why we cant run this with tracing graph
if support_script and op.name != "rsub":
- check_alias_annotation(name, (get_sample(),) + sample.args, sample.kwargs,
- func_type=func_type, aten_name=op.aten_name)
+ check_alias_annotation(
+ name,
+ (get_sample(),) + sample.args,
+ sample.kwargs,
+ func_type=func_type,
+ aten_name=op.aten_name,
+ )
# TODO: use script graph as well
checked_shape_analysis = False
@@ -156,14 +205,18 @@ class TestJit(JitCommonTestCase):
# right now, tuple of outputs and tensor output supported
# TODO: list of tensor outputs
- tuple_of_tensors = isinstance(out, tuple) and all(isinstance(elem, torch.Tensor) for elem in out)
+ tuple_of_tensors = isinstance(out, tuple) and all(
+ isinstance(elem, torch.Tensor) for elem in out
+ )
if isinstance(out, torch.Tensor) or tuple_of_tensors:
if tuple_of_tensors:
sizes = [elem.size() for elem in out]
else:
sizes = out.size()
- self.checkShapeAnalysis(sizes, traced_fn.graph, op.assert_jit_shape_analysis)
+ self.checkShapeAnalysis(
+ sizes, traced_fn.graph, op.assert_jit_shape_analysis
+ )
checked_shape_analysis = True
if op.assert_jit_shape_analysis:
self.assertTrue(checked_shape_analysis)
@@ -173,20 +226,31 @@ class TestJit(JitCommonTestCase):
# Sandcastle doesn't fuse nodes
if IS_SANDCASTLE:
# fusible nodes are expected to be found in FusionGroups in the DifferentiableGraphs
- nonfusible_nodes = op.autodiff_nonfusible_nodes + op.autodiff_fusible_nodes
+ nonfusible_nodes = (
+ op.autodiff_nonfusible_nodes + op.autodiff_fusible_nodes
+ )
fusible_nodes = []
else:
nonfusible_nodes = op.autodiff_nonfusible_nodes
fusible_nodes = op.autodiff_fusible_nodes
if supports_tracing:
- self.assertAutodiffNode(traced_fn.last_graph, op.assert_autodiffed, nonfusible_nodes, fusible_nodes)
+ self.assertAutodiffNode(
+ traced_fn.last_graph,
+ op.assert_autodiffed,
+ nonfusible_nodes,
+ fusible_nodes,
+ )
if support_script:
- self.assertAutodiffNode(script_fn.last_graph, op.assert_autodiffed, nonfusible_nodes, fusible_nodes)
+ self.assertAutodiffNode(
+ script_fn.last_graph,
+ op.assert_autodiffed,
+ nonfusible_nodes,
+ fusible_nodes,
+ )
# alias testing is only done with torch.float for the same reason
- _alias_ops = partial(ops, dtypes=OpDTypes.supported,
- allowed_dtypes=(torch.float,))
+ _alias_ops = partial(ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float,))
@_alias_ops(op for op in op_db if op.aliases)
def test_jit_alias_remapping(self, device, dtype, op):
@@ -209,16 +273,18 @@ class TestJit(JitCommonTestCase):
return str(v)
- args_kw = args + \
- [f"{v}" for v in sample.args] + \
- [f"{k}={quote_strs(v)}" for k, v in sample.kwargs.items()]
+ args_kw = (
+ args
+ + [f"{v}" for v in sample.args]
+ + [f"{k}={quote_strs(v)}" for k, v in sample.kwargs.items()]
+ )
# Prepare data for test tracing
sample_args_kwargs = ()
if len(sample.args) > 0:
- sample_args_kwargs += (sample.args, )
+ sample_args_kwargs += (sample.args,)
if len(sample.kwargs) > 0:
- sample_args_kwargs += (sample.kwargs, )
+ sample_args_kwargs += (sample.kwargs,)
original_name = op.aten_name
original_name_inplace = original_name + "_"
@@ -227,7 +293,11 @@ class TestJit(JitCommonTestCase):
for a_op in op.aliases:
inplace = a_op.inplace_variant
method_or_inplace = [a_op.inplace_variant, a_op.method_variant]
- variants = (v for v in (a_op.op, a_op.method_variant, a_op.inplace_variant) if v is not None)
+ variants = (
+ v
+ for v in (a_op.op, a_op.method_variant, a_op.inplace_variant)
+ if v is not None
+ )
# Test scripting:
for variant in variants:
@@ -235,10 +305,10 @@ class TestJit(JitCommonTestCase):
op_name = original_name_inplace if variant is inplace else original_name
if variant in method_or_inplace:
- fn_template = '''
+ fn_template = """
def _fn(t0{c}):
return t0.{alias_name}({args_kw})
- '''
+ """
# remove the first input tensor
script = fn_template.format(
c=", " if len(args_kw[1:]) > 1 else "",
@@ -246,10 +316,10 @@ class TestJit(JitCommonTestCase):
alias_name=variant_name,
)
else:
- fn_template = '''
+ fn_template = """
def _fn({args}):
return variant({args_kw})
- '''
+ """
script = fn_template.format(
args=", ".join(args),
args_kw=", ".join(args_kw),
@@ -261,13 +331,15 @@ class TestJit(JitCommonTestCase):
scripted = torch.jit.CompilationUnit(script)._fn
- if (variant is inplace and not torch.can_cast(expected_dtype, dtype)):
+ if variant is inplace and not torch.can_cast(expected_dtype, dtype):
try:
inp = clone_input_helper(sample.input)
scripted(inp)
except Exception as e:
continue
- self.fail("Inplace operation on integer tensor that should be promoted to float didn't fail!")
+ self.fail(
+ "Inplace operation on integer tensor that should be promoted to float didn't fail!"
+ )
inp = clone_input_helper(sample.input)
scripted(inp)
@@ -294,6 +366,6 @@ class TestJit(JitCommonTestCase):
instantiate_device_type_tests(TestJit, globals())
-if __name__ == '__main__':
+if __name__ == "__main__":
TestCase._default_dtype_check_enabled = True
run_tests() | 2.41.0 |
39e6b3156f91cd0250c5e9625d1235c7dd276f5 | Sat, 13 Apr 2024 04:04:09 +0000 | [PATCH 0130/1000] Cleanup: Remove redundant `inference_patterns` PatternMatcherPass (#121602) | ## Summary Removes a redundant `PatternMatcherPass` in Inductor post-grad passes Pull Request resolved: https://github.com/pytorch/pytorch/pull/121602 Approved by: https://github.com/jgong5, https://github.com/eellison | diff --git a/torch/_inductor/fx_passes/post_grad.py b/torch/_inductor/fx_passes/post_grad.py
index ff09069a2e..89a3978845 100644
--- a/torch/_inductor/fx_passes/post_grad.py
+++ b/torch/_inductor/fx_passes/post_grad.py
@@ -61,8 +61,6 @@ pass_patterns = [
PatternMatcherPass(),
PatternMatcherPass(),
]
-# patterns applied only in inference
-inference_patterns = PatternMatcherPass()
def post_grad_passes(gm: torch.fx.GraphModule, is_inference: bool):
@@ -100,8 +98,6 @@ def post_grad_passes(gm: torch.fx.GraphModule, is_inference: bool):
optimus_scuba_log[
f"{pattern_matcher_pass.pass_name}_post_grad"
] = upload_graph(gm.graph)
- if is_inference:
- inference_patterns.apply(gm.graph) # type: ignore[arg-type]
if config._fuse_ddp_communication:
fuse_ddp_communication( | 2.41.0 |
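For context, here is a minimal sketch, assuming the simplified flow visible in the diff above (the names `pass_patterns` and `pattern_matcher_pass` are taken from that file; this is not the full `post_grad_passes` implementation), of how the remaining pattern passes run once the inference-only pass is removed:

```python
# Hypothetical simplification of the post-grad pattern loop after this cleanup.
import torch


def apply_post_grad_patterns(gm: torch.fx.GraphModule, pass_patterns: list) -> None:
    # With `inference_patterns` gone there is no `is_inference` branch left here:
    # every remaining PatternMatcherPass is applied to the graph unconditionally.
    for pattern_matcher_pass in pass_patterns:
        pattern_matcher_pass.apply(gm.graph)
```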
e3f80f00f3995e335bd6313b8c4be998cc4e2cd | Sat, 13 Apr 2024 04:18:42 +0000 | [PATCH 0131/1000] accelerate `binary_cross_entropy_with_logits` (#122789) | Following https://github.com/pytorch/pytorch/pull/115539. Same benchmark as in #115539:

|avg time (ms)|with `pos_weight`|no `pos_weight`|
|-|-|-|
|before #115539 |2049|1736|
|after #115539 |1320|1049|
|this PR |907 |801|

This PR is 24-31% faster than the version after #115539. Pull Request resolved: https://github.com/pytorch/pytorch/pull/122789 Approved by: https://github.com/peterbell10 | diff --git a/aten/src/ATen/native/Activation.cpp b/aten/src/ATen/native/Activation.cpp
index 75f373811c..533bc32216 100644
--- a/aten/src/ATen/native/Activation.cpp
+++ b/aten/src/ATen/native/Activation.cpp
@@ -76,7 +76,6 @@
#include <ATen/ops/tanh.h>
#include <ATen/ops/threshold_backward_native.h>
#include <ATen/ops/threshold_native.h>
-#include <ATen/ops/zeros_like.h>
#include <utility>
#endif
@@ -748,9 +747,8 @@ Tensor infinitely_differentiable_gelu_backward(
}
std::tuple<Tensor, Tensor> log_sigmoid_forward_cpu(const Tensor& input) {
- // FIXME: do these actually need to be zeros_like or can they be empty_like?
- auto result = at::zeros_like(input, at::MemoryFormat::Contiguous);
- auto buffer = at::zeros_like(input, at::MemoryFormat::Contiguous);
+ auto result = at::empty_like(input, at::MemoryFormat::Contiguous);
+ auto buffer = at::empty_like(input, at::MemoryFormat::Contiguous);
log_sigmoid_cpu_stub(kCPU, result, buffer, input.contiguous());
return std::make_tuple(result, buffer);
}
diff --git a/aten/src/ATen/native/Loss.cpp b/aten/src/ATen/native/Loss.cpp
index 231ac54f67..ea0cb5419a 100644
--- a/aten/src/ATen/native/Loss.cpp
+++ b/aten/src/ATen/native/Loss.cpp
@@ -359,15 +359,15 @@ Tensor binary_cross_entropy_with_logits(const Tensor& input, const Tensor& targe
c10::MaybeOwned<Tensor> pos_weight_maybe_owned = at::borrow_from_optional_tensor(pos_weight_opt);
const Tensor& pos_weight = *pos_weight_maybe_owned;
- Tensor loss;
+ auto log_sigmoid_input = at::log_sigmoid(input);
if (pos_weight.defined()) {
// pos_weight need to be broadcasted, thus mul(target) is not inplace.
auto log_weight = (pos_weight - 1).mul(target).add_(1);
- loss = (1 - target).mul_(input).sub_(log_weight.mul_(at::log_sigmoid(input)));
- } else {
- loss = (1 - target).mul_(input).sub_(at::log_sigmoid(input));
+ log_sigmoid_input.mul_(log_weight);
}
+ Tensor loss = (1 - target).mul_(input).sub_(log_sigmoid_input);
+
if (weight.defined()) {
loss.mul_(weight);
} | 2.41.0 |
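The diff above folds the two branches into a single `at::log_sigmoid(input)` call: with `log_weight = (pos_weight - 1) * target + 1`, the loss becomes `(1 - target) * input - log_weight * log_sigmoid(input)`, and the `pos_weight` correction is applied in place. Below is a minimal Python sketch of that algebra, checked against the reference op; the tensor shapes and the per-class `pos_weight` are illustrative assumptions, not values from the PR.

```python
# Numerical sketch (not the ATen implementation) of the reformulated
# binary_cross_entropy_with_logits used in this PR.
import torch
import torch.nn.functional as F

input = torch.randn(4, 3)
target = torch.rand(4, 3)
pos_weight = torch.rand(3) + 0.5  # one positive weight per class (assumed shape)

# log_weight broadcasts pos_weight against target: (pos_weight - 1) * target + 1
log_weight = (pos_weight - 1).mul(target).add_(1)

# log_sigmoid is evaluated once; the pos_weight correction is an in-place multiply
log_sigmoid_input = F.logsigmoid(input).mul_(log_weight)
loss = (1 - target).mul_(input).sub_(log_sigmoid_input)

reference = F.binary_cross_entropy_with_logits(
    input, target, pos_weight=pos_weight, reduction="none"
)
torch.testing.assert_close(loss, reference)
```

The companion `Activation.cpp` hunk resolves the old FIXME by allocating `result` and `buffer` with `empty_like` instead of `zeros_like`, presumably because `log_sigmoid_cpu_stub` fills both outputs completely, so the zero-initialization was wasted work.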
91736f115cfb8414bb46df2821693a7cee140bf | Sat, 13 Apr 2024 01:51:52 +0000 | [PATCH 0132/1000] Fix links rendering when surrounding code in Dynamo deepdive (#123427) | I thought the RST was rendering correctly, but here we are. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123427 Approved by: https://github.com/peterbell10 | diff --git a/docs/source/export.rst b/docs/source/export.rst
index f3278e2721..2ebf780944 100644
--- a/docs/source/export.rst
+++ b/docs/source/export.rst
@@ -668,7 +668,8 @@ Read More
:caption: Deep Dive for PyTorch Developers
:maxdepth: 1
- torch.compiler_deepdive
+ torch.compiler_dynamo_overview
+ torch.compiler_dynamo_deepdive
torch.compiler_dynamic_shapes
torch.compiler_fake_tensor
diff --git a/docs/source/torch.compiler.rst b/docs/source/torch.compiler.rst
index 69dcd70eff..c861e413d0 100644
--- a/docs/source/torch.compiler.rst
+++ b/docs/source/torch.compiler.rst
@@ -102,7 +102,7 @@ Read More
:caption: Deep Dive for PyTorch Developers
:maxdepth: 1
- torch.compiler_deepdive
+ torch.compiler_dynamo_overview
torch.compiler_dynamo_deepdive
torch.compiler_dynamic_shapes
torch.compiler_nn_module
diff --git a/docs/source/torch.compiler_dynamo_deepdive.rst b/docs/source/torch.compiler_dynamo_deepdive.rst
index 79af3dab26..f4c45807d1 100644
--- a/docs/source/torch.compiler_dynamo_deepdive.rst
+++ b/docs/source/torch.compiler_dynamo_deepdive.rst
@@ -1,3 +1,5 @@
+.. _torch.compiler_dynamo_deepdive:
+
Dynamo Deep-Dive
================
@@ -14,7 +16,7 @@ ground up. We will discuss the functionality it provides, and how it is
implemented. By the end of this post, you will have a better
understanding of what went wrong when you ``torch.compiled`` a PyTorch
program and the compilation errored out, or succeeded but the speed-up
-was not what you expected. [1]_
+was not what you expected.
A Gentle Introduction to Dynamo
-------------------------------
@@ -60,11 +62,11 @@ we see the output that Dynamo traced
We call this a **graph (or trace) of the function for the given
inputs**. This is represented via an `FX
-graph <https://pytorch.org/docs/stable/fx.html>`__. We will simply think
+graph <https://pytorch.org/docs/main/fx.html>`__. We will simply think
of an FX graph as a container that stores a list of function calls.
The first thing we should notice is that the graph is a linear sequence
-of PyTorch operations. [2]_ Dynamo records all the PyTorch operations
+of PyTorch operations. [1]_ Dynamo records all the PyTorch operations
and stores them sequentially. For example, it split ``z = (x - y) ** 2``
into its two constituting operations, ``sub = l_x_ - l_y_`` and
``z = sub ** 2``.
@@ -215,10 +217,10 @@ variables and their names - The builtin functions like ``abs`` or
``print``
You can see all the fields
-`here <https://github.com/pytorch/pytorch/blob/e891a3bba9f05697d72776f6e89347231a141f03/torch/csrc/dynamo/eval_frame.c#L50-L59>`__. [3]_
+`here <https://github.com/pytorch/pytorch/blob/e891a3bba9f05697d72776f6e89347231a141f03/torch/csrc/dynamo/eval_frame.c#L50-L59>`__. [2]_
In summary, CPython provides the user’s interpreter with all the
-information necessary to execute the function. [4]_
+information necessary to execute the function. [3]_
With this API, we can implement a tracer by implementing an interpreter
that runs the code and records in a graph all the PyTorch operations
@@ -242,10 +244,10 @@ Implementing CPython in Python
So, we are back in the Python world. We have the bytecode of a function,
and all the context necessary to execute it. In particular, we have
landed at
-```_convert_frame_assert`` <https://github.com/pytorch/pytorch/blob/b6df8414601e1e086e830ca9e919e7fdc8874e71/torch/_dynamo/convert_frame.py#L272-L274>`__.
+`_convert_frame_assert <https://github.com/pytorch/pytorch/blob/b6df8414601e1e086e830ca9e919e7fdc8874e71/torch/_dynamo/convert_frame.py#L272-L274>`__.
This is the function that the decorator ``torch.compile`` returns! We
get to this function from
-```_dynamo.optimize`` <https://github.com/pytorch/pytorch/blob/b6df8414601e1e086e830ca9e919e7fdc8874e71/torch/_dynamo/eval_frame.py#L715-L727>`__.
+`_dynamo.optimize <https://github.com/pytorch/pytorch/blob/b6df8414601e1e086e830ca9e919e7fdc8874e71/torch/_dynamo/eval_frame.py#L715-L727>`__.
The decorator ``torch.compile`` is just a nice API around
``_dynamo.optimize``.
@@ -259,8 +261,7 @@ of Dynamo.
The parent class of the internal class structure is ``VariableTracker``
and represents the different objects that Dynamo understands. For
example, ``ListVariable``, represents a ``list`` object, and keeps
-internally a `list of
-``VariableTracker``\ s <https://github.com/pytorch/pytorch/blob/e38a3a6079a3861b4bc9f256120ec661f34e726d/torch/_dynamo/variables/lists.py#L48-L56>`__.
+internally a `list of VariableTrackers <https://github.com/pytorch/pytorch/blob/e38a3a6079a3861b4bc9f256120ec661f34e726d/torch/_dynamo/variables/lists.py#L48-L56>`__.
Another example of ``VariableTracker`` is
`ConstantVariable <https://github.com/pytorch/pytorch/blob/83c0763dda1f93c6cf552ba88260a0dc7a3ecb70/torch/_dynamo/variables/constant.py#L30>`__.
ConstantVariable wraps all the `objects considered constant by
@@ -269,12 +270,12 @@ We also have special subclasses for objects that require special
attention, like
`TensorVariable <https://github.com/pytorch/pytorch/blob/83c0763dda1f93c6cf552ba88260a0dc7a3ecb70/torch/_dynamo/variables/tensor.py#L68-L69>`__.
All these internal classes are defined in the
-```torch/_dynamo/variables`` <https://github.com/pytorch/pytorch/tree/83c0763dda1f93c6cf552ba88260a0dc7a3ecb70/torch/_dynamo/variables>`__
+`torch/_dynamo/variables <https://github.com/pytorch/pytorch/tree/83c0763dda1f93c6cf552ba88260a0dc7a3ecb70/torch/_dynamo/variables>`__
folder.
Python objects are wrapped into their corresponding ``VariableTracker``
class in
-```VariableBuilder._wrap`` <https://github.com/pytorch/pytorch/blob/83c0763dda1f93c6cf552ba88260a0dc7a3ecb70/torch/_dynamo/variables/builder.py#L365>`__.
+`VariableBuilder._wrap <https://github.com/pytorch/pytorch/blob/83c0763dda1f93c6cf552ba88260a0dc7a3ecb70/torch/_dynamo/variables/builder.py#L365>`__.
This function is just a very long chain of ``elif``\ s that tries to
recursively pattern-match the Python inputs into the appropriate type of
``VariableTracker``.
@@ -304,9 +305,9 @@ traced into the right ``VariableTracker``.
Ok, so we have an IR for our tracer, now we *just* need to reimplement
CPython’s stack machine. This is implemented by
-```InstructorTranslatorBase`` <https://github.com/pytorch/pytorch/blob/69f112d5867f785a3a090a0c6d6644ae047033ac/torch/_dynamo/symbolic_convert.py#L576-L594>`__
+`InstructorTranslatorBase <https://github.com/pytorch/pytorch/blob/69f112d5867f785a3a090a0c6d6644ae047033ac/torch/_dynamo/symbolic_convert.py#L576-L594>`__
in
-```symbolic_convert.py`` <https://github.com/pytorch/pytorch/blob/69f112d5867f785a3a090a0c6d6644ae047033ac/torch/_dynamo/symbolic_convert.py>`__.
+`symbolic_convert.py <https://github.com/pytorch/pytorch/blob/69f112d5867f785a3a090a0c6d6644ae047033ac/torch/_dynamo/symbolic_convert.py>`__.
``InstructionTranslatorBase`` has about 200 methods, implementing almost
all of Python bytecodes. As an example, we can see the implementation of
@@ -330,10 +331,9 @@ Generating the Output Graph
With a way to symbolically execute Python code, we are set to extract
the PyTorch operations that happen during the symbolic execution of a
program given some inputs. This is implemented in Dynamo via the
-```OutputGraph`` <https://github.com/pytorch/pytorch/blob/69f112d5867f785a3a090a0c6d6644ae047033ac/torch/_dynamo/output_graph.py#L221-L230>`__
+`OutputGraph <https://github.com/pytorch/pytorch/blob/69f112d5867f785a3a090a0c6d6644ae047033ac/torch/_dynamo/output_graph.py#L221-L230>`__
object. The ``OutputGraph`` object is `bound to an
-``InstructionTranslator``
-object <https://github.com/pytorch/pytorch/blob/69f112d5867f785a3a090a0c6d6644ae047033ac/torch/_dynamo/symbolic_convert.py#L2060-L2071>`__
+`InstructionTranslator object <https://github.com/pytorch/pytorch/blob/69f112d5867f785a3a090a0c6d6644ae047033ac/torch/_dynamo/symbolic_convert.py#L2060-L2071>`__
and it tracks all the data necessary to create the FX graph which will
be returned by Dynamo.
@@ -342,9 +342,9 @@ All the inputs and intermediary elements of the FX graph are
``fx.Proxy``\ s. ``fx.Proxy``\ s are used to build the FX graph.
In particular, they record every PyTorch operation performed on them
into the graph. You can can create a new operation to be added to
-the graph by calling ```create_proxy`` <https://github.com/pytorch/pytorch/blob/fb80f05ee2e1cba17892980701bfd5dbce58349f/torch/_dynamo/output_graph.py#L430-L431>`__.
+the graph by calling `create_proxy <https://github.com/pytorch/pytorch/blob/fb80f05ee2e1cba17892980701bfd5dbce58349f/torch/_dynamo/output_graph.py#L430-L431>`__.
Then, we can add it to the graph through the function
-```wrap_fx_proxy`` <https://github.com/pytorch/pytorch/blob/fb80f05ee2e1cba17892980701bfd5dbce58349f/torch/_dynamo/variables/builder.py#L1311>`__.
+`wrap_fx_proxy <https://github.com/pytorch/pytorch/blob/fb80f05ee2e1cba17892980701bfd5dbce58349f/torch/_dynamo/variables/builder.py#L1311>`__.
A graph stores operations on tensors… and operations on symbolic
integers. We will discuss symbolic integers later on, but first we will
@@ -358,7 +358,7 @@ Making Dynamo Sound: Guards
At this point, we have a way to trace programs completely disregarding control flow.
And for that, we have reimplemented all of CPython… If this sounds like a bit of an
overkill, that is because it is.
-```torch.jit.trace`` <https://pytorch.org/docs/stable/generated/torch.jit.trace.html>`__
+`torch.jit.trace <https://pytorch.org/docs/main/generated/torch.jit.trace.html>`__
already implements this without all this machinery, so what gives?
The issue with ``torch.jit.trace``, as it is warned in its docs, is that
@@ -399,7 +399,7 @@ with ``TORCH_LOGS=guards`` prints (among other guards)
L['b'] == 'Hello'
This reads as “the local variable ``b`` should have a specific type
-(``str`` in this case, represented by the constant `9433...`) and
+(``str`` in this case, represented by the constant ``9433...``) and
its value should be ``'Hello'``”. If we then execute the function
again passing a different argument
@@ -442,15 +442,15 @@ the objects they contain. In
return a * x
``x`` and ``y`` have
-```LocalSource`` <https://github.com/pytorch/pytorch/blob/40dc0580a69565b06ec5263efe5d87cecc8200f7/torch/_dynamo/source.py#L80-L92>`__
+`LocalSource <https://github.com/pytorch/pytorch/blob/40dc0580a69565b06ec5263efe5d87cecc8200f7/torch/_dynamo/source.py#L80-L92>`__
as their source, and ``y[0]`` has
-```GetItemSource`` <https://github.com/pytorch/pytorch/blob/40dc0580a69565b06ec5263efe5d87cecc8200f7/torch/_dynamo/source.py#L302>`__,
+`GetItemSource <https://github.com/pytorch/pytorch/blob/40dc0580a69565b06ec5263efe5d87cecc8200f7/torch/_dynamo/source.py#L302>`__,
which stores a ``LocalSource`` inside. On the other hand, ``a`` will not
have a source as it is an intermediate variable that only exists within
the fx graph.
All these are defined in
-```torch/_dynamo/source.py`` <https://github.com/pytorch/pytorch/blob/main/torch/_dynamo/source.py>`__.
+`torch/_dynamo/source.py <https://github.com/pytorch/pytorch/blob/main/torch/_dynamo/source.py>`__.
We can see the guard generated by ``GetItemSource`` in the following
example:
@@ -496,9 +496,9 @@ Symbolic Shapes
Another point we discussed in the introduction is that Dynamo knows how
to trace integers. In order to implement this, we use a symbolic class
-```torch.SymInt`` <https://github.com/pytorch/pytorch/blob/fb80f05ee2e1cba17892980701bfd5dbce58349f/torch/__init__.py#L244-L249>`__\ [5]_
+`torch.SymInt <https://github.com/pytorch/pytorch/blob/fb80f05ee2e1cba17892980701bfd5dbce58349f/torch/__init__.py#L244-L249>`__
that acts like an ``int`` but it records all the operations performed on
-it in the output FX graph. We already saw this class in the introduction
+it in the output FX graph. [4]_ We already saw this class in the introduction
when introducing symbolic integer tracing.
Let us now discuss the three properties that define symbolic shape
@@ -588,7 +588,7 @@ more general guards on this more generic kernel.
**Compilation performance tip**. If you know that a dimension will vary
in size, you can mark it as dynamic by calling
-```torch._dynamo.mark_dynamic`` <https://github.com/pytorch/pytorch/blob/66a76516bfc341b2b55bb2056d2faa9c2de46d69/torch/_dynamo/decorators.py#L176>`__
+`torch._dynamo.mark_dynamic <https://github.com/pytorch/pytorch/blob/66a76516bfc341b2b55bb2056d2faa9c2de46d69/torch/_dynamo/decorators.py#L176>`__
before calling ``torch.compile``. This will avoid the first compilation
with a static shape. There are other useful utility functions like
``maybe_mark_dynamic`` or ``mark_static``. You can also have all
@@ -671,7 +671,7 @@ arbitrary Python code” is perhaps a bit too general. Dynamo implements a
good part of Python, but does it implement the more complex parts, like
coroutines or async? Does it implement the whole Python standard
library? NumPy also has a Python API. Does ``torch.compile`` also
-understand NumPy? and Django? [6]_
+understand NumPy? and Django? [5]_
Python’s ecosystem is massive, and a good part of it is written in other
more performant languages like C++ or Rust, and it just exposes Python
@@ -683,15 +683,15 @@ The usual way machine learning tracers handle this issue is by informing
the user that the operation they choked on and giving up tracing
altogether. This would pose a real usability issue in the case of
PyTorch, where its users are used to the flexibility it gives them. As a
-real-world example the ```doctr_det_predictor`` model uses NumPy and the
-``cv2`` library to postprocess the model’s
+real-world example the ``doctr_det_predictor`` model uses NumPy and the
+``cv2`` library to `postprocess the model’s
result <https://github.com/mindee/doctr/blob/f2114758d529ed8d3d0030581638f0520b6b98d8/doctr/models/detection/core.py#L86>`__.
Here is another place where having access to CPython is interesting.
Rather than erroring out, Dynamo can let CPython run that problematic
code! To do this, Dynamo generates at trace time one graph with all the
operations before the problematic code, and one with all the operations
-after. [7]_ Then, at runtime, it will delegate to CPython to execute the
+after. [6]_ Then, at runtime, it will delegate to CPython to execute the
first graph, then the problematic code, and then the second graph. This
process of stopping the tracing and generating multiple graphs is called
a **graph break**.
@@ -811,10 +811,9 @@ implementing the strategy that we described before
The code generation of the stack in Dynamo is delegated to
``VariableTracker`` subclasses. Every ``VariableTracker`` object in
-Dynamo has a ```reconstruct``
-method <https://github.com/pytorch/pytorch/blob/e891a3bba9f05697d72776f6e89347231a141f03/torch/_dynamo/variables/lists.py#L307-L309>`__
-that generates the necessary bytecode to create the python object it
-represents on the stack.
+Dynamo has a `reconstruct <https://github.com/pytorch/pytorch/blob/e891a3bba9f05697d72776f6e89347231a141f03/torch/_dynamo/variables/lists.py#L307-L309>`__
+method that generates the necessary bytecode to create the python object
+it represents on the stack.
**Debugging tip**. Graph breaks hamper performance, and as such, it is
best to avoid them. Running a program with ``TORCH_LOGS=graph_breaks``
@@ -843,34 +842,24 @@ github <https://github.com/pytorch/pytorch/issues?q=is%3Aissue+is%3Aopen+label%3
Many of them require very minor changes in the code, once you find where
you need to make those changes.
-.. [1]
- In the same way that Dynamo takes its name from
- [Dynamorio].(https://dynamorio.org/), this blog post’s name is a
- small nod to `You Could Have Invented Spectral
- Sequences <https://www.ams.org/notices/200601/fea-chow.pdf>`__.
+Footnotes
+---------
-.. [2]
- In the literature, this is called a Directed Acyclical Graph (DAG).
+.. [1] In the literature, this is called a Directed Acyclical Graph (DAG).
-.. [3]
- All this binding code lives in ``torch/csrc/dynamo/eval_frame.c``.
+.. [2] All this binding code lives in ``torch/csrc/dynamo/eval_frame.c``.
-.. [4]
- In CPython lingo, the set of all these objects are called `a
+.. [3] In CPython lingo, the set of all these objects are called `a
frame <https://github.com/python/cpython/blob/f26bfe4b25f7e5a4f68fcac26207b7175abad208/Include/internal/pycore_frame.h#L57-L71>`__.
-.. [5]
- There are also ``SymBool`` and ``SymFloat`` classes. The latter one
+.. [4] There are also ``SymBool`` and ``SymFloat`` classes. The latter one
is not used all that much at the time of this writing.
-.. [6]
- Interestingly enough, it does understand NumPy code! Have a look at
+.. [5] Interestingly enough, it does understand NumPy code! Have a look at
`this blogpost <https://pytorch.org/blog/compiling-numpy-code/>`__
- and `the
- docs <https://pytorch.org/docs/stable/torch.compiler_faq.html#does-numpy-work-with-torch-compile>`__.
+ and `the docs <https://pytorch.org/docs/main/torch.compiler_faq.html#does-numpy-work-with-torch-compile>`__.
Now, this is just possible because we reimplemented NumPy using
PyTorch. Good luck implementing Django in PyTorch though…
-.. [7]
- Assuming there is just one piece of problematic code. If there are
+.. [6] Assuming there is just one piece of problematic code. If there are
more, Dynamo can split the code into as many graphs as it needs.
diff --git a/docs/source/torch.compiler_deepdive.rst b/docs/source/torch.compiler_dynamo_overview.rst
similarity index 99%
rename from docs/source/torch.compiler_deepdive.rst
rename to docs/source/torch.compiler_dynamo_overview.rst
index bdaf13278e..cce1c39316 100644
--- a/docs/source/torch.compiler_deepdive.rst
+++ b/docs/source/torch.compiler_dynamo_overview.rst
@@ -1,5 +1,5 @@
-TorchDynamo Deep Dive
-=====================
+TorchDynamo Overview
+====================
Before you read this section, read :ref:`torch.compiler_overview`.
@@ -346,3 +346,5 @@ To summarize, the compiled code is conceptually equivalent to the code below:
The following diagram demonstrates how ``torch.compile`` transforms and optimizes user-written code: it first extracts computation graphs from the user-written function, and compiles these graphs into optimized functions, then assembles them into a new function, which is functionally equivalent to the user-written code but optimized to have a good computation speed.
.. image:: _static/img/dynamo/flowchart.jpg
+
+To learn more about how all this is implemented internally, see :ref:`torch.compiler_dynamo_deepdive`. | 2.41.0 |
961e23e76a83640170916b5cecd5090512c0b74 | Sat, 13 Apr 2024 05:27:52 +0000 | [PATCH 0133/1000] primitive attribute assignment (#123898) | This PR ensures that assignment of attributes of primitive type works without needing any code changes in non-strict mode. (In a previous PR we banned attribute assignments of tensor type unless such attributes are registered as buffers.) While strict mode errors on (all) attribute assignments, non-strict doesn't care, so one might assume that this kind of attribute assignment should already work in non-strict. However, there's a problem: we run through the program once for metadata collection and then run through it again for tracing, so the values observed during tracing (and potentially burned into the graph) do not reflect what should have been observed had the metadata collection pass not run. So the only thing this PR needs to do is restore the values of assigned attributes of primitive type once the metadata collection pass has run. We do this by moving the attribute-assignment-detecting context manager from the overall `aot_export` call in `_trace.py` to the metadata collection pass in `aot_autograd.py`, and extending it. The rest of the PR moves some utils around. Differential Revision: D56047952 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123898 Approved by: https://github.com/angelayi | diff --git a/test/export/test_export.py b/test/export/test_export.py
index 5b2f920739..4bc660b916 100644
--- a/test/export/test_export.py
+++ b/test/export/test_export.py
@@ -424,7 +424,7 @@ class TestExport(TestCase):
foo, bad_example_inp, dynamic_shapes=dynamic_shapes, strict=False
)
- def test_state(self):
+ def test_state_tensors(self):
class M(torch.nn.Module): # simple with register buffer
def __init__(self):
super().__init__()
@@ -465,7 +465,7 @@ class TestExport(TestCase):
with self.assertRaisesRegex(
ValueError,
- "The attribute self.buf was assigned during export",
+ "The tensor attribute self.buf was assigned during export",
):
torch.export.export(M(), (torch.randn(2, 3),), strict=False)
@@ -523,10 +523,29 @@ class TestExport(TestCase):
with self.assertRaisesRegex(
ValueError,
- "The attributes self.tensors\\[0\\], self.tensors\\[1\\] were assigned during export",
+ "The tensor attributes self.tensors\\[0\\], self.tensors\\[1\\] were assigned during export",
):
torch.export.export(M(), (torch.randn(2, 3),), strict=False)
+ def test_state_primitives(self):
+ class M(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.x = 1
+ self.y = {"k": 2}
+ self.z = (3,)
+
+ def forward(self, x):
+ self.x = self.x + 4
+ self.y["k"] = self.y["k"] + 5
+ self.z = (self.z[0] + 6,)
+ return x + self.x + self.y["k"] + self.z[0]
+
+ ep = torch.export.export(M(), (torch.randn(2, 3),), strict=False)
+ self.assertTrue(
+ torch.allclose(ep.module()(torch.zeros(2, 3)), torch.ones(2, 3) * 21)
+ )
+
# Predispatch has different expected results
@testing.expectedFailureSerDerPreDispatch
def test_torch_fn(self):
diff --git a/torch/_functorch/_aot_autograd/dispatch_and_compile_graph.py b/torch/_functorch/_aot_autograd/dispatch_and_compile_graph.py
index de54569571..653a882680 100644
--- a/torch/_functorch/_aot_autograd/dispatch_and_compile_graph.py
+++ b/torch/_functorch/_aot_autograd/dispatch_and_compile_graph.py
@@ -28,7 +28,7 @@ from .traced_function_transforms import (
fn_input_mutations_to_outputs,
fn_prepped_for_autograd,
)
-from .utils import unlift_tokens
+from .utils import root_module_when_exporting_non_strict, unlift_tokens
aot_graphs_log = getArtifactLogger(__name__, "aot_graphs")
@@ -49,16 +49,6 @@ def _create_graph(f, args, *, aot_config: AOTConfig) -> torch.fx.GraphModule:
return fx_g
-def _root_module_when_exporting_non_strict(flat_fn):
- # When exporting in non-strict mode, we wrap the root module in a specific pattern.
- # See `_aot_export_non_strict` in torch.export._trace.py.
- # We look for that wrapping pattern here.
- if hasattr(flat_fn, "_orig_mod") and hasattr(flat_fn._orig_mod, "_export_root"):
- return flat_fn._orig_mod._export_root
- else:
- return None
-
-
def aot_dispatch_base_graph(
flat_fn,
flat_args: List[Tensor],
@@ -107,7 +97,7 @@ def aot_dispatch_base_graph(
# We track buffer assignments when exporting in non-strict mode.
# (In contrast, strict mode errors on any attribute assignment.)
- mod_when_exporting_non_strict = _root_module_when_exporting_non_strict(flat_fn)
+ mod_when_exporting_non_strict = root_module_when_exporting_non_strict(flat_fn)
if aot_config.is_export and mod_when_exporting_non_strict is not None:
# For any buffer that is assigned, we want to associate it to the final proxy node
# that it is assigned to. This node can then be added as a buffer mutation output.
diff --git a/torch/_functorch/_aot_autograd/utils.py b/torch/_functorch/_aot_autograd/utils.py
index 97512f6836..172f792826 100644
--- a/torch/_functorch/_aot_autograd/utils.py
+++ b/torch/_functorch/_aot_autograd/utils.py
@@ -282,3 +282,13 @@ def unlift_tokens(fw_module, fw_metadata):
fw_metadata.num_forward_returns -= num_tokens
fw_metadata.num_forward -= num_tokens
fw_metadata.tokens = {}
+
+
+def root_module_when_exporting_non_strict(flat_fn):
+ # When exporting in non-strict mode, we wrap the root module in a specific pattern.
+ # See `_aot_export_non_strict` in torch.export._trace.py.
+ # We look for that wrapping pattern here.
+ if hasattr(flat_fn, "_orig_mod") and hasattr(flat_fn._orig_mod, "_export_root"):
+ return flat_fn._orig_mod._export_root
+ else:
+ return None
diff --git a/torch/_functorch/aot_autograd.py b/torch/_functorch/aot_autograd.py
index 4e5e8787fe..030306072f 100644
--- a/torch/_functorch/aot_autograd.py
+++ b/torch/_functorch/aot_autograd.py
@@ -1,7 +1,7 @@
# mypy: ignore-errors
import itertools
-from contextlib import nullcontext
+from contextlib import contextmanager, nullcontext
from functools import partial, wraps
from typing import Any, Callable, Dict, List, Optional, Tuple
from unittest.mock import patch
@@ -118,6 +118,7 @@ from ._aot_autograd.utils import ( # noqa: F401
maybe_to_fresh_input,
normalize_as_list,
partial_flatten_asdict,
+ root_module_when_exporting_non_strict,
strict_zip,
)
from .partitioners import default_partition
@@ -548,12 +549,18 @@ def create_aot_dispatcher_function(
# Patch set_rng_state as set_rng_state with fake tensors is
# nonsensical. This does not affect the collection of metadata.
with patch("torch.cuda.set_rng_state", lambda *args: None):
- fw_metadata = run_functionalized_fw_and_collect_metadata(
- flat_fn,
- keep_input_mutations=aot_config.keep_inference_input_mutations,
- is_train=needs_autograd,
- pre_dispatch=aot_config.pre_dispatch,
- )(*fake_flat_args)
+ mod = root_module_when_exporting_non_strict(flat_fn)
+ if mod is not None:
+ ctx = _detect_attribute_assignment(mod)
+ else:
+ ctx = nullcontext()
+ with ctx:
+ fw_metadata = run_functionalized_fw_and_collect_metadata(
+ flat_fn,
+ keep_input_mutations=aot_config.keep_inference_input_mutations,
+ is_train=needs_autograd,
+ pre_dispatch=aot_config.pre_dispatch,
+ )(*fake_flat_args)
req_subclass_dispatch = requires_subclass_dispatch(
fake_flat_args, fw_metadata
@@ -1352,5 +1359,71 @@ def _aot_export_function(
return fx_g, meta, in_spec, out_spec.spec
+@contextmanager
+def _detect_attribute_assignment(mod: torch.nn.Module):
+ # Do not allow assignment of tensor attributes during export unless
+ # the attribute is registered as a buffer.
+
+ STD_ATTRS = {
+ "_backward_hooks",
+ "_backward_pre_hooks",
+ "_buffers",
+ "_forward_hooks",
+ "_forward_hooks_always_called",
+ "_forward_hooks_with_kwargs",
+ "_forward_pre_hooks",
+ "_forward_pre_hooks_with_kwargs",
+ "_is_full_backward_hook",
+ "_load_state_dict_post_hooks",
+ "_load_state_dict_pre_hooks",
+ "_modules",
+ "_non_persistent_buffers_set",
+ "_parameters",
+ "_state_dict_hooks",
+ "_state_dict_pre_hooks",
+ "training",
+ }
+
+ def _get_attributes(mod):
+ # return any attributes of a module that are not standard attributes
+ return {k: v for k, v in mod.__dict__.items() if k not in STD_ATTRS}
+
+ # save state of attributes before enter
+ snapshot = pytree.tree_map(lambda x: x, _get_attributes(mod))
+ try:
+ yield
+ finally:
+ # after exit, compare state of attributes with snapshot
+ # to detect which tensor attributes were assigned
+ assigned_tensor_attributes = []
+
+ def _collect_assigned_tensor_attributes(kp, v, _v):
+ if _v is not v:
+ attr, *rest = kp
+ if isinstance(v, torch.Tensor):
+ assigned_tensor_attributes.append(
+ f"self.{attr.key}{pytree.keystr(rest)}"
+ )
+ # TODO(avik): Assigning all other types are allowed right now.
+ # Maybe in the future we want to limit this to primitive types?
+
+ pytree.tree_map_with_path(
+ _collect_assigned_tensor_attributes, snapshot, _get_attributes(mod)
+ )
+ # restore state of all attributes (including, e.g., of primitive types)
+ mod.__dict__.update(snapshot)
+
+ if assigned_tensor_attributes:
+ if len(assigned_tensor_attributes) > 1:
+ noun, verb = "attributes", "were"
+ else:
+ noun, verb = "attribute", "was"
+ raise ValueError(
+ f"The tensor {noun} {', '.join(assigned_tensor_attributes)} {verb} assigned during export. "
+ "Such attributes must be registered as buffers using the `register_buffer` API "
+ "(https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.register_buffer)."
+ )
+
+
compiled_function = aot_function
compiled_module = aot_module
diff --git a/torch/export/_trace.py b/torch/export/_trace.py
index 2c7249c3f6..bec2189725 100644
--- a/torch/export/_trace.py
+++ b/torch/export/_trace.py
@@ -394,67 +394,6 @@ def _make_module_call_graph(
return ret
-def _get_attributes(mod):
- # return any attributes of a module that are not standard attributes
- STD_ATTRS = {
- "_backward_hooks",
- "_backward_pre_hooks",
- "_buffers",
- "_forward_hooks",
- "_forward_hooks_always_called",
- "_forward_hooks_with_kwargs",
- "_forward_pre_hooks",
- "_forward_pre_hooks_with_kwargs",
- "_is_full_backward_hook",
- "_load_state_dict_post_hooks",
- "_load_state_dict_pre_hooks",
- "_modules",
- "_non_persistent_buffers_set",
- "_parameters",
- "_state_dict_hooks",
- "_state_dict_pre_hooks",
- "training",
- }
- return {k: v for k, v in mod.__dict__.items() if k not in STD_ATTRS}
-
-
-@contextmanager
-def detect_attribute_assignment(mod: torch.nn.Module):
- # Do not allow assignment of tensor attributes during export unless
- # the attribute is registered as a buffer.
-
- # save state of attributes before enter
- snapshot = pytree.tree_map(lambda x: x, _get_attributes(mod))
- try:
- yield
- finally:
- # after exit, compare state of attributes with snapshot
- # to detect which attributes were assigned
- assigned_attributes = []
-
- def _collect_assigned_attributes(kp, t, _t):
- if isinstance(t, torch.Tensor) and _t is not t:
- attr, *rest = kp
- assigned_attributes.append(
- f"self.{attr.key}{torch.utils._pytree.keystr(rest)}"
- )
-
- pytree.tree_map_with_path(
- _collect_assigned_attributes, snapshot, _get_attributes(mod)
- )
-
- if assigned_attributes:
- if len(assigned_attributes) > 1:
- msg = f"attributes {', '.join(assigned_attributes)} were"
- else:
- msg = f"attribute {assigned_attributes[0]} was"
- raise ValueError(
- f"The {msg} assigned during export. "
- "Such attributes must be registered as buffers using the `register_buffer` API "
- "(https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.register_buffer)."
- )
-
-
def _export_to_torch_ir(
f: Callable,
args: Tuple[Any, ...],
@@ -1006,8 +945,7 @@ def _export(
*args, **kwargs
)
else:
- with detect_attribute_assignment(self._export_root):
- tree_out = self._export_root(*args, **kwargs)
+ tree_out = self._export_root(*args, **kwargs)
flat_outs, out_spec = pytree.tree_flatten(tree_out)
return tuple(flat_outs)
| 2.41.0 |