Build uploaded using `kernels`.
- build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc +0 -0
- build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc +0 -0
- build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc +0 -0
- build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/{_deformable_detr_7c33cbe.abi3.so → _deformable_detr_320b408.abi3.so} +2 -2
- build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/_ops.py +3 -3
- build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc +0 -0
- build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc +0 -0
- build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc +0 -0
- build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/_deformable_detr_320b408.abi3.so +3 -0
- build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/_ops.py +3 -3
- build/torch29-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py +46 -0
- build/torch29-cxx11-cu126-aarch64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc +0 -0
- build/torch29-cxx11-cu126-aarch64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc +0 -0
- build/torch29-cxx11-cu126-aarch64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc +0 -0
- build/{torch28-cxx11-cu129-aarch64-linux/deformable_detr/_deformable_detr_a92c8ea_dirty.abi3.so → torch29-cxx11-cu126-aarch64-linux/deformable_detr/_deformable_detr_320b408.abi3.so} +2 -2
- build/torch29-cxx11-cu126-aarch64-linux/deformable_detr/_ops.py +9 -0
- build/torch29-cxx11-cu126-aarch64-linux/deformable_detr/layers.py +84 -0
- build/torch29-cxx11-cu128-aarch64-linux/deformable_detr/__init__.py +46 -0
- build/torch29-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc +0 -0
- build/torch29-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc +0 -0
- build/torch29-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc +0 -0
- build/torch29-cxx11-cu128-aarch64-linux/deformable_detr/_deformable_detr_320b408.abi3.so +3 -0
- build/torch29-cxx11-cu128-aarch64-linux/deformable_detr/_ops.py +9 -0
- build/torch29-cxx11-cu128-aarch64-linux/deformable_detr/layers.py +84 -0
- build/torch29-cxx11-cu130-aarch64-linux/deformable_detr/__init__.py +46 -0
- build/torch29-cxx11-cu130-aarch64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc +0 -0
- build/torch29-cxx11-cu130-aarch64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc +0 -0
- build/torch29-cxx11-cu130-aarch64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc +0 -0
- build/torch29-cxx11-cu130-aarch64-linux/deformable_detr/_deformable_detr_320b408.abi3.so +3 -0
- build/torch29-cxx11-cu130-aarch64-linux/deformable_detr/_ops.py +9 -0
- build/torch29-cxx11-cu130-aarch64-linux/deformable_detr/layers.py +84 -0
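The build directories above are the variants the `kernels` library can select from at load time, based on the local Torch version, CUDA version, C++ ABI, and architecture. A minimal consumer-side sketch; the repository id used here is an assumption for illustration only, substitute the repo this build was actually pushed to:

```python
from kernels import get_kernel

# Downloads and loads the build variant matching the local torch/CUDA/ABI/arch,
# e.g. build/torch29-cxx11-cu128-aarch64-linux on an aarch64 host with CUDA 12.8.
deformable_detr = get_kernel("kernels-community/deformable-detr")  # repo id assumed

# The loaded package exposes the layers submodule added in this commit.
print(deformable_detr.layers.MultiScaleDeformableAttention)
```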
build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc
ADDED
Binary file (1.5 kB).

build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc
ADDED
Binary file (542 Bytes).

build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc
ADDED
Binary file (2.74 kB).
build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/{_deformable_detr_7c33cbe.abi3.so → _deformable_detr_320b408.abi3.so}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:8926ca42814a03cdbac750f3a0cd3e3cbc28614a58e1ca5a77e82b3ad0148043
+size 9979264
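The `.abi3.so` entries in this commit are Git LFS pointer files; the binary itself is addressed by the `oid sha256` and `size` fields. A small sketch, assuming the shared object has already been fetched locally, that checks a downloaded file against the pointer shown above:

```python
import hashlib
from pathlib import Path

so_path = Path("build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/_deformable_detr_320b408.abi3.so")

# Expected values taken from the LFS pointer in this commit.
expected_sha256 = "8926ca42814a03cdbac750f3a0cd3e3cbc28614a58e1ca5a77e82b3ad0148043"
expected_size = 9979264

data = so_path.read_bytes()
assert len(data) == expected_size, "size mismatch with LFS pointer"
assert hashlib.sha256(data).hexdigest() == expected_sha256, "sha256 mismatch with LFS pointer"
print("LFS object matches its pointer")
```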
build/torch27-cxx11-cu128-aarch64-linux/deformable_detr/_ops.py
CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import
-ops = torch.ops.
+from . import _deformable_detr_320b408
+ops = torch.ops._deformable_detr_320b408
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"
+    return f"_deformable_detr_320b408::{op_name}"
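For reference, a tiny sketch of how the regenerated helper behaves, assuming the matching build directory is importable as `deformable_detr`; the op name used is the one exported by this package's `__init__.py`:

```python
from deformable_detr._ops import add_op_namespace_prefix, ops

# The helper only builds the fully qualified operator name under the
# versioned namespace baked into the shared object.
qualified = add_op_namespace_prefix("ms_deform_attn_forward")
print(qualified)  # _deformable_detr_320b408::ms_deform_attn_forward

# The same operator is reachable as an attribute of the `ops` namespace object.
print(ops.ms_deform_attn_forward)
```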
build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc
CHANGED
Binary files a/build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc and b/build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc differ

build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc
CHANGED
Binary files a/build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc and b/build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc differ

build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc
CHANGED
Binary files a/build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc and b/build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc differ
build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/_deformable_detr_320b408.abi3.so
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c7f99924b14f0522d25c5f0a307ab1364a3f76d9ecf29684f80aa388f6bd443b
size 10047704
build/torch28-cxx11-cu129-aarch64-linux/deformable_detr/_ops.py
CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import
-ops = torch.ops.
+from . import _deformable_detr_320b408
+ops = torch.ops._deformable_detr_320b408
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"
+    return f"_deformable_detr_320b408::{op_name}"
build/torch29-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py
ADDED
@@ -0,0 +1,46 @@
from typing import List
import torch

from ._ops import ops
from . import layers


def ms_deform_attn_backward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    grad_output: torch.Tensor,
    im2col_step: int,
) -> List[torch.Tensor]:
    return ops.ms_deform_attn_backward(
        value,
        spatial_shapes,
        level_start_index,
        sampling_loc,
        attn_weight,
        grad_output,
        im2col_step,
    )


def ms_deform_attn_forward(
    value: torch.Tensor,
    spatial_shapes: torch.Tensor,
    level_start_index: torch.Tensor,
    sampling_loc: torch.Tensor,
    attn_weight: torch.Tensor,
    im2col_step: int,
) -> torch.Tensor:
    return ops.ms_deform_attn_forward(
        value,
        spatial_shapes,
        level_start_index,
        sampling_loc,
        attn_weight,
        im2col_step,
    )


__all__ = ["layers", "ms_deform_attn_forward", "ms_deform_attn_backward"]
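A usage sketch for the functional wrappers above. It assumes a CUDA device, an importable build directory, and the usual multi-scale deformable attention tensor layout (the wrappers themselves just forward their arguments to the custom op):

```python
import torch
import deformable_detr  # the package from a matching build directory above

bs, n_heads, head_dim = 2, 8, 32
n_queries, n_points = 100, 4

# Two feature-map levels; Python ints give the int64 index tensors the op expects.
spatial_shapes = torch.tensor([[32, 32], [16, 16]], device="cuda")
level_start_index = torch.tensor([0, 32 * 32], device="cuda")
n_levels = spatial_shapes.size(0)
n_keys = int((spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum())

value = torch.rand(bs, n_keys, n_heads, head_dim, device="cuda")
sampling_loc = torch.rand(bs, n_queries, n_heads, n_levels, n_points, 2, device="cuda")
attn_weight = torch.rand(bs, n_queries, n_heads, n_levels, n_points, device="cuda")
attn_weight = attn_weight / attn_weight.sum(dim=(-2, -1), keepdim=True)  # normalize over levels/points

out = deformable_detr.ms_deform_attn_forward(
    value, spatial_shapes, level_start_index, sampling_loc, attn_weight, 64
)
print(out.shape)  # expected: (bs, n_queries, n_heads * head_dim)
```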
build/torch29-cxx11-cu126-aarch64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc
ADDED
Binary file (1.5 kB).

build/torch29-cxx11-cu126-aarch64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc
ADDED
Binary file (542 Bytes).

build/torch29-cxx11-cu126-aarch64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc
ADDED
Binary file (2.74 kB).
build/{torch28-cxx11-cu129-aarch64-linux/deformable_detr/_deformable_detr_a92c8ea_dirty.abi3.so → torch29-cxx11-cu126-aarch64-linux/deformable_detr/_deformable_detr_320b408.abi3.so}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:e471a8e5692e1fb09dc5750fd1c76031fdb4e166082a70c9c248aa7b3a2388ca
+size 6966520
build/torch29-cxx11-cu126-aarch64-linux/deformable_detr/_ops.py
ADDED
@@ -0,0 +1,9 @@
import torch
from . import _deformable_detr_320b408
ops = torch.ops._deformable_detr_320b408

def add_op_namespace_prefix(op_name: str):
    """
    Prefix op by namespace.
    """
    return f"_deformable_detr_320b408::{op_name}"
build/torch29-cxx11-cu126-aarch64-linux/deformable_detr/layers.py
ADDED
@@ -0,0 +1,84 @@
from typing import List, Union, Tuple

from torch import Tensor
from torch.autograd import Function
from torch.autograd.function import once_differentiable
import torch.nn as nn

from ._ops import ops


class MultiScaleDeformableAttentionFunction(Function):
    @staticmethod
    def forward(
        context,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        context.im2col_step = im2col_step
        output = ops.ms_deform_attn_forward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
            context.im2col_step,
        )
        context.save_for_backward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
        )
        return output

    @staticmethod
    @once_differentiable
    def backward(context, grad_output):
        (
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
        ) = context.saved_tensors
        grad_value, grad_sampling_loc, grad_attn_weight = ops.ms_deform_attn_backward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
            grad_output,
            context.im2col_step,
        )

        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None


class MultiScaleDeformableAttention(nn.Module):
    def forward(
        self,
        value: Tensor,
        value_spatial_shapes: Tensor,
        value_spatial_shapes_list: List[Tuple],
        level_start_index: Tensor,
        sampling_locations: Tensor,
        attention_weights: Tensor,
        im2col_step: int,
    ):
        return MultiScaleDeformableAttentionFunction.apply(
            value,
            value_spatial_shapes,
            level_start_index,
            sampling_locations,
            attention_weights,
            im2col_step,
        )


__all__ = ["MultiScaleDeformableAttention"]
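A short usage sketch for the module defined above, with the same assumed shapes and CUDA device as the earlier example. Note that `value_spatial_shapes_list` is accepted for interface compatibility but is not forwarded to the autograd function:

```python
import torch
from deformable_detr.layers import MultiScaleDeformableAttention

attn = MultiScaleDeformableAttention()

bs, n_heads, head_dim, n_queries, n_points = 2, 8, 32, 100, 4
spatial_shapes = torch.tensor([[32, 32], [16, 16]], device="cuda")
spatial_shapes_list = [(32, 32), (16, 16)]
level_start_index = torch.tensor([0, 32 * 32], device="cuda")
n_levels = spatial_shapes.size(0)
n_keys = int((spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum())

value = torch.rand(bs, n_keys, n_heads, head_dim, device="cuda", requires_grad=True)
sampling_locations = torch.rand(bs, n_queries, n_heads, n_levels, n_points, 2, device="cuda")
attention_weights = torch.softmax(
    torch.rand(bs, n_queries, n_heads, n_levels * n_points, device="cuda"), dim=-1
).view(bs, n_queries, n_heads, n_levels, n_points)

out = attn(
    value,
    spatial_shapes,
    spatial_shapes_list,
    level_start_index,
    sampling_locations,
    attention_weights,
    im2col_step=64,
)
out.sum().backward()  # gradients flow through the custom backward op
print(out.shape, value.grad.shape)
```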
build/torch29-cxx11-cu128-aarch64-linux/deformable_detr/__init__.py
ADDED
@@ -0,0 +1,46 @@
(46 lines; identical to build/torch29-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py above)
build/torch29-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc
ADDED
Binary file (1.5 kB).

build/torch29-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc
ADDED
Binary file (542 Bytes).

build/torch29-cxx11-cu128-aarch64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc
ADDED
Binary file (2.74 kB).
build/torch29-cxx11-cu128-aarch64-linux/deformable_detr/_deformable_detr_320b408.abi3.so
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b54665ded7f4ed9bdedb765f3010a52ede13f0b9107aae15ae268f08dd171d21
size 9980808
build/torch29-cxx11-cu128-aarch64-linux/deformable_detr/_ops.py
ADDED
@@ -0,0 +1,9 @@
(9 lines; identical to build/torch29-cxx11-cu126-aarch64-linux/deformable_detr/_ops.py above)
build/torch29-cxx11-cu128-aarch64-linux/deformable_detr/layers.py
ADDED
@@ -0,0 +1,84 @@
(84 lines; identical to build/torch29-cxx11-cu126-aarch64-linux/deformable_detr/layers.py above)
build/torch29-cxx11-cu130-aarch64-linux/deformable_detr/__init__.py
ADDED
@@ -0,0 +1,46 @@
(46 lines; identical to build/torch29-cxx11-cu126-aarch64-linux/deformable_detr/__init__.py above)
build/torch29-cxx11-cu130-aarch64-linux/deformable_detr/__pycache__/__init__.cpython-313.pyc
ADDED
Binary file (1.5 kB).

build/torch29-cxx11-cu130-aarch64-linux/deformable_detr/__pycache__/_ops.cpython-313.pyc
ADDED
Binary file (542 Bytes).

build/torch29-cxx11-cu130-aarch64-linux/deformable_detr/__pycache__/layers.cpython-313.pyc
ADDED
Binary file (2.74 kB).
build/torch29-cxx11-cu130-aarch64-linux/deformable_detr/_deformable_detr_320b408.abi3.so
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6cc1595c80314a757e2d418bf2cf278ea2e1f8fadee0cd5440a753924ef87f0a
size 9103096
build/torch29-cxx11-cu130-aarch64-linux/deformable_detr/_ops.py
ADDED
@@ -0,0 +1,9 @@
(9 lines; identical to build/torch29-cxx11-cu126-aarch64-linux/deformable_detr/_ops.py above)
build/torch29-cxx11-cu130-aarch64-linux/deformable_detr/layers.py
ADDED
@@ -0,0 +1,84 @@
(84 lines; identical to build/torch29-cxx11-cu126-aarch64-linux/deformable_detr/layers.py above)