update

Files changed:

- gradio_app.py → .ipynb_checkpoints/app-checkpoint.py +3 -3
- groundingdino/util/.ipynb_checkpoints/__init__-checkpoint.py +1 -0
- groundingdino/util/.ipynb_checkpoints/get_tokenlizer-checkpoint.py +30 -0
- groundingdino/util/.ipynb_checkpoints/utils-checkpoint.py +610 -0
- groundingdino/util/__init__.py +1 -0
- groundingdino/util/__pycache__/__init__.cpython-310.pyc +0 -0
- groundingdino/util/__pycache__/box_ops.cpython-310.pyc +0 -0
- groundingdino/util/__pycache__/get_tokenlizer.cpython-310.pyc +0 -0
- groundingdino/util/__pycache__/misc.cpython-310.pyc +0 -0
- groundingdino/util/__pycache__/slconfig.cpython-310.pyc +0 -0
- groundingdino/util/__pycache__/utils.cpython-310.pyc +0 -0
- groundingdino/util/__pycache__/visualizer.cpython-310.pyc +0 -0
- groundingdino/util/__pycache__/vl_utils.cpython-310.pyc +0 -0
- groundingdino/util/box_ops.py +140 -0
- groundingdino/util/get_tokenlizer.py +30 -0
- groundingdino/util/inference.py +259 -0
- groundingdino/util/logger.py +93 -0
- groundingdino/util/misc.py +717 -0
- groundingdino/util/slconfig.py +427 -0
- groundingdino/util/slio.py +177 -0
- groundingdino/util/time_counter.py +62 -0
- groundingdino/util/utils.py +610 -0
- groundingdino/util/visualizer.py +318 -0
- groundingdino/util/vl_utils.py +100 -0
gradio_app.py → .ipynb_checkpoints/app-checkpoint.py
RENAMED

@@ -34,9 +34,9 @@ from huggingface_hub import hf_hub_download
 
 
 # Use this command for evaluate the Grounding DINO model
-config_file = "
-ckpt_repo_id = "
-ckpt_filenmae = "
+config_file = "groundingdino/config/GroundingDINO_SwinT_OGC.py"
+ckpt_repo_id = "ShilongLiu/GroundingDINO"
+ckpt_filenmae = "groundingdino_swint_ogc.pth"
 
 
 def load_model_hf(model_config_path, repo_id, filename, device='cpu'):
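The constants above feed load_model_hf, whose body falls outside this hunk. Below is a minimal sketch of how such a helper typically wires them together, assuming the standard GroundingDINO utilities (SLConfig, build_model, clean_state_dict); the committed implementation may differ:

# Hypothetical sketch of load_model_hf; the committed body is not visible in this hunk.
import torch
from huggingface_hub import hf_hub_download
from groundingdino.models import build_model
from groundingdino.util.slconfig import SLConfig
from groundingdino.util.utils import clean_state_dict

def load_model_hf(model_config_path, repo_id, filename, device='cpu'):
    # Build the model from its Python config file.
    args = SLConfig.fromfile(model_config_path)
    model = build_model(args)
    # Fetch the checkpoint from the Hugging Face Hub (cached locally after the first call).
    cache_file = hf_hub_download(repo_id=repo_id, filename=filename)
    checkpoint = torch.load(cache_file, map_location=device)
    # Strip any "module." DataParallel prefixes before loading the weights.
    model.load_state_dict(clean_state_dict(checkpoint['model']), strict=False)
    model.eval()
    return model

With the new values, the call would read model = load_model_hf(config_file, ckpt_repo_id, ckpt_filenmae).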
groundingdino/util/.ipynb_checkpoints/__init__-checkpoint.py
ADDED

@@ -0,0 +1 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
groundingdino/util/.ipynb_checkpoints/get_tokenlizer-checkpoint.py
ADDED

@@ -0,0 +1,30 @@
from transformers import AutoTokenizer, BertModel, BertTokenizer, RobertaModel, RobertaTokenizerFast
import os

def get_tokenlizer(text_encoder_type):
    if not isinstance(text_encoder_type, str):
        # print("text_encoder_type is not a str")
        if hasattr(text_encoder_type, "text_encoder_type"):
            text_encoder_type = text_encoder_type.text_encoder_type
        elif text_encoder_type.get("text_encoder_type", False):
            text_encoder_type = text_encoder_type.get("text_encoder_type")
        elif os.path.isdir(text_encoder_type) and os.path.exists(text_encoder_type):
            pass
        else:
            raise ValueError(
                "Unknown type of text_encoder_type: {}".format(type(text_encoder_type))
            )
    print("final text_encoder_type: {}".format(text_encoder_type))
    tokenizer = AutoTokenizer.from_pretrained(text_encoder_type)
    print("load tokenizer done.")
    return tokenizer


def get_pretrained_language_model(text_encoder_type):
    if text_encoder_type == "bert-base-uncased" or (os.path.isdir(text_encoder_type) and os.path.exists(text_encoder_type)):
        return BertModel.from_pretrained(text_encoder_type)
    if text_encoder_type == "roberta-base":
        return RobertaModel.from_pretrained(text_encoder_type)

    raise ValueError("Unknown text_encoder_type {}".format(text_encoder_type))
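get_tokenlizer accepts either a model-name string or a config object that carries a text_encoder_type attribute or key, and delegates to AutoTokenizer. A quick usage sketch (hypothetical, not part of the commit; downloads bert-base-uncased on first run):

# Hypothetical usage of the helpers above; requires network access on first run.
from groundingdino.util.get_tokenlizer import get_tokenlizer, get_pretrained_language_model

tokenizer = get_tokenlizer("bert-base-uncased")            # AutoTokenizer under the hood
bert = get_pretrained_language_model("bert-base-uncased")  # returns a BertModel

ids = tokenizer("a cat sitting on a mat.")["input_ids"]
print(ids)  # token ids, including the [CLS]/[SEP] specials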
groundingdino/util/.ipynb_checkpoints/utils-checkpoint.py
ADDED

@@ -0,0 +1,610 @@
import argparse
import json
import warnings
from collections import OrderedDict
from copy import deepcopy
from typing import Any, Dict, List

import numpy as np
import torch
from transformers import AutoTokenizer

from groundingdino.util.slconfig import SLConfig


def slprint(x, name="x"):
    if isinstance(x, (torch.Tensor, np.ndarray)):
        print(f"{name}.shape:", x.shape)
    elif isinstance(x, (tuple, list)):
        print("type x:", type(x))
        for i in range(min(10, len(x))):
            slprint(x[i], f"{name}[{i}]")
    elif isinstance(x, dict):
        for k, v in x.items():
            slprint(v, f"{name}[{k}]")
    else:
        print(f"{name}.type:", type(x))


def clean_state_dict(state_dict):
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        if k[:7] == "module.":
            k = k[7:]  # remove `module.`
        new_state_dict[k] = v
    return new_state_dict


def renorm(
    img: torch.FloatTensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
) -> torch.FloatTensor:
    # img: tensor(3,H,W) or tensor(B,3,H,W)
    # return: same as img
    assert img.dim() == 3 or img.dim() == 4, "img.dim() should be 3 or 4 but %d" % img.dim()
    if img.dim() == 3:
        assert img.size(0) == 3, 'img.size(0) shoule be 3 but "%d". (%s)' % (
            img.size(0),
            str(img.size()),
        )
        img_perm = img.permute(1, 2, 0)
        mean = torch.Tensor(mean)
        std = torch.Tensor(std)
        img_res = img_perm * std + mean
        return img_res.permute(2, 0, 1)
    else:  # img.dim() == 4
        assert img.size(1) == 3, 'img.size(1) shoule be 3 but "%d". (%s)' % (
            img.size(1),
            str(img.size()),
        )
        img_perm = img.permute(0, 2, 3, 1)
        mean = torch.Tensor(mean)
        std = torch.Tensor(std)
        img_res = img_perm * std + mean
        return img_res.permute(0, 3, 1, 2)


class CocoClassMapper:
    def __init__(self) -> None:
        # COCO category id -> contiguous 1..80 index (entries compacted onto fewer lines)
        self.category_map_str = {
            "1": 1, "2": 2, "3": 3, "4": 4, "5": 5, "6": 6, "7": 7, "8": 8,
            "9": 9, "10": 10, "11": 11, "13": 12, "14": 13, "15": 14, "16": 15,
            "17": 16, "18": 17, "19": 18, "20": 19, "21": 20, "22": 21, "23": 22,
            "24": 23, "25": 24, "27": 25, "28": 26, "31": 27, "32": 28, "33": 29,
            "34": 30, "35": 31, "36": 32, "37": 33, "38": 34, "39": 35, "40": 36,
            "41": 37, "42": 38, "43": 39, "44": 40, "46": 41, "47": 42, "48": 43,
            "49": 44, "50": 45, "51": 46, "52": 47, "53": 48, "54": 49, "55": 50,
            "56": 51, "57": 52, "58": 53, "59": 54, "60": 55, "61": 56, "62": 57,
            "63": 58, "64": 59, "65": 60, "67": 61, "70": 62, "72": 63, "73": 64,
            "74": 65, "75": 66, "76": 67, "77": 68, "78": 69, "79": 70, "80": 71,
            "81": 72, "82": 73, "84": 74, "85": 75, "86": 76, "87": 77, "88": 78,
            "89": 79, "90": 80,
        }
        self.origin2compact_mapper = {int(k): v - 1 for k, v in self.category_map_str.items()}
        self.compact2origin_mapper = {int(v - 1): int(k) for k, v in self.category_map_str.items()}

    def origin2compact(self, idx):
        return self.origin2compact_mapper[int(idx)]

    def compact2origin(self, idx):
        return self.compact2origin_mapper[int(idx)]


def to_device(item, device):
    if isinstance(item, torch.Tensor):
        return item.to(device)
    elif isinstance(item, list):
        return [to_device(i, device) for i in item]
    elif isinstance(item, dict):
        return {k: to_device(v, device) for k, v in item.items()}
    else:
        raise NotImplementedError(
            "Call Shilong if you use other containers! type: {}".format(type(item))
        )


#
def get_gaussian_mean(x, axis, other_axis, softmax=True):
    """

    Args:
        x (float): Input images(BxCxHxW)
        axis (int): The index for weighted mean
        other_axis (int): The other index

    Returns: weighted index for axis, BxC

    """
    mat2line = torch.sum(x, axis=other_axis)
    # mat2line = mat2line / mat2line.mean() * 10
    if softmax:
        u = torch.softmax(mat2line, axis=2)
    else:
        u = mat2line / (mat2line.sum(2, keepdim=True) + 1e-6)
    size = x.shape[axis]
    ind = torch.linspace(0, 1, size).to(x.device)
    batch = x.shape[0]
    channel = x.shape[1]
    index = ind.repeat([batch, channel, 1])
    mean_position = torch.sum(index * u, dim=2)
    return mean_position


def get_expected_points_from_map(hm, softmax=True):
    """get_gaussian_map_from_points
    B,C,H,W -> B,N,2 float(0, 1) float(0, 1)
    softargmax function

    Args:
        hm (float): Input images(BxCxHxW)

    Returns:
        weighted index for axis, BxCx2. float between 0 and 1.

    """
    # hm = 10*hm
    B, C, H, W = hm.shape
    y_mean = get_gaussian_mean(hm, 2, 3, softmax=softmax)  # B,C
    x_mean = get_gaussian_mean(hm, 3, 2, softmax=softmax)  # B,C
    # return torch.cat((x_mean.unsqueeze(-1), y_mean.unsqueeze(-1)), 2)
    return torch.stack([x_mean, y_mean], dim=2)


# Positional encoding (section 5.1)
# borrow from nerf
class Embedder:
    def __init__(self, **kwargs):
        self.kwargs = kwargs
        self.create_embedding_fn()

    def create_embedding_fn(self):
        embed_fns = []
        d = self.kwargs["input_dims"]
        out_dim = 0
        if self.kwargs["include_input"]:
            embed_fns.append(lambda x: x)
            out_dim += d

        max_freq = self.kwargs["max_freq_log2"]
        N_freqs = self.kwargs["num_freqs"]

        if self.kwargs["log_sampling"]:
            freq_bands = 2.0 ** torch.linspace(0.0, max_freq, steps=N_freqs)
        else:
            freq_bands = torch.linspace(2.0**0.0, 2.0**max_freq, steps=N_freqs)

        for freq in freq_bands:
            for p_fn in self.kwargs["periodic_fns"]:
                embed_fns.append(lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq))
                out_dim += d

        self.embed_fns = embed_fns
        self.out_dim = out_dim

    def embed(self, inputs):
        return torch.cat([fn(inputs) for fn in self.embed_fns], -1)


def get_embedder(multires, i=0):
    import torch.nn as nn

    if i == -1:
        return nn.Identity(), 3

    embed_kwargs = {
        "include_input": True,
        "input_dims": 3,
        "max_freq_log2": multires - 1,
        "num_freqs": multires,
        "log_sampling": True,
        "periodic_fns": [torch.sin, torch.cos],
    }

    embedder_obj = Embedder(**embed_kwargs)
    embed = lambda x, eo=embedder_obj: eo.embed(x)
    return embed, embedder_obj.out_dim


class APOPMeter:
    def __init__(self) -> None:
        self.tp = 0
        self.fp = 0
        self.tn = 0
        self.fn = 0

    def update(self, pred, gt):
        """
        Input:
            pred, gt: Tensor()
        """
        assert pred.shape == gt.shape
        self.tp += torch.logical_and(pred == 1, gt == 1).sum().item()
        self.fp += torch.logical_and(pred == 1, gt == 0).sum().item()
        self.tn += torch.logical_and(pred == 0, gt == 0).sum().item()
        self.tn += torch.logical_and(pred == 1, gt == 0).sum().item()  # NOTE: as committed, this re-adds the fp count to tn; self.fn is never updated

    def update_cm(self, tp, fp, tn, fn):
        self.tp += tp
        self.fp += fp
        self.tn += tn
        self.tn += fn  # NOTE: as committed, fn is accumulated into tn here


def inverse_sigmoid(x, eps=1e-5):
    x = x.clamp(min=0, max=1)
    x1 = x.clamp(min=eps)
    x2 = (1 - x).clamp(min=eps)
    return torch.log(x1 / x2)


def get_raw_dict(args):
    """
    return the dicf contained in args.

    e.g:
        >>> with open(path, 'w') as f:
                json.dump(get_raw_dict(args), f, indent=2)
    """
    if isinstance(args, argparse.Namespace):
        return vars(args)
    elif isinstance(args, dict):
        return args
    elif isinstance(args, SLConfig):
        return args._cfg_dict
    else:
        raise NotImplementedError("Unknown type {}".format(type(args)))


def stat_tensors(tensor):
    assert tensor.dim() == 1
    tensor_sm = tensor.softmax(0)
    entropy = (tensor_sm * torch.log(tensor_sm + 1e-9)).sum()

    return {
        "max": tensor.max(),
        "min": tensor.min(),
        "mean": tensor.mean(),
        "var": tensor.var(),
        "std": tensor.var() ** 0.5,
        "entropy": entropy,
    }


class NiceRepr:
    """Inherit from this class and define ``__nice__`` to "nicely" print your
    objects.

    Defines ``__str__`` and ``__repr__`` in terms of ``__nice__`` function
    Classes that inherit from :class:`NiceRepr` should redefine ``__nice__``.
    If the inheriting class has a ``__len__``, method then the default
    ``__nice__`` method will return its length.

    Example:
        >>> class Foo(NiceRepr):
        ...    def __nice__(self):
        ...        return 'info'
        >>> foo = Foo()
        >>> assert str(foo) == '<Foo(info)>'
        >>> assert repr(foo).startswith('<Foo(info) at ')

    Example:
        >>> class Bar(NiceRepr):
        ...    pass
        >>> bar = Bar()
        >>> import pytest
        >>> with pytest.warns(None) as record:
        >>>     assert 'object at' in str(bar)
        >>>     assert 'object at' in repr(bar)

    Example:
        >>> class Baz(NiceRepr):
        ...    def __len__(self):
        ...        return 5
        >>> baz = Baz()
        >>> assert str(baz) == '<Baz(5)>'
    """

    def __nice__(self):
        """str: a "nice" summary string describing this module"""
        if hasattr(self, "__len__"):
            # It is a common pattern for objects to use __len__ in __nice__
            # As a convenience we define a default __nice__ for these objects
            return str(len(self))
        else:
            # In all other cases force the subclass to overload __nice__
            raise NotImplementedError(f"Define the __nice__ method for {self.__class__!r}")

    def __repr__(self):
        """str: the string of the module"""
        try:
            nice = self.__nice__()
            classname = self.__class__.__name__
            return f"<{classname}({nice}) at {hex(id(self))}>"
        except NotImplementedError as ex:
            warnings.warn(str(ex), category=RuntimeWarning)
            return object.__repr__(self)

    def __str__(self):
        """str: the string of the module"""
        try:
            classname = self.__class__.__name__
            nice = self.__nice__()
            return f"<{classname}({nice})>"
        except NotImplementedError as ex:
            warnings.warn(str(ex), category=RuntimeWarning)
            return object.__repr__(self)


def ensure_rng(rng=None):
    """Coerces input into a random number generator.

    If the input is None, then a global random state is returned.

    If the input is a numeric value, then that is used as a seed to construct a
    random state. Otherwise the input is returned as-is.

    Adapted from [1]_.

    Args:
        rng (int | numpy.random.RandomState | None):
            if None, then defaults to the global rng. Otherwise this can be an
            integer or a RandomState class
    Returns:
        (numpy.random.RandomState) : rng -
            a numpy random number generator

    References:
        .. [1] https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270  # noqa: E501
    """

    if rng is None:
        rng = np.random.mtrand._rand
    elif isinstance(rng, int):
        rng = np.random.RandomState(rng)
    else:
        rng = rng
    return rng


def random_boxes(num=1, scale=1, rng=None):
    """Simple version of ``kwimage.Boxes.random``

    Returns:
        Tensor: shape (n, 4) in x1, y1, x2, y2 format.

    References:
        https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390

    Example:
        >>> num = 3
        >>> scale = 512
        >>> rng = 0
        >>> boxes = random_boxes(num, scale, rng)
        >>> print(boxes)
        tensor([[280.9925, 278.9802, 308.6148, 366.1769],
                [216.9113, 330.6978, 224.0446, 456.5878],
                [405.3632, 196.3221, 493.3953, 270.7942]])
    """
    rng = ensure_rng(rng)

    tlbr = rng.rand(num, 4).astype(np.float32)

    tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2])
    tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3])
    br_x = np.maximum(tlbr[:, 0], tlbr[:, 2])
    br_y = np.maximum(tlbr[:, 1], tlbr[:, 3])

    tlbr[:, 0] = tl_x * scale
    tlbr[:, 1] = tl_y * scale
    tlbr[:, 2] = br_x * scale
    tlbr[:, 3] = br_y * scale

    boxes = torch.from_numpy(tlbr)
    return boxes


class ModelEma(torch.nn.Module):
    def __init__(self, model, decay=0.9997, device=None):
        super(ModelEma, self).__init__()
        # make a copy of the model for accumulating moving average of weights
        self.module = deepcopy(model)
        self.module.eval()

        # import ipdb; ipdb.set_trace()

        self.decay = decay
        self.device = device  # perform ema on different device from model if set
        if self.device is not None:
            self.module.to(device=device)

    def _update(self, model, update_fn):
        with torch.no_grad():
            for ema_v, model_v in zip(
                self.module.state_dict().values(), model.state_dict().values()
            ):
                if self.device is not None:
                    model_v = model_v.to(device=self.device)
                ema_v.copy_(update_fn(ema_v, model_v))

    def update(self, model):
        self._update(model, update_fn=lambda e, m: self.decay * e + (1.0 - self.decay) * m)

    def set(self, model):
        self._update(model, update_fn=lambda e, m: m)


class BestMetricSingle:
    def __init__(self, init_res=0.0, better="large") -> None:
        self.init_res = init_res
        self.best_res = init_res
        self.best_ep = -1

        self.better = better
        assert better in ["large", "small"]

    def isbetter(self, new_res, old_res):
        if self.better == "large":
            return new_res > old_res
        if self.better == "small":
            return new_res < old_res

    def update(self, new_res, ep):
        if self.isbetter(new_res, self.best_res):
            self.best_res = new_res
            self.best_ep = ep
            return True
        return False

    def __str__(self) -> str:
        return "best_res: {}\t best_ep: {}".format(self.best_res, self.best_ep)

    def __repr__(self) -> str:
        return self.__str__()

    def summary(self) -> dict:
        return {
            "best_res": self.best_res,
            "best_ep": self.best_ep,
        }


class BestMetricHolder:
    def __init__(self, init_res=0.0, better="large", use_ema=False) -> None:
        self.best_all = BestMetricSingle(init_res, better)
        self.use_ema = use_ema
        if use_ema:
            self.best_ema = BestMetricSingle(init_res, better)
            self.best_regular = BestMetricSingle(init_res, better)

    def update(self, new_res, epoch, is_ema=False):
        """
        return if the results is the best.
        """
        if not self.use_ema:
            return self.best_all.update(new_res, epoch)
        else:
            if is_ema:
                self.best_ema.update(new_res, epoch)
                return self.best_all.update(new_res, epoch)
            else:
                self.best_regular.update(new_res, epoch)
                return self.best_all.update(new_res, epoch)

    def summary(self):
        if not self.use_ema:
            return self.best_all.summary()

        res = {}
        res.update({f"all_{k}": v for k, v in self.best_all.summary().items()})
        res.update({f"regular_{k}": v for k, v in self.best_regular.summary().items()})
        res.update({f"ema_{k}": v for k, v in self.best_ema.summary().items()})
        return res

    def __repr__(self) -> str:
        return json.dumps(self.summary(), indent=2)

    def __str__(self) -> str:
        return self.__repr__()


def targets_to(targets: List[Dict[str, Any]], device):
    """Moves the target dicts to the given device."""
    excluded_keys = [
        "questionId",
        "tokens_positive",
        "strings_positive",
        "tokens",
        "dataset_name",
        "sentence_id",
        "original_img_id",
        "nb_eval",
        "task_id",
        "original_id",
        "token_span",
        "caption",
        "dataset_type",
    ]
    return [
        {k: v.to(device) if k not in excluded_keys else v for k, v in t.items()} for t in targets
    ]


def get_phrases_from_posmap(
    posmap: torch.BoolTensor, tokenized: Dict, tokenizer: AutoTokenizer, left_idx: int = 0, right_idx: int = 255
):
    assert isinstance(posmap, torch.Tensor), "posmap must be torch.Tensor"
    if posmap.dim() == 1:
        posmap[0: left_idx + 1] = False
        posmap[right_idx:] = False
        non_zero_idx = posmap.nonzero(as_tuple=True)[0].tolist()
        token_ids = [tokenized["input_ids"][i] for i in non_zero_idx]
        return tokenizer.decode(token_ids)
    else:
        raise NotImplementedError("posmap must be 1-dim")
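Of everything in this file, get_phrases_from_posmap is the piece the inference code below leans on: it maps a boolean mask over caption token positions back to a text phrase. A small illustration with a hand-built mask (the caption and indices are made up):

# Illustrative only: exercise get_phrases_from_posmap with a hand-built posmap.
import torch
from transformers import AutoTokenizer
from groundingdino.util.utils import get_phrases_from_posmap

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
tokenized = tokenizer("a cat. a dog.")  # input_ids: [CLS] a cat . a dog . [SEP]

posmap = torch.zeros(len(tokenized["input_ids"]), dtype=torch.bool)
posmap[1:3] = True  # mark the token positions for "a cat"

print(get_phrases_from_posmap(posmap, tokenized, tokenizer))  # -> "a cat"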
groundingdino/util/__init__.py
ADDED

@@ -0,0 +1 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
groundingdino/util/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (193 Bytes)

groundingdino/util/__pycache__/box_ops.cpython-310.pyc
ADDED
Binary file (3.86 kB)

groundingdino/util/__pycache__/get_tokenlizer.cpython-310.pyc
ADDED
Binary file (1.17 kB)

groundingdino/util/__pycache__/misc.cpython-310.pyc
ADDED
Binary file (20.3 kB)

groundingdino/util/__pycache__/slconfig.cpython-310.pyc
ADDED
Binary file (13.2 kB)

groundingdino/util/__pycache__/utils.cpython-310.pyc
ADDED
Binary file (19.9 kB)

groundingdino/util/__pycache__/visualizer.cpython-310.pyc
ADDED
Binary file (7.84 kB)

groundingdino/util/__pycache__/vl_utils.cpython-310.pyc
ADDED
Binary file (3.12 kB)
groundingdino/util/box_ops.py
ADDED

@@ -0,0 +1,140 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Utilities for bounding box manipulation and GIoU.
"""
import torch
from torchvision.ops.boxes import box_area


def box_cxcywh_to_xyxy(x):
    x_c, y_c, w, h = x.unbind(-1)
    b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)]
    return torch.stack(b, dim=-1)


def box_xyxy_to_cxcywh(x):
    x0, y0, x1, y1 = x.unbind(-1)
    b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)]
    return torch.stack(b, dim=-1)


# modified from torchvision to also return the union
def box_iou(boxes1, boxes2):
    area1 = box_area(boxes1)
    area2 = box_area(boxes2)

    # import ipdb; ipdb.set_trace()
    lt = torch.max(boxes1[:, None, :2], boxes2[:, :2])  # [N,M,2]
    rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])  # [N,M,2]

    wh = (rb - lt).clamp(min=0)  # [N,M,2]
    inter = wh[:, :, 0] * wh[:, :, 1]  # [N,M]

    union = area1[:, None] + area2 - inter

    iou = inter / (union + 1e-6)
    return iou, union


def generalized_box_iou(boxes1, boxes2):
    """
    Generalized IoU from https://giou.stanford.edu/

    The boxes should be in [x0, y0, x1, y1] format

    Returns a [N, M] pairwise matrix, where N = len(boxes1)
    and M = len(boxes2)
    """
    # degenerate boxes gives inf / nan results
    # so do an early check
    assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
    assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
    # except:
    #     import ipdb; ipdb.set_trace()
    iou, union = box_iou(boxes1, boxes2)

    lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
    rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])

    wh = (rb - lt).clamp(min=0)  # [N,M,2]
    area = wh[:, :, 0] * wh[:, :, 1]

    return iou - (area - union) / (area + 1e-6)


# modified from torchvision to also return the union
def box_iou_pairwise(boxes1, boxes2):
    area1 = box_area(boxes1)
    area2 = box_area(boxes2)

    lt = torch.max(boxes1[:, :2], boxes2[:, :2])  # [N,2]
    rb = torch.min(boxes1[:, 2:], boxes2[:, 2:])  # [N,2]

    wh = (rb - lt).clamp(min=0)  # [N,2]
    inter = wh[:, 0] * wh[:, 1]  # [N]

    union = area1 + area2 - inter

    iou = inter / union
    return iou, union


def generalized_box_iou_pairwise(boxes1, boxes2):
    """
    Generalized IoU from https://giou.stanford.edu/

    Input:
        - boxes1, boxes2: N,4
    Output:
        - giou: N, 4
    """
    # degenerate boxes gives inf / nan results
    # so do an early check
    assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
    assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
    assert boxes1.shape == boxes2.shape
    iou, union = box_iou_pairwise(boxes1, boxes2)  # N, 4

    lt = torch.min(boxes1[:, :2], boxes2[:, :2])
    rb = torch.max(boxes1[:, 2:], boxes2[:, 2:])

    wh = (rb - lt).clamp(min=0)  # [N,2]
    area = wh[:, 0] * wh[:, 1]

    return iou - (area - union) / area


def masks_to_boxes(masks):
    """Compute the bounding boxes around the provided masks

    The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.

    Returns a [N, 4] tensors, with the boxes in xyxy format
    """
    if masks.numel() == 0:
        return torch.zeros((0, 4), device=masks.device)

    h, w = masks.shape[-2:]

    y = torch.arange(0, h, dtype=torch.float)
    x = torch.arange(0, w, dtype=torch.float)
    y, x = torch.meshgrid(y, x)

    x_mask = masks * x.unsqueeze(0)
    x_max = x_mask.flatten(1).max(-1)[0]
    x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]

    y_mask = masks * y.unsqueeze(0)
    y_max = y_mask.flatten(1).max(-1)[0]
    y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]

    return torch.stack([x_min, y_min, x_max, y_max], 1)


if __name__ == "__main__":
    x = torch.rand(5, 4)
    y = torch.rand(3, 4)
    iou, union = box_iou(x, y)
    import ipdb

    ipdb.set_trace()
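A hand-worked case pins down generalized_box_iou's sign convention: two 2x2 boxes offset by (1, 1) intersect in a 1x1 square, so IoU = 1/7 ≈ 0.143; the tightest enclosing box is 3x3 (area 9), giving GIoU = 1/7 - (9 - 7)/9 ≈ -0.079. The sketch below (not part of the commit) checks this:

# Hand-checked example for box_iou / generalized_box_iou (xyxy format).
import torch
from groundingdino.util.box_ops import box_iou, generalized_box_iou

boxes1 = torch.tensor([[0.0, 0.0, 2.0, 2.0]])  # 2x2 box at the origin
boxes2 = torch.tensor([[1.0, 1.0, 3.0, 3.0]])  # same box shifted by (1, 1)

iou, union = box_iou(boxes1, boxes2)
print(iou)                                  # ~0.1429 (intersection 1 / union 7)
print(generalized_box_iou(boxes1, boxes2))  # ~-0.0794 (enclosing-box area 9)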
groundingdino/util/get_tokenlizer.py
ADDED

@@ -0,0 +1,30 @@
from transformers import AutoTokenizer, BertModel, BertTokenizer, RobertaModel, RobertaTokenizerFast
import os

def get_tokenlizer(text_encoder_type):
    if not isinstance(text_encoder_type, str):
        # print("text_encoder_type is not a str")
        if hasattr(text_encoder_type, "text_encoder_type"):
            text_encoder_type = text_encoder_type.text_encoder_type
        elif text_encoder_type.get("text_encoder_type", False):
            text_encoder_type = text_encoder_type.get("text_encoder_type")
        elif os.path.isdir(text_encoder_type) and os.path.exists(text_encoder_type):
            pass
        else:
            raise ValueError(
                "Unknown type of text_encoder_type: {}".format(type(text_encoder_type))
            )
    print("final text_encoder_type: {}".format(text_encoder_type))
    tokenizer = AutoTokenizer.from_pretrained(text_encoder_type)
    print("load tokenizer done.")
    return tokenizer


def get_pretrained_language_model(text_encoder_type):
    if text_encoder_type == "bert-base-uncased" or (os.path.isdir(text_encoder_type) and os.path.exists(text_encoder_type)):
        return BertModel.from_pretrained(text_encoder_type)
    if text_encoder_type == "roberta-base":
        return RobertaModel.from_pretrained(text_encoder_type)

    raise ValueError("Unknown text_encoder_type {}".format(text_encoder_type))

(Identical to the .ipynb_checkpoints copy added above.)
groundingdino/util/inference.py
ADDED

@@ -0,0 +1,259 @@
from typing import Tuple, List

import cv2
import numpy as np
import supervision as sv
import torch
from PIL import Image
from torchvision.ops import box_convert
import bisect

import groundingdino.datasets.transforms as T
from groundingdino.models import build_model
from groundingdino.util.misc import clean_state_dict
from groundingdino.util.slconfig import SLConfig
from groundingdino.util.utils import get_phrases_from_posmap

# ----------------------------------------------------------------------------------------------------------------------
# OLD API
# ----------------------------------------------------------------------------------------------------------------------


def preprocess_caption(caption: str) -> str:
    result = caption.lower().strip()
    if result.endswith("."):
        return result
    return result + "."


def load_model(model_config_path: str, model_checkpoint_path: str, device: str = "cuda"):
    args = SLConfig.fromfile(model_config_path)
    args.device = device
    model = build_model(args)
    checkpoint = torch.load(model_checkpoint_path, map_location="cpu")
    model.load_state_dict(clean_state_dict(checkpoint["model"]), strict=False)
    model.eval()
    return model


def load_image(image_path: str) -> Tuple[np.array, torch.Tensor]:
    transform = T.Compose(
        [
            T.RandomResize([800], max_size=1333),
            T.ToTensor(),
            T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]
    )
    image_source = Image.open(image_path).convert("RGB")
    image = np.asarray(image_source)
    image_transformed, _ = transform(image_source, None)
    return image, image_transformed


def predict(
    model,
    image: torch.Tensor,
    caption: str,
    box_threshold: float,
    text_threshold: float,
    device: str = "cuda",
    remove_combined: bool = False
) -> Tuple[torch.Tensor, torch.Tensor, List[str]]:
    caption = preprocess_caption(caption=caption)

    model = model.to(device)
    image = image.to(device)

    with torch.no_grad():
        outputs = model(image[None], captions=[caption])

    prediction_logits = outputs["pred_logits"].cpu().sigmoid()[0]  # prediction_logits.shape = (nq, 256)
    prediction_boxes = outputs["pred_boxes"].cpu()[0]  # prediction_boxes.shape = (nq, 4)

    mask = prediction_logits.max(dim=1)[0] > box_threshold
    logits = prediction_logits[mask]  # logits.shape = (n, 256)
    boxes = prediction_boxes[mask]  # boxes.shape = (n, 4)

    tokenizer = model.tokenizer
    tokenized = tokenizer(caption)

    if remove_combined:
        sep_idx = [i for i in range(len(tokenized['input_ids'])) if tokenized['input_ids'][i] in [101, 102, 1012]]

        phrases = []
        for logit in logits:
            max_idx = logit.argmax()
            insert_idx = bisect.bisect_left(sep_idx, max_idx)
            right_idx = sep_idx[insert_idx]
            left_idx = sep_idx[insert_idx - 1]
            phrases.append(get_phrases_from_posmap(logit > text_threshold, tokenized, tokenizer, left_idx, right_idx).replace('.', ''))
    else:
        phrases = [
            get_phrases_from_posmap(logit > text_threshold, tokenized, tokenizer).replace('.', '')
            for logit
            in logits
        ]

    return boxes, logits.max(dim=1)[0], phrases


def annotate(image_source: np.ndarray, boxes: torch.Tensor, logits: torch.Tensor, phrases: List[str]) -> np.ndarray:
    h, w, _ = image_source.shape
    boxes = boxes * torch.Tensor([w, h, w, h])
    xyxy = box_convert(boxes=boxes, in_fmt="cxcywh", out_fmt="xyxy").numpy()
    detections = sv.Detections(xyxy=xyxy)

    labels = [
        f"{phrase} {logit:.2f}"
        for phrase, logit
        in zip(phrases, logits)
    ]

    box_annotator = sv.BoxAnnotator()
    annotated_frame = cv2.cvtColor(image_source, cv2.COLOR_RGB2BGR)
    annotated_frame = box_annotator.annotate(scene=annotated_frame, detections=detections, labels=labels)
    return annotated_frame


# ----------------------------------------------------------------------------------------------------------------------
# NEW API
# ----------------------------------------------------------------------------------------------------------------------


class Model:

    def __init__(
        self,
        model_config_path: str,
        model_checkpoint_path: str,
        device: str = "cuda"
    ):
        self.model = load_model(
            model_config_path=model_config_path,
            model_checkpoint_path=model_checkpoint_path,
            device=device
        ).to(device)
        self.device = device

    def predict_with_caption(
        self,
        image: np.ndarray,
        caption: str,
        box_threshold: float = 0.35,
        text_threshold: float = 0.25
    ) -> Tuple[sv.Detections, List[str]]:
        """
        import cv2

        image = cv2.imread(IMAGE_PATH)

        model = Model(model_config_path=CONFIG_PATH, model_checkpoint_path=WEIGHTS_PATH)
        detections, labels = model.predict_with_caption(
            image=image,
            caption=caption,
            box_threshold=BOX_THRESHOLD,
            text_threshold=TEXT_THRESHOLD
        )

        import supervision as sv

        box_annotator = sv.BoxAnnotator()
        annotated_image = box_annotator.annotate(scene=image, detections=detections, labels=labels)
        """
        processed_image = Model.preprocess_image(image_bgr=image).to(self.device)
        boxes, logits, phrases = predict(
            model=self.model,
            image=processed_image,
            caption=caption,
            box_threshold=box_threshold,
            text_threshold=text_threshold,
            device=self.device)
        source_h, source_w, _ = image.shape
        detections = Model.post_process_result(
            source_h=source_h,
            source_w=source_w,
            boxes=boxes,
            logits=logits)
        return detections, phrases

    def predict_with_classes(
        self,
        image: np.ndarray,
        classes: List[str],
        box_threshold: float,
        text_threshold: float
    ) -> sv.Detections:
        """
        import cv2

        image = cv2.imread(IMAGE_PATH)

        model = Model(model_config_path=CONFIG_PATH, model_checkpoint_path=WEIGHTS_PATH)
        detections = model.predict_with_classes(
            image=image,
            classes=CLASSES,
            box_threshold=BOX_THRESHOLD,
            text_threshold=TEXT_THRESHOLD
        )


        import supervision as sv

        box_annotator = sv.BoxAnnotator()
        annotated_image = box_annotator.annotate(scene=image, detections=detections)
        """
        caption = ". ".join(classes)
        processed_image = Model.preprocess_image(image_bgr=image).to(self.device)
        boxes, logits, phrases = predict(
            model=self.model,
            image=processed_image,
            caption=caption,
            box_threshold=box_threshold,
            text_threshold=text_threshold,
            device=self.device)
        source_h, source_w, _ = image.shape
        detections = Model.post_process_result(
            source_h=source_h,
            source_w=source_w,
            boxes=boxes,
            logits=logits)
        class_id = Model.phrases2classes(phrases=phrases, classes=classes)
        detections.class_id = class_id
        return detections

    @staticmethod
    def preprocess_image(image_bgr: np.ndarray) -> torch.Tensor:
        transform = T.Compose(
            [
                T.RandomResize([800], max_size=1333),
                T.ToTensor(),
                T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
            ]
        )
        image_pillow = Image.fromarray(cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB))
        image_transformed, _ = transform(image_pillow, None)
        return image_transformed

    @staticmethod
    def post_process_result(
        source_h: int,
        source_w: int,
        boxes: torch.Tensor,
        logits: torch.Tensor
    ) -> sv.Detections:
        boxes = boxes * torch.Tensor([source_w, source_h, source_w, source_h])
        xyxy = box_convert(boxes=boxes, in_fmt="cxcywh", out_fmt="xyxy").numpy()
        confidence = logits.numpy()
        return sv.Detections(xyxy=xyxy, confidence=confidence)

    @staticmethod
    def phrases2classes(phrases: List[str], classes: List[str]) -> np.ndarray:
        class_ids = []
        for phrase in phrases:
            for class_ in classes:
                if class_ in phrase:
                    class_ids.append(classes.index(class_))
                    break
            else:
                class_ids.append(None)
        return np.array(class_ids)
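End to end, the old API in this file composes as below. The sketch reuses the config path from the gradio_app hunk at the top of this commit; the image path and local checkpoint path are placeholders:

# Sketch: old-API pipeline from this file. Image and weights paths are placeholders.
import cv2
from groundingdino.util.inference import load_model, load_image, predict, annotate

model = load_model(
    "groundingdino/config/GroundingDINO_SwinT_OGC.py",  # config_file from the gradio_app hunk
    "groundingdino_swint_ogc.pth",                      # local copy of ckpt_filenmae
    device="cpu",
)
image_source, image = load_image("IMAGE_PATH.jpg")

boxes, logits, phrases = predict(
    model=model, image=image, caption="cat. dog.",
    box_threshold=0.35, text_threshold=0.25, device="cpu",
)
annotated = annotate(image_source=image_source, boxes=boxes, logits=logits, phrases=phrases)
cv2.imwrite("annotated.jpg", annotated)  # annotate() returns a BGR frame, ready for cv2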
groundingdino/util/logger.py
ADDED

@@ -0,0 +1,93 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import functools
import logging
import os
import sys

from termcolor import colored


class _ColorfulFormatter(logging.Formatter):
    def __init__(self, *args, **kwargs):
        self._root_name = kwargs.pop("root_name") + "."
        self._abbrev_name = kwargs.pop("abbrev_name", "")
        if len(self._abbrev_name):
            self._abbrev_name = self._abbrev_name + "."
        super(_ColorfulFormatter, self).__init__(*args, **kwargs)

    def formatMessage(self, record):
        record.name = record.name.replace(self._root_name, self._abbrev_name)
        log = super(_ColorfulFormatter, self).formatMessage(record)
        if record.levelno == logging.WARNING:
            prefix = colored("WARNING", "red", attrs=["blink"])
        elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:
            prefix = colored("ERROR", "red", attrs=["blink", "underline"])
        else:
            return log
        return prefix + " " + log


# so that calling setup_logger multiple times won't add many handlers
@functools.lru_cache()
def setup_logger(output=None, distributed_rank=0, *, color=True, name="imagenet", abbrev_name=None):
    """
    Initialize the detectron2 logger and set its verbosity level to "INFO".

    Args:
        output (str): a file name or a directory to save log. If None, will not save log file.
            If ends with ".txt" or ".log", assumed to be a file name.
            Otherwise, logs will be saved to `output/log.txt`.
        name (str): the root module name of this logger

    Returns:
        logging.Logger: a logger
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    logger.propagate = False

    if abbrev_name is None:
        abbrev_name = name

    plain_formatter = logging.Formatter(
        "[%(asctime)s.%(msecs)03d]: %(message)s", datefmt="%m/%d %H:%M:%S"
    )
    # stdout logging: master only
    if distributed_rank == 0:
        ch = logging.StreamHandler(stream=sys.stdout)
        ch.setLevel(logging.DEBUG)
        if color:
            formatter = _ColorfulFormatter(
                colored("[%(asctime)s.%(msecs)03d]: ", "green") + "%(message)s",
                datefmt="%m/%d %H:%M:%S",
                root_name=name,
                abbrev_name=str(abbrev_name),
            )
        else:
            formatter = plain_formatter
        ch.setFormatter(formatter)
        logger.addHandler(ch)

    # file logging: all workers
    if output is not None:
        if output.endswith(".txt") or output.endswith(".log"):
            filename = output
        else:
            filename = os.path.join(output, "log.txt")
        if distributed_rank > 0:
            filename = filename + f".rank{distributed_rank}"
        os.makedirs(os.path.dirname(filename), exist_ok=True)

        fh = logging.StreamHandler(_cached_log_stream(filename))
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(plain_formatter)
        logger.addHandler(fh)

    return logger


# cache the opened file object, so that different calls to `setup_logger`
# with the same file name can safely write to the same file.
@functools.lru_cache(maxsize=None)
def _cached_log_stream(filename):
    return open(filename, "a")
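setup_logger is wrapped in functools.lru_cache, so repeated calls with the same arguments return the already-configured logger instead of stacking handlers. A short usage sketch (the output directory and logger name are placeholders):

# Sketch: the rank-0 process logs to stdout (colored) and to logs/log.txt.
from groundingdino.util.logger import setup_logger

logger = setup_logger(output="logs", distributed_rank=0, name="groundingdino")
logger.info("model loaded")
logger.warning("low confidence detections")  # rendered with a red WARNING prefix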
groundingdino/util/misc.py
ADDED
@@ -0,0 +1,717 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Misc functions, including distributed helpers.

Mostly copy-paste from torchvision references.
"""
import colorsys
import datetime
import functools
import io
import json
import os
import pickle
import subprocess
import time
from collections import OrderedDict, defaultdict, deque
from typing import List, Optional

import numpy as np
import torch
import torch.distributed as dist

# needed due to empty tensor bug in pytorch and torchvision 0.5
import torchvision
from torch import Tensor

__torchvision_need_compat_flag = float(torchvision.__version__.split(".")[1]) < 7
if __torchvision_need_compat_flag:
    from torchvision.ops import _new_empty_tensor
    from torchvision.ops.misc import _output_size


class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        if fmt is None:
            fmt = "{median:.4f} ({global_avg:.4f})"
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        self.fmt = fmt

    def update(self, value, n=1):
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda")
        dist.barrier()
        dist.all_reduce(t)
        t = t.tolist()
        self.count = int(t[0])
        self.total = t[1]

    @property
    def median(self):
        d = torch.tensor(list(self.deque))
        if d.shape[0] == 0:
            return 0
        return d.median().item()

    @property
    def avg(self):
        d = torch.tensor(list(self.deque), dtype=torch.float32)
        return d.mean().item()

    @property
    def global_avg(self):
        if os.environ.get("SHILONG_AMP", None) == "1":
            eps = 1e-4
        else:
            eps = 1e-6
        return self.total / (self.count + eps)

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value,
        )
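
To illustrate `SmoothedValue` (a hedged sketch, assuming the class above is in scope; it runs on CPU because only `update` and the windowed statistics are touched):

v = SmoothedValue(window_size=3, fmt="{median:.2f} ({global_avg:.2f})")
for x in [1.0, 2.0, 3.0, 4.0]:
    v.update(x)
# the deque keeps only the last 3 values; total/count keep everything
print(v.median)   # 3.0 -- median of [2.0, 3.0, 4.0]
print(str(v))     # "3.00 (2.50)" -- windowed median vs. global average
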
@functools.lru_cache()
def _get_global_gloo_group():
    """
    Return a process group based on gloo backend, containing all the ranks
    The result is cached.
    """

    if dist.get_backend() == "nccl":
        return dist.new_group(backend="gloo")

    return dist.group.WORLD


def all_gather_cpu(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """

    world_size = get_world_size()
    if world_size == 1:
        return [data]

    cpu_group = _get_global_gloo_group()

    buffer = io.BytesIO()
    torch.save(data, buffer)
    data_view = buffer.getbuffer()
    device = "cuda" if cpu_group is None else "cpu"
    tensor = torch.ByteTensor(data_view).to(device)

    # obtain Tensor size of each rank
    local_size = torch.tensor([tensor.numel()], device=device, dtype=torch.long)
    size_list = [torch.tensor([0], device=device, dtype=torch.long) for _ in range(world_size)]
    if cpu_group is None:
        dist.all_gather(size_list, local_size)
    else:
        print("gathering on cpu")
        dist.all_gather(size_list, local_size, group=cpu_group)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    assert isinstance(local_size.item(), int)
    local_size = int(local_size.item())

    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device=device))
    if local_size != max_size:
        padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device=device)
        tensor = torch.cat((tensor, padding), dim=0)
    if cpu_group is None:
        dist.all_gather(tensor_list, tensor)
    else:
        dist.all_gather(tensor_list, tensor, group=cpu_group)

    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        tensor = torch.split(tensor, [size, max_size - size], dim=0)[0]
        buffer = io.BytesIO(tensor.cpu().numpy())
        obj = torch.load(buffer)
        data_list.append(obj)

    return data_list


def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """

    if os.getenv("CPU_REDUCE") == "1":
        return all_gather_cpu(data)

    world_size = get_world_size()
    if world_size == 1:
        return [data]

    # serialized to a Tensor
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to("cuda")

    # obtain Tensor size of each rank
    local_size = torch.tensor([tensor.numel()], device="cuda")
    size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)

    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
    if local_size != max_size:
        padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)

    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))

    return data_list


def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that all processes
    have the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        # sort the keys so that they are consistent across processes
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        values = torch.stack(values, dim=0)
        dist.all_reduce(values)
        if average:
            values /= world_size
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict
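
Both gather helpers degrade gracefully outside a distributed run: with a world size of 1, `all_gather` simply wraps its input in a list and `reduce_dict` returns the dict unchanged, so training code can call them unconditionally. A hedged single-process sketch (assumes the functions above plus `torch` are in scope):

losses = {"loss_ce": torch.tensor(0.7), "loss_bbox": torch.tensor(0.2)}
gathered = all_gather(losses)    # [losses] when world_size == 1, no CUDA needed
reduced = reduce_dict(losses)    # returned unchanged when world_size < 2
print(len(gathered), reduced["loss_ce"].item())   # 1 and ~0.7
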
class MetricLogger(object):
    def __init__(self, delimiter="\t"):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))

    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            # print(name, str(meter))
            # import ipdb;ipdb.set_trace()
            if meter.count > 0:
                loss_str.append("{}: {}".format(name, str(meter)))
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None, logger=None):
        if logger is None:
            print_func = print
        else:
            print_func = logger.info

        i = 0
        if not header:
            header = ""
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt="{avg:.4f}")
        data_time = SmoothedValue(fmt="{avg:.4f}")
        space_fmt = ":" + str(len(str(len(iterable)))) + "d"
        if torch.cuda.is_available():
            log_msg = self.delimiter.join(
                [
                    header,
                    "[{0" + space_fmt + "}/{1}]",
                    "eta: {eta}",
                    "{meters}",
                    "time: {time}",
                    "data: {data}",
                    "max mem: {memory:.0f}",
                ]
            )
        else:
            log_msg = self.delimiter.join(
                [
                    header,
                    "[{0" + space_fmt + "}/{1}]",
                    "eta: {eta}",
                    "{meters}",
                    "time: {time}",
                    "data: {data}",
                ]
            )
        MB = 1024.0 * 1024.0
        for obj in iterable:
            data_time.update(time.time() - end)
            yield obj
            # import ipdb; ipdb.set_trace()
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print_func(
                        log_msg.format(
                            i,
                            len(iterable),
                            eta=eta_string,
                            meters=str(self),
                            time=str(iter_time),
                            data=str(data_time),
                            memory=torch.cuda.max_memory_allocated() / MB,
                        )
                    )
                else:
                    print_func(
                        log_msg.format(
                            i,
                            len(iterable),
                            eta=eta_string,
                            meters=str(self),
                            time=str(iter_time),
                            data=str(data_time),
                        )
                    )
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print_func(
            "{} Total time: {} ({:.4f} s / it)".format(
                header, total_time_str, total_time / len(iterable)
            )
        )
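
`MetricLogger.log_every` is a generator: it yields the items of the iterable while timing data loading and iteration, and prints a progress line every `print_freq` steps. A hedged usage sketch (assumes the class above is in scope):

logger = MetricLogger(delimiter="  ")
for batch in logger.log_every(range(100), print_freq=20, header="Test:"):
    logger.update(loss=0.5, acc=99.0)   # any float/int keyword becomes a meter
# prints lines like:
# Test: [ 20/100]  eta: 0:00:00  loss: 0.5000 (0.5000)  acc: 99.0000 (99.0000)  time: 0.0000  data: 0.0000
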
def get_sha():
    cwd = os.path.dirname(os.path.abspath(__file__))

    def _run(command):
        return subprocess.check_output(command, cwd=cwd).decode("ascii").strip()

    sha = "N/A"
    diff = "clean"
    branch = "N/A"
    try:
        sha = _run(["git", "rev-parse", "HEAD"])
        subprocess.check_output(["git", "diff"], cwd=cwd)
        diff = _run(["git", "diff-index", "HEAD"])
        diff = "has uncommited changes" if diff else "clean"
        branch = _run(["git", "rev-parse", "--abbrev-ref", "HEAD"])
    except Exception:
        pass
    message = f"sha: {sha}, status: {diff}, branch: {branch}"
    return message


def collate_fn(batch):
    # import ipdb; ipdb.set_trace()
    batch = list(zip(*batch))
    batch[0] = nested_tensor_from_tensor_list(batch[0])
    return tuple(batch)


def _max_by_axis(the_list):
    # type: (List[List[int]]) -> List[int]
    maxes = the_list[0]
    for sublist in the_list[1:]:
        for index, item in enumerate(sublist):
            maxes[index] = max(maxes[index], item)
    return maxes


class NestedTensor(object):
    def __init__(self, tensors, mask: Optional[Tensor]):
        self.tensors = tensors
        self.mask = mask
        if mask == "auto":
            self.mask = torch.zeros_like(tensors).to(tensors.device)
            if self.mask.dim() == 3:
                self.mask = self.mask.sum(0).to(bool)
            elif self.mask.dim() == 4:
                self.mask = self.mask.sum(1).to(bool)
            else:
                raise ValueError(
                    "tensors dim must be 3 or 4 but {}({})".format(
                        self.tensors.dim(), self.tensors.shape
                    )
                )

    def imgsize(self):
        res = []
        for i in range(self.tensors.shape[0]):
            mask = self.mask[i]
            maxH = (~mask).sum(0).max()
            maxW = (~mask).sum(1).max()
            res.append(torch.Tensor([maxH, maxW]))
        return res

    def to(self, device):
        # type: (Device) -> NestedTensor # noqa
        cast_tensor = self.tensors.to(device)
        mask = self.mask
        if mask is not None:
            assert mask is not None
            cast_mask = mask.to(device)
        else:
            cast_mask = None
        return NestedTensor(cast_tensor, cast_mask)

    def to_img_list_single(self, tensor, mask):
        assert tensor.dim() == 3, "dim of tensor should be 3 but {}".format(tensor.dim())
        maxH = (~mask).sum(0).max()
        maxW = (~mask).sum(1).max()
        img = tensor[:, :maxH, :maxW]
        return img

    def to_img_list(self):
        """remove the padding and convert to img list

        Returns:
            [type]: [description]
        """
        if self.tensors.dim() == 3:
            return self.to_img_list_single(self.tensors, self.mask)
        else:
            res = []
            for i in range(self.tensors.shape[0]):
                tensor_i = self.tensors[i]
                mask_i = self.mask[i]
                res.append(self.to_img_list_single(tensor_i, mask_i))
            return res

    @property
    def device(self):
        return self.tensors.device

    def decompose(self):
        return self.tensors, self.mask

    def __repr__(self):
        return str(self.tensors)

    @property
    def shape(self):
        return {"tensors.shape": self.tensors.shape, "mask.shape": self.mask.shape}


def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
    # TODO make this more general
    if tensor_list[0].ndim == 3:
        if torchvision._is_tracing():
            # nested_tensor_from_tensor_list() does not export well to ONNX
            # call _onnx_nested_tensor_from_tensor_list() instead
            return _onnx_nested_tensor_from_tensor_list(tensor_list)

        # TODO make it support different-sized images
        max_size = _max_by_axis([list(img.shape) for img in tensor_list])
        # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
        batch_shape = [len(tensor_list)] + max_size
        b, c, h, w = batch_shape
        dtype = tensor_list[0].dtype
        device = tensor_list[0].device
        tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
        mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
        for img, pad_img, m in zip(tensor_list, tensor, mask):
            pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
            m[: img.shape[1], : img.shape[2]] = False
    else:
        raise ValueError("not supported")
    return NestedTensor(tensor, mask)


# _onnx_nested_tensor_from_tensor_list() is an implementation of
# nested_tensor_from_tensor_list() that is supported by ONNX tracing.
@torch.jit.unused
def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor:
    max_size = []
    for i in range(tensor_list[0].dim()):
        max_size_i = torch.max(
            torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)
        ).to(torch.int64)
        max_size.append(max_size_i)
    max_size = tuple(max_size)

    # work around for
    # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
    # m[: img.shape[1], :img.shape[2]] = False
    # which is not yet supported in onnx
    padded_imgs = []
    padded_masks = []
    for img in tensor_list:
        padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
        padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))
        padded_imgs.append(padded_img)

        m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)
        padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1)
        padded_masks.append(padded_mask.to(torch.bool))

    tensor = torch.stack(padded_imgs)
    mask = torch.stack(padded_masks)

    return NestedTensor(tensor, mask=mask)
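
`nested_tensor_from_tensor_list` pads a list of differently sized C×H×W images to one shared shape and records the padding in a boolean mask (`True` marks padded pixels), which is how variable-size batches are fed downstream. A hedged sketch (assumes the functions above plus `torch` are in scope):

imgs = [torch.rand(3, 480, 640), torch.rand(3, 600, 400)]
samples = nested_tensor_from_tensor_list(imgs)
tensors, mask = samples.decompose()
print(tensors.shape)   # torch.Size([2, 3, 600, 640]) -- padded to the max H and W
print(mask.shape)      # torch.Size([2, 600, 640])
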
def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    """
    import builtins as __builtin__

    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        force = kwargs.pop("force", False)
        if is_master or force:
            builtin_print(*args, **kwargs)

    __builtin__.print = print


def is_dist_avail_and_initialized():
    if not dist.is_available():
        return False
    if not dist.is_initialized():
        return False
    return True


def get_world_size():
    if not is_dist_avail_and_initialized():
        return 1
    return dist.get_world_size()


def get_rank():
    if not is_dist_avail_and_initialized():
        return 0
    return dist.get_rank()


def is_main_process():
    return get_rank() == 0


def save_on_master(*args, **kwargs):
    if is_main_process():
        torch.save(*args, **kwargs)


def init_distributed_mode(args):
    if "WORLD_SIZE" in os.environ and os.environ["WORLD_SIZE"] != "":  # 'RANK' in os.environ and
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ["WORLD_SIZE"])
        args.gpu = args.local_rank = int(os.environ["LOCAL_RANK"])

        # launch by torch.distributed.launch
        # Single node
        #   python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 1 --rank 0 ...
        # Multi nodes
        #   python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 2 --rank 0 --dist-url 'tcp://IP_OF_NODE0:FREEPORT' ...
        #   python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 2 --rank 1 --dist-url 'tcp://IP_OF_NODE0:FREEPORT' ...
        # args.rank = int(os.environ.get('OMPI_COMM_WORLD_RANK'))
        # local_world_size = int(os.environ['GPU_PER_NODE_COUNT'])
        # args.world_size = args.world_size * local_world_size
        # args.gpu = args.local_rank = int(os.environ['LOCAL_RANK'])
        # args.rank = args.rank * local_world_size + args.local_rank
        print(
            "world size: {}, rank: {}, local rank: {}".format(
                args.world_size, args.rank, args.local_rank
            )
        )
        print(json.dumps(dict(os.environ), indent=2))
    elif "SLURM_PROCID" in os.environ:
        args.rank = int(os.environ["SLURM_PROCID"])
        args.gpu = args.local_rank = int(os.environ["SLURM_LOCALID"])
        args.world_size = int(os.environ["SLURM_NPROCS"])

        print(
            "world size: {}, world rank: {}, local rank: {}, device_count: {}".format(
                args.world_size, args.rank, args.local_rank, torch.cuda.device_count()
            )
        )
    else:
        print("Not using distributed mode")
        args.distributed = False
        args.world_size = 1
        args.rank = 0
        args.local_rank = 0
        return

    print("world_size:{} rank:{} local_rank:{}".format(args.world_size, args.rank, args.local_rank))
    args.distributed = True
    torch.cuda.set_device(args.local_rank)
    args.dist_backend = "nccl"
    print("| distributed init (rank {}): {}".format(args.rank, args.dist_url), flush=True)

    torch.distributed.init_process_group(
        backend=args.dist_backend,
        world_size=args.world_size,
        rank=args.rank,
        init_method=args.dist_url,
    )

    print("Before torch.distributed.barrier()")
    torch.distributed.barrier()
    print("End torch.distributed.barrier()")
    setup_for_distributed(args.rank == 0)


@torch.no_grad()
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k"""
    if target.numel() == 0:
        return [torch.zeros([], device=output.device)]
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].view(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res


@torch.no_grad()
def accuracy_onehot(pred, gt):
    """_summary_

    Args:
        pred (_type_): n, c
        gt (_type_): n, c
    """
    tp = ((pred - gt).abs().sum(-1) < 1e-4).float().sum()
    acc = tp / gt.shape[0] * 100
    return acc


def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
    # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor
    """
    Equivalent to nn.functional.interpolate, but with support for empty batch sizes.
    This will eventually be supported natively by PyTorch, and this
    class can go away.
    """
    if __torchvision_need_compat_flag < 0.7:
        if input.numel() > 0:
            return torch.nn.functional.interpolate(input, size, scale_factor, mode, align_corners)

        output_shape = _output_size(2, input, size, scale_factor)
        output_shape = list(input.shape[:-2]) + list(output_shape)
        return _new_empty_tensor(input, output_shape)
    else:
        return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)


class color_sys:
    def __init__(self, num_colors) -> None:
        self.num_colors = num_colors
        colors = []
        for i in np.arange(0.0, 360.0, 360.0 / num_colors):
            hue = i / 360.0
            lightness = (50 + np.random.rand() * 10) / 100.0
            saturation = (90 + np.random.rand() * 10) / 100.0
            colors.append(
                tuple([int(j * 255) for j in colorsys.hls_to_rgb(hue, lightness, saturation)])
            )
        self.colors = colors

    def __call__(self, idx):
        return self.colors[idx]


def inverse_sigmoid(x, eps=1e-3):
    x = x.clamp(min=0, max=1)
    x1 = x.clamp(min=eps)
    x2 = (1 - x).clamp(min=eps)
    return torch.log(x1 / x2)


def clean_state_dict(state_dict):
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        if k[:7] == "module.":
            k = k[7:]  # remove `module.`
        new_state_dict[k] = v
    return new_state_dict
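
Two of these helpers are worth a quick illustration: `inverse_sigmoid` is the eps-clamped logit used when boxes are refined in sigmoid space, and `clean_state_dict` strips the `module.` prefix that `DataParallel`/`DistributedDataParallel` prepends to checkpoint keys. A hedged sketch (assumes the helpers above plus `torch` are in scope):

x = torch.tensor([0.0, 0.5, 1.0])
print(inverse_sigmoid(x))   # tensor([-6.9078, 0.0000, 6.9078]) -- clamped by eps=1e-3

ckpt = {"module.backbone.weight": torch.zeros(1)}
print(list(clean_state_dict(ckpt).keys()))   # ['backbone.weight']
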
groundingdino/util/slconfig.py
ADDED
@@ -0,0 +1,427 @@
# ==========================================================
# Modified from mmcv
# ==========================================================
import ast
import os
import os.path as osp
import shutil
import sys
import tempfile
from argparse import Action
from importlib import import_module

from addict import Dict
from yapf.yapflib.yapf_api import FormatCode

BASE_KEY = "_base_"
DELETE_KEY = "_delete_"
RESERVED_KEYS = ["filename", "text", "pretty_text", "get", "dump", "merge_from_dict"]


def check_file_exist(filename, msg_tmpl='file "{}" does not exist'):
    if not osp.isfile(filename):
        raise FileNotFoundError(msg_tmpl.format(filename))


class ConfigDict(Dict):
    def __missing__(self, name):
        raise KeyError(name)

    def __getattr__(self, name):
        try:
            value = super(ConfigDict, self).__getattr__(name)
        except KeyError:
            ex = AttributeError(f"'{self.__class__.__name__}' object has no " f"attribute '{name}'")
        except Exception as e:
            ex = e
        else:
            return value
        raise ex


class SLConfig(object):
    """
    config files.
    only support .py file as config now.

    ref: mmcv.utils.config

    Example:
        >>> cfg = Config(dict(a=1, b=dict(b1=[0, 1])))
        >>> cfg.a
        1
        >>> cfg.b
        {'b1': [0, 1]}
        >>> cfg.b.b1
        [0, 1]
        >>> cfg = Config.fromfile('tests/data/config/a.py')
        >>> cfg.filename
        "/home/kchen/projects/mmcv/tests/data/config/a.py"
        >>> cfg.item4
        'test'
        >>> cfg
        "Config [path: /home/kchen/projects/mmcv/tests/data/config/a.py]: "
        "{'item1': [1, 2], 'item2': {'a': 0}, 'item3': True, 'item4': 'test'}"
    """

    @staticmethod
    def _validate_py_syntax(filename):
        with open(filename) as f:
            content = f.read()
        try:
            ast.parse(content)
        except SyntaxError:
            raise SyntaxError("There are syntax errors in config " f"file {filename}")

    @staticmethod
    def _file2dict(filename):
        filename = osp.abspath(osp.expanduser(filename))
        check_file_exist(filename)
        if filename.lower().endswith(".py"):
            with tempfile.TemporaryDirectory() as temp_config_dir:
                temp_config_file = tempfile.NamedTemporaryFile(dir=temp_config_dir, suffix=".py")
                temp_config_name = osp.basename(temp_config_file.name)
                if os.name == 'nt':
                    temp_config_file.close()
                shutil.copyfile(filename, osp.join(temp_config_dir, temp_config_name))
                temp_module_name = osp.splitext(temp_config_name)[0]
                sys.path.insert(0, temp_config_dir)
                SLConfig._validate_py_syntax(filename)
                mod = import_module(temp_module_name)
                sys.path.pop(0)
                cfg_dict = {
                    name: value for name, value in mod.__dict__.items() if not name.startswith("__")
                }
                # delete imported module
                del sys.modules[temp_module_name]
                # close temp file
                temp_config_file.close()
        elif filename.lower().endswith((".yml", ".yaml", ".json")):
            from .slio import slload

            cfg_dict = slload(filename)
        else:
            raise IOError("Only py/yml/yaml/json type are supported now!")

        cfg_text = filename + "\n"
        with open(filename, "r") as f:
            cfg_text += f.read()

        # parse the base file
        if BASE_KEY in cfg_dict:
            cfg_dir = osp.dirname(filename)
            base_filename = cfg_dict.pop(BASE_KEY)
            base_filename = base_filename if isinstance(base_filename, list) else [base_filename]

            cfg_dict_list = list()
            cfg_text_list = list()
            for f in base_filename:
                _cfg_dict, _cfg_text = SLConfig._file2dict(osp.join(cfg_dir, f))
                cfg_dict_list.append(_cfg_dict)
                cfg_text_list.append(_cfg_text)

            base_cfg_dict = dict()
            for c in cfg_dict_list:
                if len(base_cfg_dict.keys() & c.keys()) > 0:
                    raise KeyError("Duplicate key is not allowed among bases")
                    # TODO Allow the duplicate key while warning user
                base_cfg_dict.update(c)

            base_cfg_dict = SLConfig._merge_a_into_b(cfg_dict, base_cfg_dict)
            cfg_dict = base_cfg_dict

            # merge cfg_text
            cfg_text_list.append(cfg_text)
            cfg_text = "\n".join(cfg_text_list)

        return cfg_dict, cfg_text

    @staticmethod
    def _merge_a_into_b(a, b):
        """merge dict `a` into dict `b` (non-inplace).
        values in `a` will overwrite `b`.
        copy first to avoid inplace modification

        Args:
            a ([type]): [description]
            b ([type]): [description]

        Returns:
            [dict]: [description]
        """
        # import ipdb; ipdb.set_trace()
        if not isinstance(a, dict):
            return a

        b = b.copy()
        for k, v in a.items():
            if isinstance(v, dict) and k in b and not v.pop(DELETE_KEY, False):

                if not isinstance(b[k], dict) and not isinstance(b[k], list):
                    # if :
                    # import ipdb; ipdb.set_trace()
                    raise TypeError(
                        f"{k}={v} in child config cannot inherit from base "
                        f"because {k} is a dict in the child config but is of "
                        f"type {type(b[k])} in base config. You may set "
                        f"`{DELETE_KEY}=True` to ignore the base config"
                    )
                b[k] = SLConfig._merge_a_into_b(v, b[k])
            elif isinstance(b, list):
                try:
                    _ = int(k)
                except:
                    raise TypeError(
                        f"b is a list, " f"index {k} should be an int when input but {type(k)}"
                    )
                b[int(k)] = SLConfig._merge_a_into_b(v, b[int(k)])
            else:
                b[k] = v

        return b

    @staticmethod
    def fromfile(filename):
        cfg_dict, cfg_text = SLConfig._file2dict(filename)
        return SLConfig(cfg_dict, cfg_text=cfg_text, filename=filename)

    def __init__(self, cfg_dict=None, cfg_text=None, filename=None):
        if cfg_dict is None:
            cfg_dict = dict()
        elif not isinstance(cfg_dict, dict):
            raise TypeError("cfg_dict must be a dict, but " f"got {type(cfg_dict)}")
        for key in cfg_dict:
            if key in RESERVED_KEYS:
                raise KeyError(f"{key} is reserved for config file")

        super(SLConfig, self).__setattr__("_cfg_dict", ConfigDict(cfg_dict))
        super(SLConfig, self).__setattr__("_filename", filename)
        if cfg_text:
            text = cfg_text
        elif filename:
            with open(filename, "r") as f:
                text = f.read()
        else:
            text = ""
        super(SLConfig, self).__setattr__("_text", text)

    @property
    def filename(self):
        return self._filename

    @property
    def text(self):
        return self._text

    @property
    def pretty_text(self):

        indent = 4

        def _indent(s_, num_spaces):
            s = s_.split("\n")
            if len(s) == 1:
                return s_
            first = s.pop(0)
            s = [(num_spaces * " ") + line for line in s]
            s = "\n".join(s)
            s = first + "\n" + s
            return s

        def _format_basic_types(k, v, use_mapping=False):
            if isinstance(v, str):
                v_str = f"'{v}'"
            else:
                v_str = str(v)

            if use_mapping:
                k_str = f"'{k}'" if isinstance(k, str) else str(k)
                attr_str = f"{k_str}: {v_str}"
            else:
                attr_str = f"{str(k)}={v_str}"
            attr_str = _indent(attr_str, indent)

            return attr_str

        def _format_list(k, v, use_mapping=False):
            # check if all items in the list are dict
            if all(isinstance(_, dict) for _ in v):
                v_str = "[\n"
                v_str += "\n".join(
                    f"dict({_indent(_format_dict(v_), indent)})," for v_ in v
                ).rstrip(",")
                if use_mapping:
                    k_str = f"'{k}'" if isinstance(k, str) else str(k)
                    attr_str = f"{k_str}: {v_str}"
                else:
                    attr_str = f"{str(k)}={v_str}"
                attr_str = _indent(attr_str, indent) + "]"
            else:
                attr_str = _format_basic_types(k, v, use_mapping)
            return attr_str

        def _contain_invalid_identifier(dict_str):
            contain_invalid_identifier = False
            for key_name in dict_str:
                contain_invalid_identifier |= not str(key_name).isidentifier()
            return contain_invalid_identifier

        def _format_dict(input_dict, outest_level=False):
            r = ""
            s = []

            use_mapping = _contain_invalid_identifier(input_dict)
            if use_mapping:
                r += "{"
            for idx, (k, v) in enumerate(input_dict.items()):
                is_last = idx >= len(input_dict) - 1
                end = "" if outest_level or is_last else ","
                if isinstance(v, dict):
                    v_str = "\n" + _format_dict(v)
                    if use_mapping:
                        k_str = f"'{k}'" if isinstance(k, str) else str(k)
                        attr_str = f"{k_str}: dict({v_str}"
                    else:
                        attr_str = f"{str(k)}=dict({v_str}"
                    attr_str = _indent(attr_str, indent) + ")" + end
                elif isinstance(v, list):
                    attr_str = _format_list(k, v, use_mapping) + end
                else:
                    attr_str = _format_basic_types(k, v, use_mapping) + end

                s.append(attr_str)
            r += "\n".join(s)
            if use_mapping:
                r += "}"
            return r

        cfg_dict = self._cfg_dict.to_dict()
        text = _format_dict(cfg_dict, outest_level=True)
        # copied from setup.cfg
        yapf_style = dict(
            based_on_style="pep8",
            blank_line_before_nested_class_or_def=True,
            split_before_expression_after_opening_paren=True,
        )
        text, _ = FormatCode(text, style_config=yapf_style, verify=True)

        return text

    def __repr__(self):
        return f"Config (path: {self.filename}): {self._cfg_dict.__repr__()}"

    def __len__(self):
        return len(self._cfg_dict)

    def __getattr__(self, name):
        # # debug
        # print('+'*15)
        # print('name=%s' % name)
        # print("addr:", id(self))
        # # print('type(self):', type(self))
        # print(self.__dict__)
        # print('+'*15)
        # if self.__dict__ == {}:
        #     raise ValueError

        return getattr(self._cfg_dict, name)

    def __getitem__(self, name):
        return self._cfg_dict.__getitem__(name)

    def __setattr__(self, name, value):
        if isinstance(value, dict):
            value = ConfigDict(value)
        self._cfg_dict.__setattr__(name, value)

    def __setitem__(self, name, value):
        if isinstance(value, dict):
            value = ConfigDict(value)
        self._cfg_dict.__setitem__(name, value)

    def __iter__(self):
        return iter(self._cfg_dict)

    def dump(self, file=None):
        # import ipdb; ipdb.set_trace()
        if file is None:
            return self.pretty_text
        else:
            with open(file, "w") as f:
                f.write(self.pretty_text)

    def merge_from_dict(self, options):
        """Merge list into cfg_dict

        Merge the dict parsed by MultipleKVAction into this cfg.

        Examples:
            >>> options = {'model.backbone.depth': 50,
            ...            'model.backbone.with_cp':True}
            >>> cfg = Config(dict(model=dict(backbone=dict(type='ResNet'))))
            >>> cfg.merge_from_dict(options)
            >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict')
            >>> assert cfg_dict == dict(
            ...     model=dict(backbone=dict(depth=50, with_cp=True)))

        Args:
            options (dict): dict of configs to merge from.
        """
        option_cfg_dict = {}
        for full_key, v in options.items():
            d = option_cfg_dict
            key_list = full_key.split(".")
            for subkey in key_list[:-1]:
                d.setdefault(subkey, ConfigDict())
                d = d[subkey]
            subkey = key_list[-1]
            d[subkey] = v

        cfg_dict = super(SLConfig, self).__getattribute__("_cfg_dict")
        super(SLConfig, self).__setattr__(
            "_cfg_dict", SLConfig._merge_a_into_b(option_cfg_dict, cfg_dict)
        )

    # for multiprocess
    def __setstate__(self, state):
        self.__init__(state)

    def copy(self):
        return SLConfig(self._cfg_dict.copy())

    def deepcopy(self):
        return SLConfig(self._cfg_dict.deepcopy())
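
A hedged sketch of how `SLConfig` behaves with an in-memory dict (attribute access comes from `ConfigDict`, and `merge_from_dict` splits dotted keys into nested updates):

cfg = SLConfig(dict(model=dict(backbone="swin_T", hidden_dim=256)))
print(cfg.model.backbone)                  # 'swin_T'
cfg.merge_from_dict({"model.hidden_dim": 512})
print(cfg["model"]["hidden_dim"])          # 512

On-disk Python configs go through `SLConfig.fromfile(...)` instead, which imports the file in a temp directory and collects its top-level names as the config dict.
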
class DictAction(Action):
    """
    argparse action to split an argument into KEY=VALUE form
    on the first = and append to a dictionary. List options should
    be passed as comma separated values, i.e. KEY=V1,V2,V3
    """

    @staticmethod
    def _parse_int_float_bool(val):
        try:
            return int(val)
        except ValueError:
            pass
        try:
            return float(val)
        except ValueError:
            pass
        if val.lower() in ["true", "false"]:
            return True if val.lower() == "true" else False
        if val.lower() in ["none", "null"]:
            return None
        return val

    def __call__(self, parser, namespace, values, option_string=None):
        options = {}
        for kv in values:
            key, val = kv.split("=", maxsplit=1)
            val = [self._parse_int_float_bool(v) for v in val.split(",")]
            if len(val) == 1:
                val = val[0]
            options[key] = val
        setattr(namespace, self.dest, options)
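
`DictAction` turns repeated `KEY=VALUE` tokens into one dict, parsing ints, floats, booleans, `None`, and comma-separated lists along the way. A hedged argparse sketch (assumes the class above is in scope):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--options", nargs="+", action=DictAction)
args = parser.parse_args(["--options", "lr=0.0001", "betas=0.9,0.999", "amp=true"])
print(args.options)   # {'lr': 0.0001, 'betas': [0.9, 0.999], 'amp': True}
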
groundingdino/util/slio.py
ADDED
@@ -0,0 +1,177 @@
# ==========================================================
# Modified from mmcv
# ==========================================================

import json
import pickle
from abc import ABCMeta, abstractmethod
from pathlib import Path

import yaml

try:
    from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
    from yaml import Loader, Dumper


# ===========================
# Register handler
# ===========================


class BaseFileHandler(metaclass=ABCMeta):
    @abstractmethod
    def load_from_fileobj(self, file, **kwargs):
        pass

    @abstractmethod
    def dump_to_fileobj(self, obj, file, **kwargs):
        pass

    @abstractmethod
    def dump_to_str(self, obj, **kwargs):
        pass

    def load_from_path(self, filepath, mode="r", **kwargs):
        with open(filepath, mode) as f:
            return self.load_from_fileobj(f, **kwargs)

    def dump_to_path(self, obj, filepath, mode="w", **kwargs):
        with open(filepath, mode) as f:
            self.dump_to_fileobj(obj, f, **kwargs)


class JsonHandler(BaseFileHandler):
    def load_from_fileobj(self, file):
        return json.load(file)

    def dump_to_fileobj(self, obj, file, **kwargs):
        json.dump(obj, file, **kwargs)

    def dump_to_str(self, obj, **kwargs):
        return json.dumps(obj, **kwargs)


class PickleHandler(BaseFileHandler):
    def load_from_fileobj(self, file, **kwargs):
        return pickle.load(file, **kwargs)

    def load_from_path(self, filepath, **kwargs):
        return super(PickleHandler, self).load_from_path(filepath, mode="rb", **kwargs)

    def dump_to_str(self, obj, **kwargs):
        kwargs.setdefault("protocol", 2)
        return pickle.dumps(obj, **kwargs)

    def dump_to_fileobj(self, obj, file, **kwargs):
        kwargs.setdefault("protocol", 2)
        pickle.dump(obj, file, **kwargs)

    def dump_to_path(self, obj, filepath, **kwargs):
        super(PickleHandler, self).dump_to_path(obj, filepath, mode="wb", **kwargs)


class YamlHandler(BaseFileHandler):
    def load_from_fileobj(self, file, **kwargs):
        kwargs.setdefault("Loader", Loader)
        return yaml.load(file, **kwargs)

    def dump_to_fileobj(self, obj, file, **kwargs):
        kwargs.setdefault("Dumper", Dumper)
        yaml.dump(obj, file, **kwargs)

    def dump_to_str(self, obj, **kwargs):
        kwargs.setdefault("Dumper", Dumper)
        return yaml.dump(obj, **kwargs)


file_handlers = {
    "json": JsonHandler(),
    "yaml": YamlHandler(),
    "yml": YamlHandler(),
    "pickle": PickleHandler(),
    "pkl": PickleHandler(),
}

# ===========================
# load and dump
# ===========================


def is_str(x):
    """Whether the input is a string instance.

    Note: This method is deprecated since python 2 is no longer supported.
    """
    return isinstance(x, str)


def slload(file, file_format=None, **kwargs):
    """Load data from json/yaml/pickle files.

    This method provides a unified api for loading data from serialized files.

    Args:
        file (str or :obj:`Path` or file-like object): Filename or a file-like
            object.
        file_format (str, optional): If not specified, the file format will be
            inferred from the file extension, otherwise use the specified one.
            Currently supported formats include "json", "yaml/yml" and
            "pickle/pkl".

    Returns:
        The content from the file.
    """
    if isinstance(file, Path):
        file = str(file)
    if file_format is None and is_str(file):
        file_format = file.split(".")[-1]
    if file_format not in file_handlers:
        raise TypeError(f"Unsupported format: {file_format}")

    handler = file_handlers[file_format]
    if is_str(file):
        obj = handler.load_from_path(file, **kwargs)
    elif hasattr(file, "read"):
        obj = handler.load_from_fileobj(file, **kwargs)
    else:
        raise TypeError('"file" must be a filepath str or a file-object')
    return obj


def sldump(obj, file=None, file_format=None, **kwargs):
    """Dump data to json/yaml/pickle strings or files.

    This method provides a unified api for dumping data as strings or to files,
    and also supports custom arguments for each file format.

    Args:
        obj (any): The python object to be dumped.
        file (str or :obj:`Path` or file-like object, optional): If not
            specified, then the object is dump to a str, otherwise to a file
            specified by the filename or file-like object.
        file_format (str, optional): Same as :func:`load`.

    Returns:
        bool: True for success, False otherwise.
    """
    if isinstance(file, Path):
        file = str(file)
    if file_format is None:
        if is_str(file):
            file_format = file.split(".")[-1]
        elif file is None:
            raise ValueError("file_format must be specified since file is None")
    if file_format not in file_handlers:
        raise TypeError(f"Unsupported format: {file_format}")

    handler = file_handlers[file_format]
    if file is None:
        return handler.dump_to_str(obj, **kwargs)
    elif is_str(file):
        handler.dump_to_path(obj, file, **kwargs)
    elif hasattr(file, "write"):
        handler.dump_to_fileobj(obj, file, **kwargs)
    else:
        raise TypeError('"file" must be a filename str or a file-object')
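
`slload`/`sldump` dispatch on the file extension (or an explicit `file_format`) to the registered handlers. A hedged round-trip sketch (the `labels.yaml` filename is only an illustration):

data = {"classes": ["cat", "dog"], "threshold": 0.35}
print(sldump(data, file_format="json", indent=2))   # no file given -> returns a JSON string
sldump(data, "labels.yaml")                          # format inferred from the .yaml extension
print(slload("labels.yaml"))                         # {'classes': ['cat', 'dog'], 'threshold': 0.35}
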
groundingdino/util/time_counter.py
ADDED
@@ -0,0 +1,62 @@
import json
import time


class TimeCounter:
    def __init__(self) -> None:
        # initialize timedict/basetime here so that timeit() also works
        # when clear() has not been called explicitly first
        self.clear()

    def clear(self):
        self.timedict = {}
        self.basetime = time.perf_counter()

    def timeit(self, name):
        nowtime = time.perf_counter() - self.basetime
        self.timedict[name] = nowtime
        self.basetime = time.perf_counter()


class TimeHolder:
    def __init__(self) -> None:
        self.timedict = {}

    def update(self, _timedict: dict):
        for k, v in _timedict.items():
            if k not in self.timedict:
                self.timedict[k] = AverageMeter(name=k, val_only=True)
            self.timedict[k].update(val=v)

    def final_res(self):
        return {k: v.avg for k, v in self.timedict.items()}

    def __str__(self):
        return json.dumps(self.final_res(), indent=2)


class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self, name, fmt=":f", val_only=False):
        self.name = name
        self.fmt = fmt
        self.val_only = val_only
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        if self.val_only:
            fmtstr = "{name} {val" + self.fmt + "}"
        else:
            fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
        return fmtstr.format(**self.__dict__)
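
`TimeCounter` records wall-clock deltas between named checkpoints, and `TimeHolder` averages those dicts across iterations via `AverageMeter`. A hedged sketch (assumes the classes above plus `time` are in scope):

tc = TimeCounter()
tc.clear()                 # reset the reference point before timing
time.sleep(0.1)
tc.timeit("forward")       # ~0.1 s since the last checkpoint
time.sleep(0.05)
tc.timeit("backward")      # ~0.05 s

holder = TimeHolder()
holder.update(tc.timedict)
print(holder)              # per-name averages, pretty-printed as JSON
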
groundingdino/util/utils.py
ADDED
|
@@ -0,0 +1,610 @@
import argparse
import json
import warnings
from collections import OrderedDict
from copy import deepcopy
from typing import Any, Dict, List

import numpy as np
import torch
from transformers import AutoTokenizer

from groundingdino.util.slconfig import SLConfig


def slprint(x, name="x"):
    if isinstance(x, (torch.Tensor, np.ndarray)):
        print(f"{name}.shape:", x.shape)
    elif isinstance(x, (tuple, list)):
        print("type x:", type(x))
        for i in range(min(10, len(x))):
            slprint(x[i], f"{name}[{i}]")
    elif isinstance(x, dict):
        for k, v in x.items():
            slprint(v, f"{name}[{k}]")
    else:
        print(f"{name}.type:", type(x))


def clean_state_dict(state_dict):
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        if k[:7] == "module.":
            k = k[7:]  # remove `module.`
        new_state_dict[k] = v
    return new_state_dict


def renorm(
    img: torch.FloatTensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
) -> torch.FloatTensor:
    # img: tensor(3,H,W) or tensor(B,3,H,W)
    # return: same as img
    assert img.dim() == 3 or img.dim() == 4, "img.dim() should be 3 or 4 but %d" % img.dim()
    if img.dim() == 3:
        assert img.size(0) == 3, 'img.size(0) should be 3 but "%d". (%s)' % (
            img.size(0),
            str(img.size()),
        )
        img_perm = img.permute(1, 2, 0)
        mean = torch.Tensor(mean)
        std = torch.Tensor(std)
        img_res = img_perm * std + mean
        return img_res.permute(2, 0, 1)
    else:  # img.dim() == 4
        assert img.size(1) == 3, 'img.size(1) should be 3 but "%d". (%s)' % (
            img.size(1),
            str(img.size()),
        )
        img_perm = img.permute(0, 2, 3, 1)
        mean = torch.Tensor(mean)
        std = torch.Tensor(std)
        img_res = img_perm * std + mean
        return img_res.permute(0, 3, 1, 2)


class CocoClassMapper:
    def __init__(self) -> None:
        # COCO's 91 original category ids -> the 80 contiguous ids
        self.category_map_str = {
            "1": 1, "2": 2, "3": 3, "4": 4, "5": 5, "6": 6, "7": 7, "8": 8, "9": 9,
            "10": 10, "11": 11, "13": 12, "14": 13, "15": 14, "16": 15, "17": 16,
            "18": 17, "19": 18, "20": 19, "21": 20, "22": 21, "23": 22, "24": 23,
            "25": 24, "27": 25, "28": 26, "31": 27, "32": 28, "33": 29, "34": 30,
            "35": 31, "36": 32, "37": 33, "38": 34, "39": 35, "40": 36, "41": 37,
            "42": 38, "43": 39, "44": 40, "46": 41, "47": 42, "48": 43, "49": 44,
            "50": 45, "51": 46, "52": 47, "53": 48, "54": 49, "55": 50, "56": 51,
            "57": 52, "58": 53, "59": 54, "60": 55, "61": 56, "62": 57, "63": 58,
            "64": 59, "65": 60, "67": 61, "70": 62, "72": 63, "73": 64, "74": 65,
            "75": 66, "76": 67, "77": 68, "78": 69, "79": 70, "80": 71, "81": 72,
            "82": 73, "84": 74, "85": 75, "86": 76, "87": 77, "88": 78, "89": 79,
            "90": 80,
        }
        self.origin2compact_mapper = {int(k): v - 1 for k, v in self.category_map_str.items()}
        self.compact2origin_mapper = {int(v - 1): int(k) for k, v in self.category_map_str.items()}

    def origin2compact(self, idx):
        return self.origin2compact_mapper[int(idx)]

    def compact2origin(self, idx):
        return self.compact2origin_mapper[int(idx)]


def to_device(item, device):
    if isinstance(item, torch.Tensor):
        return item.to(device)
    elif isinstance(item, list):
        return [to_device(i, device) for i in item]
    elif isinstance(item, dict):
        return {k: to_device(v, device) for k, v in item.items()}
    else:
        raise NotImplementedError(
            "Call Shilong if you use other containers! type: {}".format(type(item))
        )


def get_gaussian_mean(x, axis, other_axis, softmax=True):
    """
    Args:
        x (float): Input images(BxCxHxW)
        axis (int): The index for weighted mean
        other_axis (int): The other index

    Returns: weighted index for axis, BxC
    """
    mat2line = torch.sum(x, axis=other_axis)
    # mat2line = mat2line / mat2line.mean() * 10
    if softmax:
        u = torch.softmax(mat2line, axis=2)
    else:
        u = mat2line / (mat2line.sum(2, keepdim=True) + 1e-6)
    size = x.shape[axis]
    ind = torch.linspace(0, 1, size).to(x.device)
    batch = x.shape[0]
    channel = x.shape[1]
    index = ind.repeat([batch, channel, 1])
    mean_position = torch.sum(index * u, dim=2)
    return mean_position


def get_expected_points_from_map(hm, softmax=True):
    """get_gaussian_map_from_points
    B,C,H,W -> B,N,2 float(0, 1) float(0, 1)
    softargmax function

    Args:
        hm (float): Input images(BxCxHxW)

    Returns:
        weighted index for axis, BxCx2. float between 0 and 1.
    """
    B, C, H, W = hm.shape
    y_mean = get_gaussian_mean(hm, 2, 3, softmax=softmax)  # B,C
    x_mean = get_gaussian_mean(hm, 3, 2, softmax=softmax)  # B,C
    return torch.stack([x_mean, y_mean], dim=2)


# Positional encoding (section 5.1)
# borrowed from nerf
class Embedder:
    def __init__(self, **kwargs):
        self.kwargs = kwargs
        self.create_embedding_fn()

    def create_embedding_fn(self):
        embed_fns = []
        d = self.kwargs["input_dims"]
        out_dim = 0
        if self.kwargs["include_input"]:
            embed_fns.append(lambda x: x)
            out_dim += d

        max_freq = self.kwargs["max_freq_log2"]
        N_freqs = self.kwargs["num_freqs"]

        if self.kwargs["log_sampling"]:
            freq_bands = 2.0 ** torch.linspace(0.0, max_freq, steps=N_freqs)
        else:
            freq_bands = torch.linspace(2.0**0.0, 2.0**max_freq, steps=N_freqs)

        for freq in freq_bands:
            for p_fn in self.kwargs["periodic_fns"]:
                embed_fns.append(lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq))
                out_dim += d

        self.embed_fns = embed_fns
        self.out_dim = out_dim

    def embed(self, inputs):
        return torch.cat([fn(inputs) for fn in self.embed_fns], -1)


def get_embedder(multires, i=0):
    import torch.nn as nn

    if i == -1:
        return nn.Identity(), 3

    embed_kwargs = {
        "include_input": True,
        "input_dims": 3,
        "max_freq_log2": multires - 1,
        "num_freqs": multires,
        "log_sampling": True,
        "periodic_fns": [torch.sin, torch.cos],
    }

    embedder_obj = Embedder(**embed_kwargs)
    embed = lambda x, eo=embedder_obj: eo.embed(x)
    return embed, embedder_obj.out_dim


class APOPMeter:
    def __init__(self) -> None:
        self.tp = 0
        self.fp = 0
        self.tn = 0
        self.fn = 0

    def update(self, pred, gt):
        """
        Input:
            pred, gt: Tensor()
        """
        assert pred.shape == gt.shape
        self.tp += torch.logical_and(pred == 1, gt == 1).sum().item()
        self.fp += torch.logical_and(pred == 1, gt == 0).sum().item()
        self.tn += torch.logical_and(pred == 0, gt == 0).sum().item()
        # false negatives: predicted 0 where the ground truth is 1
        self.fn += torch.logical_and(pred == 0, gt == 1).sum().item()

    def update_cm(self, tp, fp, tn, fn):
        self.tp += tp
        self.fp += fp
        self.tn += tn
        self.fn += fn


def inverse_sigmoid(x, eps=1e-5):
    x = x.clamp(min=0, max=1)
    x1 = x.clamp(min=eps)
    x2 = (1 - x).clamp(min=eps)
    return torch.log(x1 / x2)


def get_raw_dict(args):
    """
    Return the dict contained in args.

    e.g.:
        >>> with open(path, 'w') as f:
                json.dump(get_raw_dict(args), f, indent=2)
    """
    if isinstance(args, argparse.Namespace):
        return vars(args)
    elif isinstance(args, dict):
        return args
    elif isinstance(args, SLConfig):
        return args._cfg_dict
    else:
        raise NotImplementedError("Unknown type {}".format(type(args)))


def stat_tensors(tensor):
    assert tensor.dim() == 1
    tensor_sm = tensor.softmax(0)
    entropy = (tensor_sm * torch.log(tensor_sm + 1e-9)).sum()

    return {
        "max": tensor.max(),
        "min": tensor.min(),
        "mean": tensor.mean(),
        "var": tensor.var(),
        "std": tensor.var() ** 0.5,
        "entropy": entropy,
    }


class NiceRepr:
    """Inherit from this class and define ``__nice__`` to "nicely" print your
    objects.

    Defines ``__str__`` and ``__repr__`` in terms of the ``__nice__`` function.
    Classes that inherit from :class:`NiceRepr` should redefine ``__nice__``.
    If the inheriting class has a ``__len__`` method, then the default
    ``__nice__`` method will return its length.

    Example:
        >>> class Foo(NiceRepr):
        ...    def __nice__(self):
        ...        return 'info'
        >>> foo = Foo()
        >>> assert str(foo) == '<Foo(info)>'
        >>> assert repr(foo).startswith('<Foo(info) at ')

    Example:
        >>> class Bar(NiceRepr):
        ...    pass
        >>> bar = Bar()
        >>> import pytest
        >>> with pytest.warns(None) as record:
        >>>     assert 'object at' in str(bar)
        >>>     assert 'object at' in repr(bar)

    Example:
        >>> class Baz(NiceRepr):
        ...    def __len__(self):
        ...        return 5
        >>> baz = Baz()
        >>> assert str(baz) == '<Baz(5)>'
    """

    def __nice__(self):
        """str: a "nice" summary string describing this module"""
        if hasattr(self, "__len__"):
            # It is a common pattern for objects to use __len__ in __nice__
            # As a convenience we define a default __nice__ for these objects
            return str(len(self))
        else:
            # In all other cases force the subclass to overload __nice__
            raise NotImplementedError(f"Define the __nice__ method for {self.__class__!r}")

    def __repr__(self):
        """str: the string of the module"""
        try:
            nice = self.__nice__()
            classname = self.__class__.__name__
            return f"<{classname}({nice}) at {hex(id(self))}>"
        except NotImplementedError as ex:
            warnings.warn(str(ex), category=RuntimeWarning)
            return object.__repr__(self)

    def __str__(self):
        """str: the string of the module"""
        try:
            classname = self.__class__.__name__
            nice = self.__nice__()
            return f"<{classname}({nice})>"
        except NotImplementedError as ex:
            warnings.warn(str(ex), category=RuntimeWarning)
            return object.__repr__(self)


def ensure_rng(rng=None):
    """Coerces input into a random number generator.

    If the input is None, then a global random state is returned.

    If the input is a numeric value, then that is used as a seed to construct a
    random state. Otherwise the input is returned as-is.

    Adapted from [1]_.

    Args:
        rng (int | numpy.random.RandomState | None):
            if None, then defaults to the global rng. Otherwise this can be an
            integer or a RandomState class
    Returns:
        (numpy.random.RandomState) : rng -
            a numpy random number generator

    References:
        .. [1] https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270  # noqa: E501
    """
    if rng is None:
        rng = np.random.mtrand._rand
    elif isinstance(rng, int):
        rng = np.random.RandomState(rng)
    else:
        rng = rng
    return rng


def random_boxes(num=1, scale=1, rng=None):
    """Simple version of ``kwimage.Boxes.random``

    Returns:
        Tensor: shape (n, 4) in x1, y1, x2, y2 format.

    References:
        https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390

    Example:
        >>> num = 3
        >>> scale = 512
        >>> rng = 0
        >>> boxes = random_boxes(num, scale, rng)
        >>> print(boxes)
        tensor([[280.9925, 278.9802, 308.6148, 366.1769],
                [216.9113, 330.6978, 224.0446, 456.5878],
                [405.3632, 196.3221, 493.3953, 270.7942]])
    """
    rng = ensure_rng(rng)

    tlbr = rng.rand(num, 4).astype(np.float32)

    tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2])
    tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3])
    br_x = np.maximum(tlbr[:, 0], tlbr[:, 2])
    br_y = np.maximum(tlbr[:, 1], tlbr[:, 3])

    tlbr[:, 0] = tl_x * scale
    tlbr[:, 1] = tl_y * scale
    tlbr[:, 2] = br_x * scale
    tlbr[:, 3] = br_y * scale

    boxes = torch.from_numpy(tlbr)
    return boxes


class ModelEma(torch.nn.Module):
    def __init__(self, model, decay=0.9997, device=None):
        super(ModelEma, self).__init__()
        # make a copy of the model for accumulating moving average of weights
        self.module = deepcopy(model)
        self.module.eval()

        self.decay = decay
        self.device = device  # perform ema on a different device from model if set
        if self.device is not None:
            self.module.to(device=device)

    def _update(self, model, update_fn):
        with torch.no_grad():
            for ema_v, model_v in zip(
                self.module.state_dict().values(), model.state_dict().values()
            ):
                if self.device is not None:
                    model_v = model_v.to(device=self.device)
                ema_v.copy_(update_fn(ema_v, model_v))

    def update(self, model):
        self._update(model, update_fn=lambda e, m: self.decay * e + (1.0 - self.decay) * m)

    def set(self, model):
        self._update(model, update_fn=lambda e, m: m)


class BestMetricSingle:
    def __init__(self, init_res=0.0, better="large") -> None:
        self.init_res = init_res
        self.best_res = init_res
        self.best_ep = -1

        self.better = better
        assert better in ["large", "small"]

    def isbetter(self, new_res, old_res):
        if self.better == "large":
            return new_res > old_res
        if self.better == "small":
            return new_res < old_res

    def update(self, new_res, ep):
        if self.isbetter(new_res, self.best_res):
            self.best_res = new_res
            self.best_ep = ep
            return True
        return False

    def __str__(self) -> str:
        return "best_res: {}\t best_ep: {}".format(self.best_res, self.best_ep)

    def __repr__(self) -> str:
        return self.__str__()

    def summary(self) -> dict:
        return {
            "best_res": self.best_res,
            "best_ep": self.best_ep,
        }


class BestMetricHolder:
    def __init__(self, init_res=0.0, better="large", use_ema=False) -> None:
        self.best_all = BestMetricSingle(init_res, better)
        self.use_ema = use_ema
        if use_ema:
            self.best_ema = BestMetricSingle(init_res, better)
            self.best_regular = BestMetricSingle(init_res, better)

    def update(self, new_res, epoch, is_ema=False):
        """
        Return whether the result is the best so far.
        """
        if not self.use_ema:
            return self.best_all.update(new_res, epoch)
        else:
            if is_ema:
                self.best_ema.update(new_res, epoch)
                return self.best_all.update(new_res, epoch)
            else:
                self.best_regular.update(new_res, epoch)
                return self.best_all.update(new_res, epoch)

    def summary(self):
        if not self.use_ema:
            return self.best_all.summary()

        res = {}
        res.update({f"all_{k}": v for k, v in self.best_all.summary().items()})
        res.update({f"regular_{k}": v for k, v in self.best_regular.summary().items()})
        res.update({f"ema_{k}": v for k, v in self.best_ema.summary().items()})
        return res

    def __repr__(self) -> str:
        return json.dumps(self.summary(), indent=2)

    def __str__(self) -> str:
        return self.__repr__()


def targets_to(targets: List[Dict[str, Any]], device):
    """Moves the target dicts to the given device."""
    excluded_keys = [
        "questionId",
        "tokens_positive",
        "strings_positive",
        "tokens",
        "dataset_name",
        "sentence_id",
        "original_img_id",
        "nb_eval",
        "task_id",
        "original_id",
        "token_span",
        "caption",
        "dataset_type",
    ]
    return [
        {k: v.to(device) if k not in excluded_keys else v for k, v in t.items()} for t in targets
    ]


def get_phrases_from_posmap(
    posmap: torch.BoolTensor, tokenized: Dict, tokenizer: AutoTokenizer,
    left_idx: int = 0, right_idx: int = 255
):
    assert isinstance(posmap, torch.Tensor), "posmap must be torch.Tensor"
    if posmap.dim() == 1:
        posmap[0 : left_idx + 1] = False
        posmap[right_idx:] = False
        non_zero_idx = posmap.nonzero(as_tuple=True)[0].tolist()
        token_ids = [tokenized["input_ids"][i] for i in non_zero_idx]
        return tokenizer.decode(token_ids)
    else:
        raise NotImplementedError("posmap must be 1-dim")
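
`get_phrases_from_posmap` is the piece that turns per-token activations back into text. A hedged sketch of that decoding step (not part of the commit), assuming a fast BERT-style tokenizer; the `bert-base-uncased` checkpoint, the caption, and the 0.25 threshold are all illustrative:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
caption = "a cat . a remote control ."
tokenized = tokenizer(caption)

# stand-in for one query's similarity logits over the caption tokens
logits = torch.rand(len(tokenized["input_ids"]))
posmap = logits > 0.25          # bool mask over token positions
phrase = get_phrases_from_posmap(posmap, tokenized, tokenizer)
print(phrase)                   # the sub-phrase whose tokens passed the threshold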

groundingdino/util/visualizer.py ADDED
@@ -0,0 +1,318 @@
# -*- coding: utf-8 -*-
"""
@File    :   visualizer.py
@Time    :   2022/04/05 11:39:33
@Author  :   Shilong Liu
@Contact :   [email protected]
"""

import datetime
import os

import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
from matplotlib import transforms
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
from pycocotools import mask as maskUtils


def renorm(
    img: torch.FloatTensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
) -> torch.FloatTensor:
    # img: tensor(3,H,W) or tensor(B,3,H,W)
    # return: same as img
    assert img.dim() == 3 or img.dim() == 4, "img.dim() should be 3 or 4 but %d" % img.dim()
    if img.dim() == 3:
        assert img.size(0) == 3, 'img.size(0) should be 3 but "%d". (%s)' % (
            img.size(0),
            str(img.size()),
        )
        img_perm = img.permute(1, 2, 0)
        mean = torch.Tensor(mean)
        std = torch.Tensor(std)
        img_res = img_perm * std + mean
        return img_res.permute(2, 0, 1)
    else:  # img.dim() == 4
        assert img.size(1) == 3, 'img.size(1) should be 3 but "%d". (%s)' % (
            img.size(1),
            str(img.size()),
        )
        img_perm = img.permute(0, 2, 3, 1)
        mean = torch.Tensor(mean)
        std = torch.Tensor(std)
        img_res = img_perm * std + mean
        return img_res.permute(0, 3, 1, 2)


class ColorMap:
    def __init__(self, basergb=[255, 255, 0]):
        self.basergb = np.array(basergb)

    def __call__(self, attnmap):
        # attnmap: h, w. np.uint8.
        # return: h, w, 4. np.uint8.
        assert attnmap.dtype == np.uint8
        h, w = attnmap.shape
        res = self.basergb.copy()
        res = res[None][None].repeat(h, 0).repeat(w, 1)  # h, w, 3
        attn1 = attnmap.copy()[..., None]  # h, w, 1
        res = np.concatenate((res, attn1), axis=-1).astype(np.uint8)
        return res


def rainbow_text(x, y, ls, lc, **kw):
    """
    Take a list of strings ``ls`` and colors ``lc`` and place them next to each
    other, with text ls[i] being shown in color lc[i].

    This example shows how to do both vertical and horizontal text, and will
    pass all keyword arguments to plt.text, so you can set the font size,
    family, etc.
    """
    t = plt.gca().transData
    fig = plt.gcf()
    plt.show()

    # horizontal version
    for s, c in zip(ls, lc):
        text = plt.text(x, y, " " + s + " ", color=c, transform=t, **kw)
        text.draw(fig.canvas.get_renderer())
        ex = text.get_window_extent()
        t = transforms.offset_copy(text._transform, x=ex.width, units="dots")

    # # vertical version
    # for s, c in zip(ls, lc):
    #     text = plt.text(x, y, " " + s + " ", color=c, transform=t,
    #                     rotation=90, va='bottom', ha='center', **kw)
    #     text.draw(fig.canvas.get_renderer())
    #     ex = text.get_window_extent()
    #     t = transforms.offset_copy(text._transform, y=ex.height, units='dots')


class COCOVisualizer:
    def __init__(self, coco=None, tokenlizer=None) -> None:
        self.coco = coco

    def visualize(self, img, tgt, caption=None, dpi=180, savedir="vis"):
        """
        img: tensor(3, H, W)
        tgt: make sure they are all on cpu.
            must have items: 'image_id', 'boxes', 'size'
        """
        plt.figure(dpi=dpi)
        plt.rcParams["font.size"] = "5"
        ax = plt.gca()
        img = renorm(img).permute(1, 2, 0)
        ax.imshow(img)

        self.addtgt(tgt)

        if tgt is None:
            image_id = 0
        elif "image_id" not in tgt:
            image_id = 0
        else:
            image_id = tgt["image_id"]

        if caption is None:
            savename = "{}/{}-{}.png".format(
                savedir, int(image_id), str(datetime.datetime.now()).replace(" ", "-")
            )
        else:
            savename = "{}/{}-{}-{}.png".format(
                savedir, caption, int(image_id), str(datetime.datetime.now()).replace(" ", "-")
            )
        print("savename: {}".format(savename))
        os.makedirs(os.path.dirname(savename), exist_ok=True)
        plt.savefig(savename)
        plt.close()

    def addtgt(self, tgt):
        """ """
        if tgt is None or "boxes" not in tgt:
            ax = plt.gca()

            # guard against tgt being None before looking up the caption
            if tgt is not None and "caption" in tgt:
                ax.set_title(tgt["caption"], wrap=True)

            ax.set_axis_off()
            return

        ax = plt.gca()
        H, W = tgt["size"]
        numbox = tgt["boxes"].shape[0]

        color = []
        polygons = []
        boxes = []
        for box in tgt["boxes"].cpu():
            unnormbbox = box * torch.Tensor([W, H, W, H])
            unnormbbox[:2] -= unnormbbox[2:] / 2
            [bbox_x, bbox_y, bbox_w, bbox_h] = unnormbbox.tolist()
            boxes.append([bbox_x, bbox_y, bbox_w, bbox_h])
            poly = [
                [bbox_x, bbox_y],
                [bbox_x, bbox_y + bbox_h],
                [bbox_x + bbox_w, bbox_y + bbox_h],
                [bbox_x + bbox_w, bbox_y],
            ]
            np_poly = np.array(poly).reshape((4, 2))
            polygons.append(Polygon(np_poly))
            c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]
            color.append(c)

        p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.1)
        ax.add_collection(p)
        p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2)
        ax.add_collection(p)

        if "strings_positive" in tgt and len(tgt["strings_positive"]) > 0:
            assert (
                len(tgt["strings_positive"]) == numbox
            ), f"{len(tgt['strings_positive'])} = {numbox}, "
            for idx, strlist in enumerate(tgt["strings_positive"]):
                cate_id = int(tgt["labels"][idx])
                _string = str(cate_id) + ":" + " ".join(strlist)
                bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx]
                ax.text(
                    bbox_x,
                    bbox_y,
                    _string,
                    color="black",
                    bbox={"facecolor": color[idx], "alpha": 0.6, "pad": 1},
                )

        if "box_label" in tgt:
            assert len(tgt["box_label"]) == numbox, f"{len(tgt['box_label'])} = {numbox}, "
            for idx, bl in enumerate(tgt["box_label"]):
                _string = str(bl)
                bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx]
                ax.text(
                    bbox_x,
                    bbox_y,
                    _string,
                    color="black",
                    bbox={"facecolor": color[idx], "alpha": 0.6, "pad": 1},
                )

        if "caption" in tgt:
            ax.set_title(tgt["caption"], wrap=True)
            # plt.figure()
            # rainbow_text(0.0, 0.0, "all unicorns poop rainbows ! ! !".split(),
            #              ['red', 'orange', 'brown', 'green', 'blue', 'purple', 'black'])

        if "attn" in tgt:
            if isinstance(tgt["attn"], tuple):
                tgt["attn"] = [tgt["attn"]]
            for item in tgt["attn"]:
                attn_map, basergb = item
                attn_map = (attn_map - attn_map.min()) / (attn_map.max() - attn_map.min() + 1e-3)
                attn_map = (attn_map * 255).astype(np.uint8)
                cm = ColorMap(basergb)
                heatmap = cm(attn_map)
                ax.imshow(heatmap)
        ax.set_axis_off()

    def showAnns(self, anns, draw_bbox=False):
        """
        Display the specified annotations.
        :param anns (array of object): annotations to display
        :return: None
        """
        if len(anns) == 0:
            return 0
        if "segmentation" in anns[0] or "keypoints" in anns[0]:
            datasetType = "instances"
        elif "caption" in anns[0]:
            datasetType = "captions"
        else:
            raise Exception("datasetType not supported")
        if datasetType == "instances":
            ax = plt.gca()
            ax.set_autoscale_on(False)
            polygons = []
            color = []
            for ann in anns:
                c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]
                if "segmentation" in ann:
                    if type(ann["segmentation"]) == list:
                        # polygon
                        for seg in ann["segmentation"]:
                            poly = np.array(seg).reshape((int(len(seg) / 2), 2))
                            polygons.append(Polygon(poly))
                            color.append(c)
                    else:
                        # mask
                        t = self.imgs[ann["image_id"]]
                        if type(ann["segmentation"]["counts"]) == list:
                            rle = maskUtils.frPyObjects(
                                [ann["segmentation"]], t["height"], t["width"]
                            )
                        else:
                            rle = [ann["segmentation"]]
                        m = maskUtils.decode(rle)
                        img = np.ones((m.shape[0], m.shape[1], 3))
                        if ann["iscrowd"] == 1:
                            color_mask = np.array([2.0, 166.0, 101.0]) / 255
                        if ann["iscrowd"] == 0:
                            color_mask = np.random.random((1, 3)).tolist()[0]
                        for i in range(3):
                            img[:, :, i] = color_mask[i]
                        ax.imshow(np.dstack((img, m * 0.5)))
                if "keypoints" in ann and type(ann["keypoints"]) == list:
                    # turn skeleton into zero-based index
                    sks = np.array(self.loadCats(ann["category_id"])[0]["skeleton"]) - 1
                    kp = np.array(ann["keypoints"])
                    x = kp[0::3]
                    y = kp[1::3]
                    v = kp[2::3]
                    for sk in sks:
                        if np.all(v[sk] > 0):
                            plt.plot(x[sk], y[sk], linewidth=3, color=c)
                    plt.plot(
                        x[v > 0],
                        y[v > 0],
                        "o",
                        markersize=8,
                        markerfacecolor=c,
                        markeredgecolor="k",
                        markeredgewidth=2,
                    )
                    plt.plot(
                        x[v > 1],
                        y[v > 1],
                        "o",
                        markersize=8,
                        markerfacecolor=c,
                        markeredgecolor=c,
                        markeredgewidth=2,
                    )

                if draw_bbox:
                    [bbox_x, bbox_y, bbox_w, bbox_h] = ann["bbox"]
                    poly = [
                        [bbox_x, bbox_y],
                        [bbox_x, bbox_y + bbox_h],
                        [bbox_x + bbox_w, bbox_y + bbox_h],
                        [bbox_x + bbox_w, bbox_y],
                    ]
                    np_poly = np.array(poly).reshape((4, 2))
                    polygons.append(Polygon(np_poly))
                    color.append(c)

            # p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
            # ax.add_collection(p)
            p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2)
            ax.add_collection(p)
        elif datasetType == "captions":
            for ann in anns:
                print(ann["caption"])
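
To see what `COCOVisualizer` expects, a small synthetic example (illustrative only; real inputs come from the model, and the image should already be ImageNet-normalized since `visualize` re-applies the mean/std via `renorm`):

img = torch.rand(3, 480, 640)    # stand-in for a normalized image tensor
tgt = {
    "image_id": 0,
    "size": torch.tensor([480, 640]),                   # H, W
    "boxes": torch.tensor([[0.50, 0.50, 0.40, 0.30],
                           [0.30, 0.60, 0.20, 0.20]]),  # cxcywh, normalized to [0, 1]
    "box_label": ["cat", "dog"],
    "caption": "a cat and a dog",
}
vis = COCOVisualizer()
vis.visualize(img, tgt, savedir="vis")   # writes vis/0-<timestamp>.png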

groundingdino/util/vl_utils.py ADDED
@@ -0,0 +1,100 @@
import os
import random
from typing import List

import torch


def create_positive_map_from_span(tokenized, token_span, max_text_len=256):
    """construct a map such that positive_map[i,j] = True iff box i is associated to token j
    Input:
        - tokenized:
            - input_ids: Tensor[1, ntokens]
            - attention_mask: Tensor[1, ntokens]
        - token_span: list with length num_boxes.
            - each item: [start_idx, end_idx]
    """
    positive_map = torch.zeros((len(token_span), max_text_len), dtype=torch.float)
    for j, tok_list in enumerate(token_span):
        for (beg, end) in tok_list:
            beg_pos = tokenized.char_to_token(beg)
            end_pos = tokenized.char_to_token(end - 1)
            if beg_pos is None:
                try:
                    beg_pos = tokenized.char_to_token(beg + 1)
                    if beg_pos is None:
                        beg_pos = tokenized.char_to_token(beg + 2)
                except:
                    beg_pos = None
            if end_pos is None:
                try:
                    end_pos = tokenized.char_to_token(end - 2)
                    if end_pos is None:
                        end_pos = tokenized.char_to_token(end - 3)
                except:
                    end_pos = None
            if beg_pos is None or end_pos is None:
                continue

            assert beg_pos is not None and end_pos is not None
            if os.environ.get("SHILONG_DEBUG_ONLY_ONE_POS", None) == "TRUE":
                positive_map[j, beg_pos] = 1
                break
            else:
                positive_map[j, beg_pos : end_pos + 1].fill_(1)

    return positive_map / (positive_map.sum(-1)[:, None] + 1e-6)


def build_captions_and_token_span(cat_list, force_lowercase):
    """
    Return:
        captions: str
        cat2tokenspan: dict
            {
                'dog': [[0, 2]],
                ...
            }
    """
    cat2tokenspan = {}
    captions = ""
    for catname in cat_list:
        class_name = catname
        if force_lowercase:
            class_name = class_name.lower()
        if "/" in class_name:
            class_name_list: List = class_name.strip().split("/")
            class_name_list.append(class_name)
            class_name: str = random.choice(class_name_list)

        tokens_positive_i = []
        subnamelist = [i.strip() for i in class_name.strip().split(" ")]
        for subname in subnamelist:
            if len(subname) == 0:
                continue
            if len(captions) > 0:
                captions = captions + " "
            start_idx = len(captions)
            end_idx = start_idx + len(subname)
            tokens_positive_i.append([start_idx, end_idx])
            captions = captions + subname

        if len(tokens_positive_i) > 0:
            captions = captions + " ."
            cat2tokenspan[class_name] = tokens_positive_i

    return captions, cat2tokenspan


def build_id2posspan_and_caption(category_dict: dict):
    """Build id2pos_span and caption from category_dict

    Args:
        category_dict (dict): category_dict
    """
    cat_list = [item["name"].lower() for item in category_dict]
    id2catname = {item["id"]: item["name"].lower() for item in category_dict}
    caption, cat2posspan = build_captions_and_token_span(cat_list, force_lowercase=True)
    id2posspan = {catid: cat2posspan[catname] for catid, catname in id2catname.items()}
    return id2posspan, caption
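
The three helpers above chain together when building detection-style prompts. A sketch under the assumption of a fast tokenizer (the `bert-base-uncased` checkpoint and category names are illustrative); the expected intermediate values are written out as comments:

import torch
from transformers import AutoTokenizer

cat_list = ["dog", "remote control"]
captions, cat2tokenspan = build_captions_and_token_span(cat_list, force_lowercase=True)
# captions      == "dog . remote control ."
# cat2tokenspan == {"dog": [[0, 3]], "remote control": [[6, 12], [13, 20]]}

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
tokenized = tokenizer(captions, return_tensors="pt")
token_span = [cat2tokenspan[c] for c in cat_list]
positive_map = create_positive_map_from_span(tokenized, token_span)
# positive_map: Tensor[2, 256]; each row is normalized over that category's tokens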