""" Misc functions, including distributed helpers. Mostly copy-paste from torchvision references. """ import os import io import subprocess import time from collections import defaultdict, deque import datetime from typing import Optional, List, Dict, Any import torch import torch.distributed as dist from torch import Tensor import functools _LOCAL_PROCESS_GROUP = None @functools.lru_cache() def _get_global_gloo_group(): """ Return a process group based on gloo backend, containing all the ranks The result is cached. """ if dist.get_backend() == "nccl": return dist.new_group(backend="gloo") return dist.group.WORLD # needed due to empty tensor bug in pytorch and torchvision 0.5 import torchvision # if float(torchvision.__version__[:3]) < 0.7: # from torchvision.ops import _new_empty_tensor # from torchvision.ops.misc import _output_size class SmoothedValue(object): """Track a series of values and provide access to smoothed values over a window or the global series average. """ def __init__(self, window_size=20, fmt=None): if fmt is None: fmt = "{median:.4f} ({global_avg:.4f})" self.deque = deque(maxlen=window_size) self.total = 0.0 self.count = 0 self.fmt = fmt def update(self, value, n=1): self.deque.append(value) self.count += n self.total += value * n def synchronize_between_processes(self): """ Warning: does not synchronize the deque! """ if not is_dist_avail_and_initialized(): return t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') dist.barrier() dist.all_reduce(t) t = t.tolist() self.count = int(t[0]) self.total = t[1] @property def median(self): d = torch.tensor(list(self.deque)) return d.median().item() @property def avg(self): d = torch.tensor(list(self.deque), dtype=torch.float32) return d.mean().item() @property def global_avg(self): return self.total / self.count @property def max(self): return max(self.deque) @property def value(self): return self.deque[-1] def __str__(self): return self.fmt.format( median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value) # copy-paste from mdetr: https://github.com/ashkamath/mdetr/blob/main/util/dist.py def all_gather(data): """ Run all_gather on arbitrary picklable data (not necessarily tensors) Args: data: any picklable object Returns: list[data]: list of data gathered from each rank """ world_size = get_world_size() if world_size == 1: return [data] cpu_group = None if os.getenv("MDETR_CPU_REDUCE") == "1": cpu_group = _get_global_gloo_group() buffer = io.BytesIO() torch.save(data, buffer) data_view = buffer.getbuffer() device = "cuda" if cpu_group is None else "cpu" tensor = torch.ByteTensor(data_view).to(device) # obtain Tensor size of each rank local_size = torch.tensor([tensor.numel()], device=device, dtype=torch.long) size_list = [torch.tensor([0], device=device, dtype=torch.long) for _ in range(world_size)] if cpu_group is None: dist.all_gather(size_list, local_size) else: print("gathering on cpu") dist.all_gather(size_list, local_size, group=cpu_group) size_list = [int(size.item()) for size in size_list] max_size = max(size_list) assert isinstance(local_size.item(), int) local_size = int(local_size.item()) # receiving Tensor from all ranks # we pad the tensor because torch all_gather does not support # gathering tensors of different shapes tensor_list = [] for _ in size_list: tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device=device)) if local_size != max_size: padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device=device) tensor = torch.cat((tensor, padding), dim=0) if cpu_group is None: dist.all_gather(tensor_list, tensor) else: dist.all_gather(tensor_list, tensor, group=cpu_group) data_list = [] for size, tensor in zip(size_list, tensor_list): tensor = torch.split(tensor, [size, max_size - size], dim=0)[0] buffer = io.BytesIO(tensor.cpu().numpy()) obj = torch.load(buffer) data_list.append(obj) return data_list def reduce_dict(input_dict, average=True): """ Args: input_dict (dict): all the values will be reduced average (bool): whether to do average or sum Reduce the values in the dictionary from all processes so that all processes have the averaged results. Returns a dict with the same fields as input_dict, after reduction. """ world_size = get_world_size() if world_size < 2: return input_dict with torch.no_grad(): names = [] values = [] # sort the keys so that they are consistent across processes for k in sorted(input_dict.keys()): names.append(k) values.append(input_dict[k]) values = torch.stack(values, dim=0) dist.all_reduce(values) if average: values /= world_size reduced_dict = {k: v for k, v in zip(names, values)} return reduced_dict class MetricLogger(object): def __init__(self, delimiter="\t"): self.meters = defaultdict(SmoothedValue) self.delimiter = delimiter def update(self, **kwargs): for k, v in kwargs.items(): if isinstance(v, torch.Tensor): v = v.item() assert isinstance(v, (float, int)) self.meters[k].update(v) def __getattr__(self, attr): if attr in self.meters: return self.meters[attr] if attr in self.__dict__: return self.__dict__[attr] raise AttributeError("'{}' object has no attribute '{}'".format( type(self).__name__, attr)) def __str__(self): loss_str = [] for name, meter in self.meters.items(): loss_str.append( "{}: {}".format(name, str(meter)) ) return self.delimiter.join(loss_str) def synchronize_between_processes(self): for meter in self.meters.values(): meter.synchronize_between_processes() def add_meter(self, name, meter): self.meters[name] = meter def log_every(self, iterable, print_freq, header=None): i = 0 if not header: header = '' start_time = time.time() end = time.time() iter_time = SmoothedValue(fmt='{avg:.4f}') data_time = SmoothedValue(fmt='{avg:.4f}') space_fmt = ':' + str(len(str(len(iterable)))) + 'd' if torch.cuda.is_available(): log_msg = self.delimiter.join([ header, '[{0' + space_fmt + '}/{1}]', 'eta: {eta}', '{meters}', 'time: {time}', 'data: {data}', 'max mem: {memory:.0f}' ]) else: log_msg = self.delimiter.join([ header, '[{0' + space_fmt + '}/{1}]', 'eta: {eta}', '{meters}', 'time: {time}', 'data: {data}' ]) MB = 1024.0 * 1024.0 for obj in iterable: data_time.update(time.time() - end) yield obj iter_time.update(time.time() - end) if i % print_freq == 0 or i == len(iterable) - 1: eta_seconds = iter_time.global_avg * (len(iterable) - i) eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) if torch.cuda.is_available(): print(log_msg.format( i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time), memory=torch.cuda.max_memory_allocated() / MB)) else: print(log_msg.format( i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time))) i += 1 end = time.time() total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) print('{} Total time: {} ({:.4f} s / it)'.format( header, total_time_str, total_time / len(iterable))) def get_sha(): cwd = os.path.dirname(os.path.abspath(__file__)) def _run(command): return subprocess.check_output(command, cwd=cwd).decode('ascii').strip() sha = 'N/A' diff = "clean" branch = 'N/A' try: sha = _run(['git', 'rev-parse', 'HEAD']) subprocess.check_output(['git', 'diff'], cwd=cwd) diff = _run(['git', 'diff-index', 'HEAD']) diff = "has uncommited changes" if diff else "clean" branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD']) except Exception: pass message = f"sha: {sha}, status: {diff}, branch: {branch}" return message def collate_fn(batch): # batch: imgs, targets batch = list(zip(*batch)) batch[0] = nested_tensor_from_videos_list(batch[0], size_divisibility=32) # batch[0]: samples: NestedTensor(tensor, mask) # tensor: [B, T, C, H, W], mask: [B, T, H, W] # batch[1]: targets: list[dict] return tuple(batch) def _max_by_axis(the_list): # type: (List[List[int]]) -> List[int] maxes = the_list[0] for sublist in the_list[1:]: # (C, H, W) for index, item in enumerate(sublist): maxes[index] = max(maxes[index], item) return maxes def nested_tensor_from_tensor_list(tensor_list: List[Tensor], size_divisibility=1, split=True): """ This function receives a list of image tensors and returns a NestedTensor of the padded images, along with their padding masks (true for padding areas, false otherwise). """ # TODO make this more general # if image tensor is stacked as [T*3, H, W], then use split if split: tensor_list = [tensor.split(3,dim=0) for tensor in tensor_list] tensor_list = [item for sublist in tensor_list for item in sublist] # list[tensor], length = batch_size x time if tensor_list[0].ndim == 3: # TODO make it support different-sized images max_size = _max_by_axis([list(img.shape) for img in tensor_list]) if size_divisibility > 1: # so that the mask dowmsample can be matched stride = size_divisibility # the last two dims are [H, W], both subject to divisibility requirement max_size[-2] = (max_size[-2] + (stride - 1)) // stride * stride max_size[-1] = (max_size[-1] + (stride - 1)) // stride * stride # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list])) batch_shape = [len(tensor_list)] + max_size b, c, h, w = batch_shape dtype = tensor_list[0].dtype device = tensor_list[0].device tensor = torch.zeros(batch_shape, dtype=dtype, device=device) mask = torch.ones((b, h, w), dtype=torch.bool, device=device) for img, pad_img, m in zip(tensor_list, tensor, mask): pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) m[: img.shape[1], :img.shape[2]] = False # valid locations else: raise ValueError('not supported') return NestedTensor(tensor, mask) def nested_tensor_from_videos_list(videos_list: List[Tensor], size_divisibility=1): """ This function receives a list of videos (each of shape [T, C, H, W]) and returns a NestedTensor of the padded videos (shape [B, T, C, PH, PW], along with their padding masks (true for padding areas, false otherwise, of shape [B, T, PH, PW]. """ max_size = _max_by_axis([list(img.shape) for img in videos_list]) if size_divisibility > 1: # so that the mask dowmsample can be matched stride = size_divisibility # the last two dims are [H, W], both subject to divisibility requirement max_size[-2] = (max_size[-2] + (stride - 1)) // stride * stride max_size[-1] = (max_size[-1] + (stride - 1)) // stride * stride padded_batch_shape = [len(videos_list)] + max_size b, t, c, h, w = padded_batch_shape dtype = videos_list[0].dtype device = videos_list[0].device padded_videos = torch.zeros(padded_batch_shape, dtype=dtype, device=device) videos_pad_masks = torch.ones((b, t, h, w), dtype=torch.bool, device=device) for vid_frames, pad_vid_frames, vid_pad_m in zip(videos_list, padded_videos, videos_pad_masks): pad_vid_frames[:vid_frames.shape[0], :, :vid_frames.shape[2], :vid_frames.shape[3]].copy_(vid_frames) vid_pad_m[:vid_frames.shape[0], :vid_frames.shape[2], :vid_frames.shape[3]] = False return NestedTensor(padded_videos, videos_pad_masks) class NestedTensor(object): def __init__(self, tensors, mask: Optional[Tensor]): self.tensors = tensors self.mask = mask def to(self, device): # type: (Device) -> NestedTensor # noqa cast_tensor = self.tensors.to(device) mask = self.mask if mask is not None: assert mask is not None cast_mask = mask.to(device) else: cast_mask = None return NestedTensor(cast_tensor, cast_mask) def decompose(self): return self.tensors, self.mask def __repr__(self): return str(self.tensors) def setup_for_distributed(is_master): """ This function disables printing when not in master process """ import builtins as __builtin__ builtin_print = __builtin__.print def print(*args, **kwargs): force = kwargs.pop('force', False) if is_master or force: builtin_print(*args, **kwargs) __builtin__.print = print def is_dist_avail_and_initialized(): if not dist.is_available(): return False if not dist.is_initialized(): return False return True def get_world_size(): if not is_dist_avail_and_initialized(): return 1 return dist.get_world_size() def get_rank(): if not is_dist_avail_and_initialized(): return 0 return dist.get_rank() def is_main_process(): return get_rank() == 0 def save_on_master(*args, **kwargs): if is_main_process(): torch.save(*args, **kwargs, _use_new_zipfile_serialization=False) def init_distributed_mode(args): if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: args.rank = int(os.environ["RANK"]) args.world_size = int(os.environ['WORLD_SIZE']) args.gpu = int(os.environ['LOCAL_RANK']) args.dist_url = 'env://' os.environ['LOCAL_SIZE'] = str(torch.cuda.device_count()) elif 'SLURM_PROCID' in os.environ: proc_id = int(os.environ['SLURM_PROCID']) ntasks = int(os.environ['SLURM_NTASKS']) node_list = os.environ['SLURM_NODELIST'] num_gpus = torch.cuda.device_count() addr = subprocess.getoutput( 'scontrol show hostname {} | head -n1'.format(node_list)) os.environ['MASTER_PORT'] = os.environ.get('MASTER_PORT', '29500') os.environ['MASTER_ADDR'] = addr os.environ['WORLD_SIZE'] = str(ntasks) os.environ['RANK'] = str(proc_id) os.environ['LOCAL_RANK'] = str(proc_id % num_gpus) os.environ['LOCAL_SIZE'] = str(num_gpus) args.dist_url = 'env://' args.world_size = ntasks args.rank = proc_id args.gpu = proc_id % num_gpus else: print('Not using distributed mode') args.distributed = False return args.distributed = True torch.cuda.set_device(args.gpu) args.dist_backend = 'nccl' print('| distributed init (rank {}): {}'.format( args.rank, args.dist_url), flush=True) torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank) torch.distributed.barrier() setup_for_distributed(args.rank == 0) @torch.no_grad() def accuracy(output, target, topk=(1,)): """Computes the precision@k for the specified values of k""" if target.numel() == 0: return [torch.zeros([], device=output.device)] maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view(-1).float().sum(0) res.append(correct_k.mul_(100.0 / batch_size)) return res def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None): # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor """ Equivalent to nn.functional.interpolate, but with support for empty batch sizes. This will eventually be supported natively by PyTorch, and this class can go away. """ # if float(torchvision.__version__[:3]) < 0.7: # if input.numel() > 0: # return torch.nn.functional.interpolate( # input, size, scale_factor, mode, align_corners # ) # output_shape = _output_size(2, input, size, scale_factor) # output_shape = list(input.shape[:-2]) + list(output_shape) # return _new_empty_tensor(input, output_shape) # else: # return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners) return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners) def targets_to(targets: List[Dict[str, Any]], device): """Moves the target dicts to the given device. excluded_keys = [ "questionId", "tokens_positive", "tokens", "dataset_name", "sentence_id", "original_img_id", "nb_eval", "task_id", "original_id", ] """ if "dataset_name" in targets[0]: # for ["refcoco", "refcoco+", "refcocog"] evaluation return [{k: v.to(device) for k, v in t.items() if k not in ["caption", "dataset_name", "original_id"]} for t in targets] else: return [{k: v.to(device) for k, v in t.items() if k not in ["caption", "dataset_name", "original_id", "image_id"]} for t in targets] def get_total_grad_norm(parameters, norm_type=2): parameters = list(filter(lambda p: p.grad is not None, parameters)) norm_type = float(norm_type) device = parameters[0].grad.device total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type) def inverse_sigmoid(x, eps=1e-5): x = x.clamp(min=0, max=1) x1 = x.clamp(min=eps) x2 = (1 - x).clamp(min=eps) return torch.log(x1/x2)