import torch
from PIL import Image
from torchvision.transforms import functional as TVTF

from .mafs import round_up_to_multiple


def prepare_image_for_inference(image_pil: Image.Image) -> torch.Tensor:
    """Convert a PIL image to a float32 RGB tensor (CHW) on the CPU."""
    if image_pil.mode != 'RGB':
        image_pil = image_pil.convert('RGB')
    # Be explicit about device and dtype in case the environment has unusual defaults.
    image_pt = TVTF.to_tensor(image_pil)
    image_pt = image_pt.to(device='cpu', dtype=torch.float32)
    return image_pt
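

# Usage sketch (illustrative only, not executed on import; 'example.jpg' is a
# placeholder path, not part of this repo):
#   img = Image.open('example.jpg')
#   x = prepare_image_for_inference(img)   # float32 CHW tensor scaled to [0, 1]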


def pad_to_divisible(image_tensor: torch.Tensor, tile_size: int = 128):
    """Reflect-pad a CHW tensor so both spatial dims are multiples of tile_size.

    Returns the padded tensor together with the left and top offsets, which can
    be used to crop the result back to the original size.
    """
    c, h, w = image_tensor.shape
    # If the dims are already divisible by the tile size, we're good.
    if h % tile_size == 0 and w % tile_size == 0:
        return image_tensor, 0, 0

    expanded_w = round_up_to_multiple(w, tile_size)
    expanded_h = round_up_to_multiple(h, tile_size)

    l, t, r, b = 0, 0, 0, 0
    # Split the padding as evenly as possible between the two sides of each dimension.
    if expanded_w > w:
        diff = expanded_w - w
        l = diff // 2
        r = diff - l
    if expanded_h > h:
        diff = expanded_h - h
        t = diff // 2
        b = diff - t

    image_tensor = TVTF.pad(image_tensor, padding=[l, t, r, b], padding_mode='reflect')
    return image_tensor, l, t
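

# Usage sketch (illustrative; `model` is a hypothetical network that preserves
# spatial size). Cropping with the returned offsets undoes the padding afterwards:
#   x = prepare_image_for_inference(img)
#   padded, left, top = pad_to_divisible(x, tile_size=128)
#   y = model(padded.unsqueeze(0)).squeeze(0)
#   y = y[:, top:top + x.shape[1], left:left + x.shape[2]]   # back to the original H x W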


if __name__ == '__main__':
    print('__main__ not supported in modules.')