import torch
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from extract.getim import load_image
from torchvision import transforms
import cv2

transform = transforms.Compose([
    transforms.ToTensor(),  # convert a numpy array or PIL.Image to a (C, H, W) tensor and scale values by 1/255 into [0, 1]
])  # note: only ToTensor is applied here; no ImageNet mean/std normalization
# fcontent = load_image("./ori/0.jpg", transform=None, shape=[512, 256])
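# Usage sketch (illustrative only): applying `transform` to a PIL image yields a
# float tensor with channels first and values scaled into [0, 1].
#   img = Image.open("./extract/image/0.jpg")   # hypothetical local path
#   t = transform(img)                          # t.shape == (3, H, W), t.max() <= 1.0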
def show_cut(path, left, upper, right, lower):
    """
    Display the original image next to the cropped region.
    :param path: the image (a PIL.Image, despite the parameter name)
    :param left: distance in pixels from the image's left edge to the crop's left edge
    :param upper: distance in pixels from the image's top edge to the crop's top edge
    :param right: distance in pixels from the image's left edge to the crop's right edge
    :param lower: distance in pixels from the image's top edge to the crop's bottom edge
    The arguments must satisfy: lower > upper and right > left.
    """
    img = path
    # print("This image's size: {}".format(img.size))  # (W, H)
    # img.save("kkk.jpg")
    # plt.figure("Image Contrast")
    #
    # plt.subplot(1, 2, 1)
    # plt.title('origin')
    #
    # plt.imshow(img)
    # plt.axis('off')
    #
    # box = (left, upper, right, lower)
    # roi = img.crop(box)
    #
    # plt.subplot(1, 2, 2)
    # plt.title('roi')
    # plt.imshow(roi)
    # plt.axis('off')
    # plt.show()
def image_cut_save(path, left, upper, right, lower):
    """
    Crop a region from an image and return it as a tensor.
    :param path: the image (a PIL.Image, despite the parameter name)
    :param left: distance in pixels from the image's left edge to the crop's left edge
    :param upper: distance in pixels from the image's top edge to the crop's top edge
    :param right: distance in pixels from the image's left edge to the crop's right edge
    :param lower: distance in pixels from the image's top edge to the crop's bottom edge
    The arguments must satisfy: lower > upper and right > left.
    """
    img = path  # `path` is already a loaded PIL.Image, not a file path
    box = (left, upper, right, lower)
    roi = img.crop(box)
    # roi.save(save_path)  # saving to disk is disabled; the crop is returned as a tensor instead
    return transform(roi)
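# Usage sketch (illustrative; assumes ./extract/image/0.jpg exists and that
# load_image returns a PIL.Image when transform=None):
#   img = load_image("./extract/image/0.jpg", transform=None, shape=[256, 128])
#   patch = image_cut_save(img, 0, 0, 128, 128)   # (3, 128, 128) tensor in [0, 1] for an RGB input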
# Save the cropped images
# def getcontent(fcontent, gap):
#     Intgap = gap / 9
#     a = torch.Tensor()
#     for i in range(10):
#         pic_path = fcontent
#         # pic_save_dir_path = './out2/0-' + str(i) + ".jpg"
#         left, upper, right, lower = Intgap * i, 0, Intgap * i + gap, gap
#         a = torch.cat([a, image_cut_save(pic_path, left, upper, right, lower).unsqueeze(1)], dim=1)
#     return a
# def cobtwoten(image_path):
#     fcontent = load_image(image_path, transform=None, shape=[512, 256])
#     Intgap = 256
#     a = torch.Tensor()
#     for i in range(2):
#         pic_path = fcontent
#         # pic_save_dir_path = './out2/0-' + str(i) + ".jpg"
#         left, upper, right, lower = Intgap * i, 0, Intgap * i + Intgap, Intgap
#         a = torch.cat([a, image_cut_save(pic_path, left, upper, right, lower).unsqueeze(1)], dim=1)
#     return a.unsqueeze(0)
def cobtwoten(image_path):
    """Slice the loaded image into 10 overlapping 128x128 crops, stepping left-to-right
    by 128/9 px (the 10 windows span 256 px of width), and stack them as frames."""
    fcontent = load_image(image_path, transform=None, shape=[256, 128])
    Intgap = 128 / 9  # horizontal stride between consecutive crops
    a = torch.Tensor()
    for i in range(10):
        pic_path = fcontent
        # pic_save_dir_path = './out2/0-' + str(i) + ".jpg"
        left, upper, right, lower = Intgap * i, 0, Intgap * i + 128, 128
        a = torch.cat([a, image_cut_save(pic_path, left, upper, right, lower).unsqueeze(1)], dim=1)
    return a.unsqueeze(0)
def cobtwoten256(image_path):
    """Same as cobtwoten but at double resolution: 10 overlapping 256x256 crops
    stepped by 256/9 px, stacked as frames."""
    fcontent = load_image(image_path, transform=None, shape=[512, 256])
    Intgap = 256 / 9  # horizontal stride between consecutive crops
    a = torch.Tensor()
    for i in range(10):
        pic_path = fcontent
        # pic_save_dir_path = './out2/0-' + str(i) + ".jpg"
        left, upper, right, lower = Intgap * i, 0, Intgap * i + 256, 256
        a = torch.cat([a, image_cut_save(pic_path, left, upper, right, lower).unsqueeze(1)], dim=1)
    return a.unsqueeze(0)
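# Sketch (illustrative; assumes ./extract/image/0.jpg exists and is RGB): both helpers
# return a 5-D tensor laid out as (batch, channels, frames, H, W) before any permute.
#   clip = cobtwoten("./extract/image/0.jpg")
#   print(clip.shape)      # expected: torch.Size([1, 3, 10, 128, 128])
#   clip256 = cobtwoten256("./extract/image/0.jpg")
#   print(clip256.shape)   # expected: torch.Size([1, 3, 10, 256, 256])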
#
# fcontent = load_image("./extract/image/0.jpg", transform=None, shape=[256, 128])
# Intgap = 128
# a = torch.Tensor()
# for i in range(2):
#     pic_path = fcontent
#     pic_save_dir_path = './out2/0-' + str(i) + ".jpg"
#     left, upper, right, lower = Intgap * i, 0, Intgap * i + 128, 128
#     a = torch.cat([a, image_cut_save(pic_path, left, upper, right, lower, pic_save_dir_path).unsqueeze(1)], dim=1)
# print(a.shape)
def imgsave(image, path):
    """Save a (1, C, H, W) tensor with values in [0, 1] as an image file."""
    image = image.squeeze(0)        # drop the batch dimension -> (C, H, W)
    image = image.permute(1, 2, 0)  # channels-last for PIL -> (H, W, C)
    image_np = image.cpu().numpy() * 255
    image_np = image_np.astype(np.uint8)
    Image.fromarray(image_np).save(path)  # convert the array to a PIL image and write it to disk
# lik = ["0"]
# for name in lik:
#     videos = cobtwoten("./extract/image/0.jpg").permute(0, 2, 1, 3, 4)
#     print(videos.shape)
#     for i in range(10):
#         frame = videos[:, i, :, :]
#         imgsave(frame, "./out2/" + str(i) + ".jpg")