"""General-purpose training script for image-to-image translation. This script works for various models (with option '--model': e.g., pix2pix, cyclegan, colorization) and different datasets (with option '--dataset_mode': e.g., aligned, unaligned, single, colorization). You need to specify the dataset ('--dataroot'), experiment name ('--name'), and model ('--model'). It first creates model, dataset, and visualizer given the option. It then does standard network training. During the training, it also visualize/save the images, print/save the loss plot, and save models. The script supports continue/resume training. Use '--continue_train' to resume your previous training. Example: Train a CycleGAN model: python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan Train a pix2pix model: python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA See options/base_options.py and options/train_options.py for more training options. See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md """ import time from options.train_options import TrainOptions from data import create_dataset from models import create_model from util.visualizer import Visualizer from options.test_options import TestOptions from tensorboardX import SummaryWriter import torchvision.utils as vutils import os import torch from skimage.metrics import structural_similarity as ssim from skimage.metrics import peak_signal_noise_ratio as psnr import numpy as np import torch.nn.functional as F import math def dice_score(pred, target, smooth=1e-5): pred_flat = pred.contiguous().view(-1) target_flat = target.contiguous().view(-1) intersection = (pred_flat * target_flat).sum() return (2. 

def evaluate_model(model, test_loader, device, checkpoint_path, iteration):
    model.eval()  # set the model to evaluation mode
    with torch.no_grad():  # disable gradient computation
        ssim_scores, psnr_scores, dice_scores, iou_scores = [], [], [], []
        Diff_hs = []
        for batch in test_loader:
            model.set_input(batch)
            ground_truths, labels, normal_vert_labels, masks, CAMs, heights, x1, x2, slice_ratio = \
                model.real_B, model.real_B_mask, model.normal_vert, model.mask, model.CAM, model.height, \
                model.x1, model.x2, model.slice_ratio
            maxheight = model.maxheight

            ct_upper_list = []
            ct_bottom_list = []
            for i in range(ground_truths.shape[0]):
                ct_upper = ground_truths[i, :, :x1[i], :]
                ct_bottom = ground_truths[i, :, x2[i]:, :]
                ct_upper_list.append(ct_upper.unsqueeze(0))    # add a batch dimension so the slices can be concatenated later
                ct_bottom_list.append(ct_bottom.unsqueeze(0))

            # model inference
            CAM_temp = 1 - CAMs
            inputs = model.real_A
            outputs = model.netG(inputs, masks, CAM_temp, slice_ratio)  # adjust to match your model
            coarse_seg_sigmoid, fine_seg_sigmoid, stage1, stage2, offset_flow, pred1_h, pred2_h = outputs  # adjust to match your outputs
            pred1_h = pred1_h.T * maxheight
            pred2_h = pred2_h.T * maxheight
            coarse_seg_binary = torch.where(coarse_seg_sigmoid > 0.5,
                                            torch.ones_like(coarse_seg_sigmoid),
                                            torch.zeros_like(coarse_seg_sigmoid))
            fine_seg_binary = torch.where(fine_seg_sigmoid > 0.5,
                                          torch.ones_like(fine_seg_sigmoid),
                                          torch.zeros_like(fine_seg_sigmoid))

            fake_B_raw_list = []
            for i in range(stage2.size(0)):
                height = math.ceil(pred2_h[0][i].item())  # target height of the current image
                if height  # ... (the remainder of evaluate_model is truncated in the source)
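
# The source is truncated above. In the upstream pytorch-CycleGAN-and-pix2pix
# train.py, this point would be followed by the standard setup before the outer
# epoch loop; a minimal sketch of that setup, assuming the same structure here
# (`dataset_test` and `writer` are inferred from their use further below, and their
# exact construction is an assumption):
#
#   opt = TrainOptions().parse()             # get training options
#   dataset = create_dataset(opt)            # training set, given opt.dataset_mode
#   dataset_size = len(dataset)              # number of images in the training set
#   dataset_test = create_dataset(TestOptions().parse())  # held-out set for evaluate_model (assumed)
#   model = create_model(opt)                # create a model given opt.model
#   model.setup(opt)                         # load and print networks; create schedulers
#   visualizer = Visualizer(opt)             # displays/saves images and loss plots
#   writer = SummaryWriter(os.path.join(opt.checkpoints_dir, opt.name))  # TensorBoard logging (log dir assumed)
#   total_iters = 0                          # total number of training iterations
#   for epoch in range(opt.epoch_count, opt.n_epochs + opt.n_epochs_decay + 1):
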
    epoch_start_time = time.time()  # timer for the entire epoch
    iter_data_time = time.time()    # timer for data loading per iteration
    epoch_iter = 0                  # number of training iterations in the current epoch, reset to 0 every epoch
    visualizer.reset()              # reset the visualizer: make sure it saves the results to HTML at least once per epoch
    model.update_learning_rate()    # update learning rates at the beginning of every epoch

    for i, data in enumerate(dataset):  # inner loop within one epoch
        iter_start_time = time.time()   # timer for computation per iteration
        if total_iters % opt.print_freq == 0:
            t_data = iter_start_time - iter_data_time

        total_iters += opt.batch_size
        epoch_iter += opt.batch_size
        model.set_input(data)        # unpack data from the dataset and apply preprocessing
        model.optimize_parameters()  # calculate loss functions, get gradients, update network weights

        if total_iters % opt.display_freq == 0:  # display images on visdom and save images to an HTML file
            save_result = total_iters % opt.update_html_freq == 0
            model.compute_visuals()
            visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)

        if total_iters % opt.print_freq == 0:  # print training losses and save logging information to disk
            losses = model.get_current_losses()
            t_comp = (time.time() - iter_start_time) / opt.batch_size
            visualizer.print_current_losses(epoch, epoch_iter, losses, t_comp, t_data)
            if opt.display_id > 0:
                visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)

        if total_iters % opt.save_latest_freq == 0:  # cache our latest model every <save_latest_freq> iterations
            print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters))
            save_suffix = 'iter_%d' % total_iters if opt.save_by_iter else 'latest'
            model.save_networks(save_suffix)

        iter_data_time = time.time()

    if epoch % opt.save_epoch_freq == 0:  # cache our model every <save_epoch_freq> epochs
        print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters))
        model.save_networks('latest')
        model.save_networks(epoch)

    # evaluate once every 15 epochs
    if epoch % 15 == 0:
        avg_ssim, avg_psnr, avg_dice, avg_iou, avg_diffh = evaluate_model(
            model, dataset_test, "cuda:0",
            os.path.join(opt.checkpoints_dir, opt.name), epoch)
        # log the evaluation metrics to TensorBoard
        writer.add_scalar('Eval/SSIM', avg_ssim, epoch)
        writer.add_scalar('Eval/PSNR', avg_psnr, epoch)
        writer.add_scalar('Eval/Dice', avg_dice, epoch)
        writer.add_scalar('Eval/IoU', avg_iou, epoch)
        writer.add_scalar('Eval/DiffH', avg_diffh, epoch)
        print(f'epoch[{epoch}/{opt.n_epochs + opt.n_epochs_decay + 1}], '
              f'SSIM: {avg_ssim}, PSNR: {avg_psnr}, Dice: {avg_dice}, IoU: {avg_iou}, Diffh: {avg_diffh}')

    print('End of epoch %d / %d \t Time Taken: %d sec' %
          (epoch, opt.n_epochs + opt.n_epochs_decay, time.time() - epoch_start_time))
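
# Once the epoch loop has finished, the TensorBoard writer should be closed so that
# any buffered events are flushed to disk (a small sketch, assuming `writer` was
# created as outlined above):
writer.close()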