import os os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "False" os.environ["TOKENIZERS_PARALLELISM"] = "true" import numpy as np import gradio as gr import torch from PIL import Image from omegaconf import OmegaConf from transformers import AutoTokenizer import torch.nn.functional as F from transformers import CLIPImageProcessor import sys sys.path.insert(0, ".") from training import conversation as conversation_lib from prompting_utils import UniversalPrompting, create_attention_mask_predict_next, create_attention_mask_for_mmu_vit from training_utils import image_transform from models import Showo, MAGVITv2, get_mask_chedule, CLIPVisionTower device = torch.device("cuda" if torch.cuda.is_available() else "cpu") conversation_lib.default_conversation = conversation_lib.conv_templates["phi1.5"] SYSTEM_PROMPT = "A chat between a curious user and an artificial intelligence assistant. " \ "The assistant gives helpful, detailed, and polite answers to the user's questions." SYSTEM_PROMPT_LEN = 28 def load_discrete_checkpoint(): config = OmegaConf.load("configs/showo_demo.yaml") tokenizer = AutoTokenizer.from_pretrained(config.model.showo.llm_model_path, padding_side="left") uni_prompting = UniversalPrompting(tokenizer, max_text_len=config.dataset.preprocessing.max_seq_length, special_tokens=("<|soi|>", "<|eoi|>", "<|sov|>", "<|eov|>", "<|t2i|>", "<|mmu|>", "<|t2v|>", "<|v2v|>", "<|lvg|>"), ignore_id=-100, cond_dropout_prob=config.training.cond_dropout_prob) vq_model = MAGVITv2() vq_model = vq_model.from_pretrained(config.model.vq_model.vq_model_name).to(device) vq_model.requires_grad_(False) vq_model.eval() model = Showo.from_pretrained(config.model.showo.pretrained_model_path).to(device) model.eval() mask_token_id = model.config.mask_token_id return config, uni_prompting, tokenizer, vq_model, model, mask_token_id config_gen, uni_prompting_gen, tokenizer_gen, vq_model_gen, model_gen, mask_token_id = load_discrete_checkpoint() def load_continuous_checkpoint(): config = OmegaConf.load("configs/showo_demo_w_clip_vit.yaml") tokenizer = AutoTokenizer.from_pretrained(config.model.showo.llm_model_path, padding_side="left") uni_prompting = UniversalPrompting(tokenizer, max_text_len=config.dataset.preprocessing.max_seq_length, special_tokens=( "<|soi|>", "<|eoi|>", "<|sov|>", "<|eov|>", "<|t2i|>", "<|mmu|>", "<|t2v|>", "<|v2v|>", "<|lvg|>"), ignore_id=-100, cond_dropout_prob=config.training.cond_dropout_prob) vision_tower_name = "openai/clip-vit-large-patch14-336" vision_tower = CLIPVisionTower(vision_tower_name).to(device) clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower_name) model = Showo.from_pretrained(config.model.showo.pretrained_model_path).to(device) model.eval() return config, uni_prompting, tokenizer, model, vision_tower, clip_image_processor config_mmu = uni_prompting_mmu = tokenizer_mmu = model_mmu = vision_tower = clip_image_processor = None def text_to_image_generation(input_text, guidance_scale, generation_timesteps): config, uni_prompting, tokenizer, vq_model, model = config_gen, uni_prompting_gen, tokenizer_gen, vq_model_gen, model_gen prompts = [input_text] config.training.batch_size = config.batch_size = 1 config.training.guidance_scale = config.guidance_scale = guidance_scale config.training.generation_timesteps = config.generation_timesteps = generation_timesteps image_tokens = torch.ones((len(prompts), config.model.showo.num_vq_tokens), dtype=torch.long, device=device) * mask_token_id input_ids, _ = uni_prompting((prompts, image_tokens), 't2i_gen') if 
def text_to_image_generation(input_text, guidance_scale, generation_timesteps):
    config, uni_prompting, tokenizer, vq_model, model = config_gen, uni_prompting_gen, tokenizer_gen, vq_model_gen, model_gen

    prompts = [input_text]
    config.training.batch_size = config.batch_size = 1
    config.training.guidance_scale = config.guidance_scale = guidance_scale
    config.training.generation_timesteps = config.generation_timesteps = generation_timesteps

    # Start from a fully masked image: every VQ token position holds the mask token.
    image_tokens = torch.ones((len(prompts), config.model.showo.num_vq_tokens),
                              dtype=torch.long, device=device) * mask_token_id

    input_ids, _ = uni_prompting((prompts, image_tokens), 't2i_gen')

    if config.training.guidance_scale > 0:
        # Classifier-free guidance: stack the conditional and unconditional sequences.
        uncond_input_ids, _ = uni_prompting(([''] * len(prompts), image_tokens), 't2i_gen')
        attention_mask = create_attention_mask_predict_next(torch.cat([input_ids, uncond_input_ids], dim=0),
                                                            pad_id=int(uni_prompting.sptids_dict['<|pad|>']),
                                                            soi_id=int(uni_prompting.sptids_dict['<|soi|>']),
                                                            eoi_id=int(uni_prompting.sptids_dict['<|eoi|>']),
                                                            rm_pad_in_image=True)
    else:
        attention_mask = create_attention_mask_predict_next(input_ids,
                                                            pad_id=int(uni_prompting.sptids_dict['<|pad|>']),
                                                            soi_id=int(uni_prompting.sptids_dict['<|soi|>']),
                                                            eoi_id=int(uni_prompting.sptids_dict['<|eoi|>']),
                                                            rm_pad_in_image=True)
        uncond_input_ids = None

    if config.get("mask_schedule", None) is not None:
        schedule = config.mask_schedule.schedule
        args = config.mask_schedule.get("params", {})
        mask_schedule = get_mask_chedule(schedule, **args)
    else:
        mask_schedule = get_mask_chedule(config.training.get("mask_schedule", "cosine"))

    with torch.no_grad():
        gen_token_ids = model.t2i_generate(
            input_ids=input_ids,
            uncond_input_ids=uncond_input_ids,
            attention_mask=attention_mask,
            guidance_scale=config.training.guidance_scale,
            temperature=config.training.get("generation_temperature", 1.0),
            timesteps=config.training.generation_timesteps,
            noise_schedule=mask_schedule,
            noise_type=config.training.get("noise_type", "mask"),
            seq_len=config.model.showo.num_vq_tokens,
            uni_prompting=uni_prompting,
            config=config,
        )

    # Decode the generated VQ tokens back to pixels and convert to uint8 HWC for Gradio.
    gen_token_ids = torch.clamp(gen_token_ids, max=config.model.showo.codebook_size - 1, min=0)
    images = vq_model.decode_code(gen_token_ids)

    images = torch.clamp((images + 1.0) / 2.0, min=0.0, max=1.0)
    images *= 255.0
    images = images.permute(0, 2, 3, 1).cpu().numpy().astype(np.uint8)

    return images[0]
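# A minimal usage sketch of text_to_image_generation, independent of the Gradio UI.
# The prompt, guidance scale, and timestep count below are illustrative values, not
# defaults taken from the config:
#
#   sample = text_to_image_generation("a watercolor painting of a lighthouse at dawn",
#                                     guidance_scale=5.0, generation_timesteps=18)
#   Image.fromarray(sample).save("t2i_sample.png")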
def text_guided_inpainting(input_text, inpainting_image, inpainting_mask, guidance_scale, generation_timesteps):
    config, uni_prompting, tokenizer, vq_model, model = config_gen, uni_prompting_gen, tokenizer_gen, vq_model_gen, model_gen

    prompt = [input_text]
    config.training.batch_size = config.batch_size = 1
    config.training.guidance_scale = config.guidance_scale = guidance_scale
    config.training.generation_timesteps = config.generation_timesteps = generation_timesteps

    inpainting_image = image_transform(inpainting_image, resolution=config.dataset.params.resolution).to(device)
    inpainting_mask = image_transform(inpainting_mask, resolution=config.dataset.params.resolution, normalize=False)

    inpainting_image = inpainting_image.unsqueeze(0).repeat(config.training.batch_size, 1, 1, 1)

    # Downsample the mask to the VQ token grid (resolution // 16 per side) and binarize it.
    inpainting_mask = inpainting_mask.unsqueeze(0).to(device)
    inpainting_mask = F.interpolate(inpainting_mask, size=config.dataset.params.resolution // 16, mode='bicubic')
    inpainting_mask = inpainting_mask.repeat(config.training.batch_size, 1, 1, 1)

    inpainting_mask[inpainting_mask < 0.5] = 0
    inpainting_mask[inpainting_mask >= 0.5] = 1
    inpainting_mask = inpainting_mask.reshape(config.training.batch_size, -1)
    inpainting_mask = inpainting_mask.to(torch.bool)

    # Quantize the input image (token ids are offset past the text vocabulary), then
    # replace the masked positions with the mask token so the model regenerates them.
    inpainting_image_tokens = vq_model.get_code(inpainting_image) + len(uni_prompting.text_tokenizer)
    inpainting_image_tokens[inpainting_mask] = mask_token_id

    input_ids, _ = uni_prompting((prompt, inpainting_image_tokens), 't2i_gen')

    if config.training.guidance_scale > 0:
        uncond_input_ids, _ = uni_prompting(([''] * len(prompt), inpainting_image_tokens), 't2i_gen')
        attention_mask = create_attention_mask_predict_next(torch.cat([input_ids, uncond_input_ids], dim=0),
                                                            pad_id=int(uni_prompting.sptids_dict['<|pad|>']),
                                                            soi_id=int(uni_prompting.sptids_dict['<|soi|>']),
                                                            eoi_id=int(uni_prompting.sptids_dict['<|eoi|>']),
                                                            rm_pad_in_image=True)
    else:
        attention_mask = create_attention_mask_predict_next(input_ids,
                                                            pad_id=int(uni_prompting.sptids_dict['<|pad|>']),
                                                            soi_id=int(uni_prompting.sptids_dict['<|soi|>']),
                                                            eoi_id=int(uni_prompting.sptids_dict['<|eoi|>']),
                                                            rm_pad_in_image=True)
        uncond_input_ids = None

    if config.get("mask_schedule", None) is not None:
        schedule = config.mask_schedule.schedule
        args = config.mask_schedule.get("params", {})
        mask_schedule = get_mask_chedule(schedule, **args)
    else:
        mask_schedule = get_mask_chedule(config.training.get("mask_schedule", "cosine"))

    with torch.no_grad():
        gen_token_ids = model.t2i_generate(
            input_ids=input_ids,
            uncond_input_ids=uncond_input_ids,
            attention_mask=attention_mask,
            guidance_scale=config.training.guidance_scale,
            temperature=config.training.get("generation_temperature", 1.0),
            timesteps=config.training.generation_timesteps,
            noise_schedule=mask_schedule,
            noise_type=config.training.get("noise_type", "mask"),
            seq_len=config.model.showo.num_vq_tokens,
            uni_prompting=uni_prompting,
            config=config,
        )

    gen_token_ids = torch.clamp(gen_token_ids, max=config.model.showo.codebook_size - 1, min=0)
    images = vq_model.decode_code(gen_token_ids)

    images = torch.clamp((images + 1.0) / 2.0, min=0.0, max=1.0)
    images *= 255.0
    images = images.permute(0, 2, 3, 1).cpu().numpy().astype(np.uint8)

    return images[0]
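# A hedged usage sketch for the inpainting path: the image and mask are passed through
# `image_transform`, and regions where the mask is bright (>= 0.5 after resizing) are
# regenerated from the prompt. File names and parameter values are illustrative:
#
#   result = text_guided_inpainting("a vase of sunflowers on the table",
#                                   Image.open("room.png").convert("RGB"),
#                                   Image.open("room_mask.png").convert("RGB"),
#                                   guidance_scale=5.0, generation_timesteps=18)
#   Image.fromarray(result).save("inpainting_sample.png")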
def text_guided_extrapolation(input_img, input_text, left_ext, right_ext, guidance_scale, generation_timesteps):
    config, uni_prompting, tokenizer, vq_model, model = config_gen, uni_prompting_gen, tokenizer_gen, vq_model_gen, model_gen

    config.offset = 0
    config.training.batch_size = config.batch_size = 1
    config.training.guidance_scale = config.guidance_scale = guidance_scale
    config.training.generation_timesteps = config.generation_timesteps = generation_timesteps

    # Right-side extensions are generated first, then left-side ones; each pass extends
    # the canvas by half of the base image width.
    extra_direction = ['right'] * int(right_ext) + ['left'] * int(left_ext)
    prompt = [input_text] * len(extra_direction)
    W = config.dataset.params.resolution // 16
    for id, (prt, direction) in enumerate(zip(prompt, extra_direction)):
        prt = [prt] * config.training.batch_size
        if id == 0:
            # extrapolation_image = Image.open(config.image_path).convert("RGB")
            extrapolation_image = input_img
            extrapolation_image = image_transform(extrapolation_image,
                                                  resolution=config.dataset.params.resolution).to(device)

            B, _, _ = extrapolation_image.shape
            extrapolation_image = extrapolation_image.unsqueeze(0)
            extrapolation_image_tokens = vq_model.get_code(extrapolation_image) + len(uni_prompting.text_tokenizer)
            extrapolation_image_tokens = extrapolation_image_tokens.reshape(1,
                                                                            config.dataset.params.resolution // 16,
                                                                            config.dataset.params.resolution // 16)
            extrapolation_image_tokens = extrapolation_image_tokens.repeat(config.training.batch_size, 1, 1)
        else:
            extrapolation_image_tokens = gen_token_ids + len(uni_prompting.text_tokenizer)

        # Keep the halves of the current canvas that will be stitched back after generation.
        image_left_part = extrapolation_image_tokens[:, :, :-(W // 2 - config.offset)] - len(uni_prompting.text_tokenizer)
        image_right_part = extrapolation_image_tokens[:, :, W // 2 - config.offset:] - len(uni_prompting.text_tokenizer)
        image_up_part = extrapolation_image_tokens[:, :-(W // 2 - config.offset), :] - len(uni_prompting.text_tokenizer)
        image_down_part = extrapolation_image_tokens[:, W // 2 - config.offset:, :] - len(uni_prompting.text_tokenizer)

        if direction in ['left', 'right']:
            extrapolation_mask = torch.zeros((config.training.batch_size,
                                              config.dataset.params.resolution // 16,
                                              config.dataset.params.resolution // 16 // 2 + config.offset),
                                             dtype=torch.int64, device=device) + mask_token_id
        else:
            extrapolation_mask = torch.zeros((config.training.batch_size,
                                              config.dataset.params.resolution // 16 // 2 + config.offset,
                                              config.dataset.params.resolution // 16),
                                             dtype=torch.int64, device=device) + mask_token_id

        if direction == 'left':
            extrapolation_image_tokens = torch.cat(
                [extrapolation_mask, extrapolation_image_tokens[:, :, :W // 2 - config.offset]], dim=-1)
        elif direction == 'right':
            extrapolation_image_tokens = torch.cat(
                [extrapolation_image_tokens[:, :, -(W // 2 - config.offset):], extrapolation_mask], dim=-1)
        elif direction == 'up':
            extrapolation_image_tokens = torch.cat(
                [extrapolation_mask, extrapolation_image_tokens[:, :W // 2 - config.offset, :]], dim=-2)
        else:
            extrapolation_image_tokens = torch.cat(
                [extrapolation_image_tokens[:, -(W // 2 - config.offset):, :], extrapolation_mask], dim=-2)

        extrapolation_image_tokens = extrapolation_image_tokens.reshape(config.training.batch_size, -1)

        input_ids, _ = uni_prompting((prt, extrapolation_image_tokens), 't2i_gen')

        if config.training.guidance_scale > 0:
            uncond_input_ids, _ = uni_prompting(([''] * len(prt), extrapolation_image_tokens), 't2i_gen')
            attention_mask = create_attention_mask_predict_next(torch.cat([input_ids, uncond_input_ids], dim=0),
                                                                pad_id=int(uni_prompting.sptids_dict['<|pad|>']),
                                                                soi_id=int(uni_prompting.sptids_dict['<|soi|>']),
                                                                eoi_id=int(uni_prompting.sptids_dict['<|eoi|>']),
                                                                rm_pad_in_image=True)
        else:
            attention_mask = create_attention_mask_predict_next(input_ids,
                                                                pad_id=int(uni_prompting.sptids_dict['<|pad|>']),
                                                                soi_id=int(uni_prompting.sptids_dict['<|soi|>']),
                                                                eoi_id=int(uni_prompting.sptids_dict['<|eoi|>']),
                                                                rm_pad_in_image=True)
            uncond_input_ids = None

        if config.get("mask_schedule", None) is not None:
            schedule = config.mask_schedule.schedule
            args = config.mask_schedule.get("params", {})
            mask_schedule = get_mask_chedule(schedule, **args)
        else:
            mask_schedule = get_mask_chedule(config.training.get("mask_schedule", "cosine"))

        with torch.no_grad():
            gen_token_ids = model.t2i_generate(
                input_ids=input_ids,
                uncond_input_ids=uncond_input_ids,
                attention_mask=attention_mask,
                guidance_scale=config.training.guidance_scale,
                temperature=config.training.get("generation_temperature", 1.0),
                timesteps=config.training.generation_timesteps,
                noise_schedule=mask_schedule,
                noise_type=config.training.get("noise_type", "mask"),
                seq_len=config.model.showo.num_vq_tokens,
                uni_prompting=uni_prompting,
                config=config,
            )

        gen_token_ids = torch.clamp(gen_token_ids, max=config.model.showo.codebook_size - 1, min=0)
        gen_token_ids = gen_token_ids.reshape(config.training.batch_size,
                                              config.dataset.params.resolution // 16,
                                              config.dataset.params.resolution // 16)

        if direction == 'left':
            gen_token_ids = torch.cat([gen_token_ids, image_right_part], dim=-1)
        elif direction == 'right':
            gen_token_ids = torch.cat([image_left_part, gen_token_ids], dim=-1)
        elif direction == 'up':
            gen_token_ids = torch.cat([gen_token_ids, image_down_part], dim=-2)
        else:
            # For the 'down' direction the kept upper half must be stitched above the newly
            # generated strip; the flattened source concatenated `image_left_part` here,
            # whose shape does not match along dim=-2.
            gen_token_ids = torch.cat([image_up_part, gen_token_ids], dim=-2)

    # Decode the final stitched token grid once all extensions are done (later loop
    # iterations reuse the 3D `gen_token_ids`, so decoding belongs after the loop).
    _, h, w = gen_token_ids.shape
    gen_token_ids = gen_token_ids.reshape(config.training.batch_size, -1)
    images = vq_model.decode_code(gen_token_ids, shape=(h, w))

    images = torch.clamp((images + 1.0) / 2.0, min=0.0, max=1.0)
    images *= 255.0
    images = images.permute(0, 2, 3, 1).cpu().numpy().astype(np.uint8)

    return images[0]
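# A hedged usage sketch for the extrapolation path: `left_ext` / `right_ext` give the
# number of half-image extensions on each side, and each extension runs one generation
# pass of the loop above. File names and parameter values are illustrative:
#
#   panorama = text_guided_extrapolation(Image.open("coast.png").convert("RGB"),
#                                        "a wide sandy beach under a clear sky",
#                                        left_ext=1, right_ext=2,
#                                        guidance_scale=5.0, generation_timesteps=18)
#   Image.fromarray(panorama).save("extrapolation_sample.png")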
def multimodal_understanding(input_img, input_text, chat_history):
    global config_mmu, uni_prompting_mmu, tokenizer_mmu, model_mmu, vision_tower, clip_image_processor
    # The continuous (CLIP-ViT) checkpoint is loaded lazily on the first understanding request.
    if model_mmu is None:
        config_mmu, uni_prompting_mmu, tokenizer_mmu, model_mmu, vision_tower, clip_image_processor = load_continuous_checkpoint()
    config, uni_prompting, tokenizer, model = config_mmu, uni_prompting_mmu, tokenizer_mmu, model_mmu

    image_ori = input_img
    pixel_values = clip_image_processor.preprocess(image_ori, return_tensors="pt")["pixel_values"][0]
    batch_size = 1

    question = input_text
    top_k = 1  # retain only the top_k most likely tokens, clamp others to have 0 probability

    conv = conversation_lib.default_conversation.copy()
    conv.append_message(conv.roles[0], question)
    conv.append_message(conv.roles[1], None)
    prompt_question = conv.get_prompt()
    question_input = []
    question_input.append(prompt_question.strip())

    input_ids_system = [uni_prompting.text_tokenizer(SYSTEM_PROMPT, return_tensors="pt", padding="longest").input_ids
                        for _ in range(batch_size)]
    input_ids_system = torch.stack(input_ids_system, dim=0)
    assert input_ids_system.shape[-1] == SYSTEM_PROMPT_LEN
    input_ids_system = input_ids_system.to(device)
    input_ids_system = input_ids_system[0]

    input_ids = [uni_prompting.text_tokenizer(prompt, return_tensors="pt", padding="longest").input_ids
                 for prompt in question_input]

    input_ids = torch.stack(input_ids)
    input_ids = torch.nn.utils.rnn.pad_sequence(
        input_ids, batch_first=True, padding_value=uni_prompting.text_tokenizer.pad_token_id
    )
    input_ids = torch.tensor(input_ids).to(device).squeeze(0)

    # LLaVA-style layout: <|mmu|> + system prompt + <|soi|> [image embeddings] <|eoi|> + question.
    input_ids_llava = torch.cat([
        (torch.ones(input_ids.shape[0], 1) * uni_prompting.sptids_dict['<|mmu|>']).to(device),
        input_ids_system,
        (torch.ones(input_ids.shape[0], 1) * uni_prompting.sptids_dict['<|soi|>']).to(device),
        # place your img embedding here
        (torch.ones(input_ids.shape[0], 1) * uni_prompting.sptids_dict['<|eoi|>']).to(device),
        input_ids,
    ], dim=1).long()

    # Project CLIP-ViT features into the LLM embedding space and splice them in after <|soi|>.
    images_embeddings = vision_tower(pixel_values[None])
    images_embeddings = model.mm_projector(images_embeddings)

    text_embeddings = model.showo.model.embed_tokens(input_ids_llava)

    # Full input seq
    part1 = text_embeddings[:, :2 + SYSTEM_PROMPT_LEN, :]
    part2 = text_embeddings[:, 2 + SYSTEM_PROMPT_LEN:, :]
    input_embeddings = torch.cat((part1, images_embeddings, part2), dim=1)

    attention_mask_llava = create_attention_mask_for_mmu_vit(input_embeddings, system_prompt_len=SYSTEM_PROMPT_LEN)

    cont_toks_list = model.mmu_generate(input_embeddings=input_embeddings,
                                        attention_mask=attention_mask_llava[0].unsqueeze(0),
                                        max_new_tokens=100,
                                        top_k=top_k,
                                        # eot_token=uni_prompting.sptids_dict['<|eot|>']
                                        eot_token=tokenizer.eos_token_id
                                        )

    cont_toks_list = torch.stack(cont_toks_list).squeeze()[None]

    output_text = uni_prompting.text_tokenizer.batch_decode(cont_toks_list, skip_special_tokens=True)
    output_text = output_text[0].strip()

    chat_history.append((input_text, output_text))
    return "", chat_history
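# A hedged usage sketch for the understanding path, roughly how a Gradio Textbox/Chatbot
# callback would invoke it: the first return value clears the textbox and the second is
# the updated chat history. Image path and question are illustrative:
#
#   history = []
#   _, history = multimodal_understanding(Image.open("photo.png").convert("RGB"),
#                                         "How many people are in this image?", history)
#   print(history[-1][1])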
with gr.Blocks() as demo:
    gr.HTML("""
        This is the official Gradio demo for the Show-o model, a unified model that can do multimodal understanding and generation.
Paper: Show-o: One Single Transformer To Unify Multimodal Understanding and Generation