import gradio as gr
import torch
from diffusers import DiffusionPipeline
import PIL.Image
import numpy as np

# Load the latent diffusion text-to-image pipeline
ldm = DiffusionPipeline.from_pretrained("fusing/latent-diffusion-text2im-large")

# Fix the random seed so the generation is reproducible
generator = torch.manual_seed(42)

prompt = "A painting of a squirrel eating a burger"
image = ldm([prompt], generator=generator, eta=0.3, guidance_scale=6.0, num_inference_steps=50)

# Convert the image tensor batch (NCHW, values in [0, 1]) to a uint8 NHWC array, then to PIL
image_processed = image.cpu().permute(0, 2, 3, 1)
image_processed = image_processed * 255.
image_processed = image_processed.numpy().astype(np.uint8)
image_pil = PIL.Image.fromarray(image_processed[0])

# Save the generated image to disk
image_pil.save("test.png")


def greet(name):
    return "Hello " + name + "!!"


iface = gr.Interface(fn=greet, inputs="text", outputs="text")
iface.launch()
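
# A possible follow-up (sketch only, not wired into the app above): serve the
# text-to-image pipeline through the Gradio interface instead of the greeting
# demo. The generate() helper below is hypothetical; it simply reuses the `ldm`
# pipeline and the same tensor-to-PIL post-processing shown in this script.
#
# def generate(prompt):
#     imgs = ldm([prompt], eta=0.3, guidance_scale=6.0, num_inference_steps=50)
#     arr = (imgs.cpu().permute(0, 2, 3, 1) * 255.).numpy().astype(np.uint8)
#     return PIL.Image.fromarray(arr[0])
#
# gr.Interface(fn=generate, inputs="text", outputs="image").launch()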