Spaces:
Build error
Build error
File size: 1,076 Bytes
2e8d652 c82c2e4 bb3e285 5d2696c 2cec9ee 8b4dd82 c82c2e4 5d2696c bfe9080 5d2696c 68040ae d10fb3e ee8960e ab690cb 8b4dd82 817095a 6dd1bf4 d10fb3e 64150cb 817095a 68040ae |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 |
import base64
import gradio as gr
import torch
import torchvision
from diffusers import DiffusionPipeline
import PIL.Image
import numpy as np
from io import BytesIO
# Load the pre-trained latent-diffusion text-to-image pipeline.
# NOTE(review): downloads model weights from the Hugging Face Hub on first
# run — this is a slow, network-dependent module-level side effect.
ldm = DiffusionPipeline.from_pretrained("fusing/latent-diffusion-text2im-large")
# Fixed seed so repeated runs with the same prompt produce the same image.
generator = torch.manual_seed(42)
def greet(name):
    """Generate an image from a text prompt and return it as base64 JPEG.

    Parameters
    ----------
    name : str
        Text prompt fed to the latent-diffusion pipeline.

    Returns
    -------
    str
        The generated image, JPEG-encoded and base64-encoded as UTF-8 text.
    """
    prompt = name
    # NOTE(review): assumes the pipeline returns a tensor of shape
    # (batch, channels, height, width) with values in [0, 1]; newer
    # diffusers releases return an output object instead — verify against
    # the pinned diffusers version.
    image = ldm([prompt], generator=generator, eta=0.3, guidance_scale=6.0, num_inference_steps=50)
    # NCHW -> NHWC, then scale to 0-255 uint8 for PIL.
    image_processed = image.cpu().permute(0, 2, 3, 1)
    image_processed = image_processed * 255.
    image_processed = image_processed.numpy().astype(np.uint8)
    image_pil = PIL.Image.fromarray(image_processed[0])
    # Serialize to an in-memory JPEG, then base64-encode for text transport.
    buffered = BytesIO()
    image_pil.save(buffered, format="JPEG")
    img_str = base64.b64encode(buffered.getvalue())
    return img_str.decode('utf-8')
# Text-in / text-out interface: greet returns the image as a base64 string,
# so the output component is plain text, not gr.Image.
iface = gr.Interface(fn=greet, inputs="text", outputs="text")
iface.launch()