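# Gradio app combining the Prodia API (txt2img / img2img), the Hugging Face
# Inference API, a MagicPrompt-based prompt generator, a simple OpenCV
# upscaler, PNG-info parsing, and BLIP / ViT / ViLT demo tabs.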
import gradio as gr
import requests
import time
import random
import json
import base64
import os
from transformers import pipeline, set_seed
from io import BytesIO
import io
import html
import PIL
from PIL import Image
import re
import cv2
import numpy as np
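# Bicubic upscaler for the "Upscaler" tab: returns a one-image gallery plus
# the raw image for the hidden buffer used by the "Send ... to img2img" button.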
def upscale_image(input_image, radio_input):
    upscale_factor = radio_input
    output_image = cv2.resize(input_image, None, fx=upscale_factor, fy=upscale_factor, interpolation=cv2.INTER_CUBIC)
    return [output_image], output_image
def send_prompt(prompt):
    return prompt
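# Thin wrapper around the Hugging Face Inference API; the HF_TOKEN secret is
# read from the environment on every call.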
def query(payload, model):
    HF_TOKEN = os.getenv("HF_TOKEN")
    headers = {"Authorization": f"Bearer {HF_TOKEN}"}
    url = "https://api-inference.huggingface.co/models/"
    API_URL = f"{url}{model}"
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.content
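# Text-to-image via the Inference API. The sleeps only pace the progress bar;
# if the endpoint returns something that is not an image (e.g. the model is
# still loading), a warning is shown instead of a result.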
def hf_inference(prompt, negative, model, steps, sampler, guidance, width, height, seed, progress=gr.Progress()):
    try:
        progress(0, desc="Starting")
        images = []
        time.sleep(2.5)
        progress(0.05)
        progress(0.25, desc="Generating")
        image_bytes = query(payload={
            "inputs": f"{prompt}",
            "parameters": {
                "negative_prompt": f"{negative}",
                "num_inference_steps": steps,
                "guidance_scale": guidance,
                "width": width, "height": height,
                "seed": seed,
            },
        }, model=model)
        progress(0.75, desc="Opening image")
        time.sleep(1)
        image = Image.open(io.BytesIO(image_bytes))
        images.append(image)
        progress(0.99, desc="Sending image")
        time.sleep(0.5)
        return images
    except PIL.UnidentifiedImageError:
        gr.Warning("This model is not loaded right now. Try another model.")
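# Minimal client for the Prodia REST API: generation, img2img transform,
# ControlNet, job polling, and model/sampler listings.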
class Prodia:
    def __init__(self, api_key, base=None):
        self.base = base or "https://api.prodia.com/v1"
        self.headers = {
            "X-Prodia-Key": api_key
        }

    def generate(self, params):
        response = self._post(f"{self.base}/sd/generate", params)
        return response.json()

    def transform(self, params):
        response = self._post(f"{self.base}/sd/transform", params)
        return response.json()

    def controlnet(self, params):
        response = self._post(f"{self.base}/sd/controlnet", params)
        return response.json()

    def get_job(self, job_id):
        response = self._get(f"{self.base}/job/{job_id}")
        return response.json()

    def wait(self, job):
        job_result = job
        while job_result['status'] not in ['succeeded', 'failed']:
            time.sleep(0.25)
            job_result = self.get_job(job['job'])
        return job_result

    def list_models(self):
        response = self._get(f"{self.base}/sd/models")
        return response.json()

    def list_samplers(self):
        response = self._get(f"{self.base}/sd/samplers")
        return response.json()

    def _post(self, url, params):
        headers = {
            **self.headers,
            "Content-Type": "application/json"
        }
        response = requests.post(url, headers=headers, data=json.dumps(params))
        if response.status_code != 200:
            raise Exception(f"Bad Prodia Response: {response.status_code}")
        return response

    def _get(self, url):
        response = requests.get(url, headers=self.headers)
        if response.status_code != 200:
            raise Exception(f"Bad Prodia Response: {response.status_code}")
        return response
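# The img2img request below sends the source image inline as base64-encoded
# PNG data, so this helper converts a PIL image accordingly.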
def image_to_base64(image):
    # Serialize the PIL image to PNG bytes in memory
    buffered = BytesIO()
    image.save(buffered, format="PNG")
    # Encode the bytes as base64 and return them as a UTF-8 string
    img_str = base64.b64encode(buffered.getvalue())
    return img_str.decode('utf-8')
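# Strip a trailing "[...]" identifier and the .safetensors / .ckpt extension
# from a Prodia model name so only the human-readable part remains.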
def remove_id_and_ext(text):
    text = re.sub(r'\[.*\]$', '', text).strip()
    if text.endswith(".safetensors"):
        text = text[:-len(".safetensors")]
    elif text.endswith(".ckpt"):
        text = text[:-len(".ckpt")]
    return text
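# Parse a PNG "parameters" info string into its individual generation settings
# (prompt, negative prompt, steps, seed, sampler, model, CFG scale, size).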
def get_data(text):
    results = {}
    patterns = {
        'prompt': r'(.*)',
        'negative_prompt': r'Negative prompt: (.*)',
        'steps': r'Steps: (\d+),',
        'seed': r'Seed: (\d+),',
        'sampler': r'Sampler:\s*([^\s,]+(?:\s+[^\s,]+)*)',
        'model': r'Model:\s*([^\s,]+)',
        'cfg_scale': r'CFG scale:\s*([\d\.]+)',
        'size': r'Size:\s*([0-9]+x[0-9]+)'
    }
    for key in ['prompt', 'negative_prompt', 'steps', 'seed', 'sampler', 'model', 'cfg_scale', 'size']:
        match = re.search(patterns[key], text)
        if match:
            results[key] = match.group(1)
        else:
            results[key] = None
    if results['size'] is not None:
        w, h = results['size'].split("x")
        results['w'] = w
        results['h'] = h
    else:
        results['w'] = None
        results['h'] = None
    return results
def send_to_img2img_def(images):
    return images
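# Fill the txt2img controls from parameters recovered out of a PNG info string
# and switch the UI to the txt2img tab.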
def send_to_txt2img(image):
    result = {tabs: gr.update(selected="t2i")}
    try:
        # The PNG Info tab uses a filepath image component, so open the file first
        if isinstance(image, str):
            image = Image.open(image)
        text = image.info['parameters']
        data = get_data(text)
        result[prompt] = gr.update(value=data['prompt'])
        result[negative_prompt] = gr.update(value=data['negative_prompt']) if data['negative_prompt'] is not None else gr.update()
        result[steps] = gr.update(value=int(data['steps'])) if data['steps'] is not None else gr.update()
        result[seed] = gr.update(value=int(data['seed'])) if data['seed'] is not None else gr.update()
        result[cfg_scale] = gr.update(value=float(data['cfg_scale'])) if data['cfg_scale'] is not None else gr.update()
        result[width] = gr.update(value=int(data['w'])) if data['w'] is not None else gr.update()
        result[height] = gr.update(value=int(data['h'])) if data['h'] is not None else gr.update()
        result[sampler] = gr.update(value=data['sampler']) if data['sampler'] is not None else gr.update()
        if data['model'] in model_names:
            result[model] = gr.update(value=model_names[data['model']])
        else:
            result[model] = gr.update()
        return result
    except Exception as e:
        print(e)
        return result
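# The Prodia key is read from the environment (typically a Space secret); the
# model list is fetched once at startup and mapped from display name to the
# full model identifier.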
prodia_client = Prodia(api_key=os.getenv("PRODIA_API_KEY"))
model_list = prodia_client.list_models()
model_names = {}

for model_name in model_list:
    name_without_ext = remove_id_and_ext(model_name)
    model_names[name_without_ext] = model_name
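# Submit a txt2img job to Prodia and poll until it finishes; the result URL is
# returned both for the gallery and for the hidden "send to" buffer textbox.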
def txt2img(prompt, negative_prompt, model, steps, sampler, cfg_scale, width, height, upscale, seed, progress=gr.Progress()):
    progress(0, desc="Starting")
    time.sleep(2.5)
    progress(0.25, desc="Generating")
    result = prodia_client.generate({
        "prompt": prompt,
        "negative_prompt": negative_prompt,
        "model": model,
        "steps": steps,
        "sampler": sampler,
        "cfg_scale": cfg_scale,
        "width": width,
        "height": height,
        "upscale": upscale,
        "seed": seed
    })
    progress(0.75, desc="Opening image")
    job = prodia_client.wait(result)
    progress(0.99, desc="Sending image")
    return [job["imageUrl"]], job["imageUrl"]
def img2img(input_image, denoising, prompt, negative_prompt, model, steps, sampler, cfg_scale, width, height, upscale, seed, progress=gr.Progress()):
    progress(0, desc="Starting")
    time.sleep(1.5)
    progress(0.10, desc="Uploading input image")
    time.sleep(1.5)
    progress(0.25, desc="Generating")
    result = prodia_client.transform({
        "imageData": image_to_base64(input_image),
        "denoising_strength": denoising,
        "prompt": prompt,
        "negative_prompt": negative_prompt,
        "model": model,
        "steps": steps,
        "sampler": sampler,
        "cfg_scale": cfg_scale,
        "width": width,
        "height": height,
        "upscale": upscale,
        "seed": seed
    })
    progress(0.75, desc="Opening image")
    job = prodia_client.wait(result)
    progress(0.99, desc="Sending image")
    time.sleep(0.5)
    return [job["imageUrl"]], job["imageUrl"]
| css = """ | |
| #generate { | |
| height: 100%; | |
| } | |
| """ | |
def change_checkpoint(name, progress=gr.Progress()):
    progress(0, desc="Starting")
    time.sleep(0.5)
    progress(0.25, desc="Changing")
    time.sleep(1)
    progress(0.95, desc="Loading to GPU")
    time.sleep(0.5)
    progress(1.0, desc="Ready")
    return name
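# Build the Blocks UI: a shared checkpoint dropdown on top and one tab per feature.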
with gr.Blocks(css=css) as demo:
    with gr.Row():
        with gr.Column(scale=6):
            model = gr.Dropdown(interactive=True, show_label=True,
                                label="Stable Diffusion Checkpoint", choices=prodia_client.list_models())

    with gr.Tabs() as tabs:
        with gr.Tab("txt2img", id='t2i'):
            with gr.Row():
                with gr.Column(scale=6, min_width=600):
                    prompt = gr.Textbox(placeholder="Prompt", show_label=False, lines=3)
                    negative_prompt = gr.Textbox(placeholder="Negative Prompt", show_label=False, lines=3,
                                                 value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation")
                with gr.Column():
                    text_button = gr.Button("Generate", variant='primary', elem_id="generate")

            with gr.Row():
                with gr.Column(scale=3):
                    with gr.Tab("Generation"):
                        with gr.Row():
                            with gr.Column(scale=1):
                                sampler = gr.Dropdown(value="DPM++ 2M Karras", show_label=True, label="Sampling Method",
                                                      choices=prodia_client.list_samplers())
                            with gr.Column(scale=1):
                                steps = gr.Slider(label="Sampling Steps", minimum=1, maximum=25, value=20, step=1)
                            with gr.Column(scale=1):
                                upscale = gr.Checkbox(label="Upscale", value=False, interactive=True)
                        with gr.Row():
                            with gr.Column(scale=1):
                                width = gr.Slider(label="Width", maximum=1024, value=512, step=8)
                                height = gr.Slider(label="Height", maximum=1024, value=512, step=8)
                            with gr.Column(scale=1):
                                batch_size = gr.Slider(label="Batch Size", maximum=1, value=1)
                                batch_count = gr.Slider(label="Batch Count", maximum=1, value=1)
                        cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, value=8, step=1)
                        seed = gr.Number(label="Seed", value=-1)
                with gr.Column(scale=2):
                    image_output = gr.Gallery(show_label=False, rows=2, preview=True)
                    send_to_img2img = gr.Button(value="Send OUTPUT IMAGE to img2img")
                    send_to_png = gr.Button(value="Send OUTPUT IMAGE to PNG Info")
                    past_url = gr.Textbox(visible=False, interactive=False)

            text_button.click(txt2img, inputs=[prompt, negative_prompt, model, steps, sampler, cfg_scale, width, height, upscale,
                                               seed], outputs=[image_output, past_url], concurrency_limit=64)
| with gr.Tab("img2img", id='i2i'): | |
| with gr.Row(): | |
| with gr.Column(scale=6, min_width=600): | |
| i2i_prompt = gr.Textbox(placeholder="Prompt", show_label=False, lines=3) | |
| i2i_negative_prompt = gr.Textbox(placeholder="Negative Prompt", show_label=False, lines=3, | |
| value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation") | |
| with gr.Column(): | |
| i2i_text_button = gr.Button("Generate", variant='primary', elem_id="generate") | |
| with gr.Row(): | |
| with gr.Column(scale=3): | |
| with gr.Tab("Generation"): | |
| i2i_image_input = gr.Image(type="pil", interactive=True) | |
| with gr.Row(): | |
| with gr.Column(scale=1): | |
| i2i_sampler = gr.Dropdown(value="Euler a", show_label=True, label="Sampling Method", | |
| choices=prodia_client.list_samplers()) | |
| with gr.Column(scale=1): | |
| i2i_steps = gr.Slider(label="Sampling Steps", minimum=1, maximum=25, value=20, step=1) | |
| with gr.Column(scale=1): | |
| i2i_upscale = gr.Checkbox(label="Upscale", value=False, interactive=True) | |
| with gr.Row(): | |
| with gr.Column(scale=1): | |
| i2i_width = gr.Slider(label="Width", maximum=1024, value=512, step=8) | |
| i2i_height = gr.Slider(label="Height", maximum=1024, value=512, step=8) | |
| with gr.Column(scale=1): | |
| i2i_batch_size = gr.Slider(label="Batch Size", maximum=1, value=1) | |
| i2i_batch_count = gr.Slider(label="Batch Count", maximum=1, value=1) | |
| i2i_cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, value=7, step=1) | |
| i2i_denoising = gr.Slider(label="Denoising Strength", minimum=0, maximum=1, value=0.7, step=0.1) | |
| i2i_seed = gr.Number(label="Seed", value=-1) | |
| with gr.Column(scale=2): | |
| i2i_image_output = gr.Gallery(show_label=False, rows=2, preview=True) | |
| send_to_png_i2i = gr.Button(value="Send INPUT IMAGE to PNG Info") | |
| i2i_past_url = gr.Textbox(visible=False, interactive=False) | |
| i2i_text_button.click(img2img, inputs=[i2i_image_input, i2i_denoising, i2i_prompt, i2i_negative_prompt, | |
| model, i2i_steps, i2i_sampler, i2i_cfg_scale, i2i_width, i2i_height, i2i_upscale, | |
| i2i_seed], outputs=[i2i_image_output, i2i_past_url], concurrency_limit=64) | |
| send_to_img2img.click(send_to_img2img_def, inputs=past_url, outputs=i2i_image_input) | |
| with gr.Tab("PNG Info"): | |
| def plaintext_to_html(text, classname=None): | |
| content = "<br>\n".join(html.escape(x) for x in text.split('\n')) | |
| return f"<p class='{classname}'>{content}</p>" if classname else f"<p>{content}</p>" | |
| def get_exif_data(path): | |
| image = Image.open(path) | |
| items = image.info | |
| info = '' | |
| for key, text in items.items(): | |
| info += f""" | |
| <div> | |
| <p><b>{plaintext_to_html(str(key))}</b></p> | |
| <p>{plaintext_to_html(str(text))}</p> | |
| </div> | |
| """.strip() + "\n" | |
| if len(info) == 0: | |
| message = "Nothing found in the image." | |
| info = f"<div><p>{message}<p></div>" | |
| return info | |
| with gr.Row(): | |
| with gr.Column(): | |
| image_input = gr.Image(type="filepath", interactive=True) | |
| png_button = gr.Button("Get Info", variant="primary") | |
| with gr.Row(): | |
| with gr.Column(): | |
| exif_output = gr.HTML(label="EXIF Data") | |
| send_to_txt2img_btn = gr.Button("Send PARAMETRS to txt2img") | |
| send_to_img2img_png = gr.Button("Send IMAGE to img2img") | |
| image_input.upload(get_exif_data, inputs=[image_input], outputs=exif_output) | |
| png_button.click(get_exif_data, inputs=[image_input], outputs=exif_output) | |
| send_to_txt2img_btn.click(send_to_txt2img, inputs=[image_input], outputs=[tabs, prompt, negative_prompt, | |
| steps, seed, model, sampler, | |
| width, height, cfg_scale], | |
| concurrency_limit=64) | |
| send_to_png.click(send_to_img2img_def, inputs=past_url, outputs=image_input) | |
| send_to_img2img_png.click(send_to_img2img_def, inputs=past_url, outputs=i2i_image_input) | |
| send_to_png_i2i.click(send_to_img2img_def, inputs=i2i_past_url, outputs=image_input) | |
| with gr.Tab("HuggingFace Inference"): | |
| with gr.Row(): | |
| gr.Markdown("Add your model from HF.co, enter model ID.") | |
| hf_model = gr.Dropdown(label="HuggingFace checkpoint", choices=["runwayml/stable-diffusion-v1-5", "stabilityai/stable-diffusion-2-1", "dataautogpt3/OpenDalleV1.1", "CompVis/stable-diffusion-v1-4", "playgroundai/playground-v2-1024px-aesthetic", "prompthero/openjourney", "openskyml/dreamdrop-v1", "SG161222/Realistic_Vision_V1.4", "digiplay/AbsoluteReality_v1.8.1", "openskyml/dalle-3-xl", "Lykon/dreamshaper-7", "Pclanglais/Mickey-1928"], value="runwayml/stable-diffusion-v1-5", allow_custom_value=True, interactive=True) | |
            with gr.Row():
                with gr.Column(scale=6, min_width=600):
                    hf_prompt = gr.Textbox(placeholder="Prompt", show_label=False, lines=3)
                    hf_negative_prompt = gr.Textbox(placeholder="Negative Prompt", show_label=False, lines=3,
                                                    value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation")
                with gr.Column():
                    hf_text_button = gr.Button("Generate with HF", variant='primary', elem_id="generate")

            with gr.Row():
                with gr.Column(scale=3):
                    with gr.Tab("Generation"):
                        with gr.Row():
                            with gr.Column(scale=1):
                                hf_steps = gr.Slider(label="Sampling Steps", minimum=1, maximum=25, value=20, step=1)
                        with gr.Row():
                            with gr.Column(scale=1):
                                hf_width = gr.Slider(label="Width", maximum=1024, value=512, step=8)
                                hf_height = gr.Slider(label="Height", maximum=1024, value=512, step=8)
                            with gr.Column(scale=1):
                                hf_batch_size = gr.Slider(label="Batch Size", maximum=1, value=1)
                                hf_batch_count = gr.Slider(label="Batch Count", maximum=1, value=1)
                        hf_cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, value=8, step=1)
                        hf_seed = gr.Number(label="Seed", value=-1)
                with gr.Column(scale=2):
                    hf_image_output = gr.Gallery(show_label=False, preview=True, rows=2, allow_preview=True)
                    # hf_send_to_img2img = gr.Button(value="Send to img2img")

            hf_text_button.click(hf_inference, inputs=[hf_prompt, hf_negative_prompt, hf_model, hf_steps, sampler, hf_cfg_scale, hf_width, hf_height,
                                                       hf_seed], outputs=hf_image_output, concurrency_limit=64)
| with gr.Tab("Prompt Generator"): | |
| gpt2_pipe = pipeline('text-generation', model='Gustavosta/MagicPrompt-Stable-Diffusion', tokenizer='gpt2') | |
| with open("ideas.txt", "r") as f: | |
| line = f.readlines() | |
| def prompt_gen(starting_text): | |
| seed = random.randint(100, 1000000) | |
| set_seed(seed) | |
| if starting_text == "": | |
| starting_text: str = line[random.randrange(0, len(line))].replace("\n", "").lower().capitalize() | |
| starting_text: str = re.sub(r"[,:\-–.!;?_]", '', starting_text) | |
| response = gpt2_pipe(starting_text, max_length=(len(starting_text) + random.randint(60, 90)), num_return_sequences=1) | |
| response_list = [] | |
| for x in response: | |
| resp = x['generated_text'].strip() | |
| if resp != starting_text and len(resp) > (len(starting_text) + 4) and resp.endswith((":", "-", "—")) is False: | |
| response_list.append(resp+'\n') | |
| response_end = "\n".join(response_list) | |
| response_end = re.sub('[^ ]+\.[^ ]+','', response_end) | |
| response_end = response_end.replace("<", "").replace(">", "") | |
| if response_end != "": | |
| return response_end | |
| idea = gr.Textbox(show_label=False, placeholder="Enter your idea", max_lines=3, lines=1, interactive=True) | |
| prompt_button = gr.Button(value="Generate Prompt", variant="primary") | |
| prompt_full = gr.Textbox(label="Full Prompt", max_lines=8, lines=3, interactive=False, show_copy_button=True) | |
| send_to_txt2img_prompt = gr.Button(value="Send FULL PROMPT to txt2img") | |
| send_to_img2img_prompt = gr.Button(value="Send FULL PROMPT to img2img") | |
| send_to_txt2img_prompt.click(send_prompt, inputs=prompt_full, outputs=prompt) | |
| send_to_img2img_prompt.click(send_prompt, inputs=prompt_full, outputs=i2i_prompt) | |
| idea.submit(prompt_gen, inputs=idea, outputs=prompt_full) | |
| prompt_button.click(prompt_gen, inputs=idea, outputs=prompt_full) | |
| with gr.Tab("Upscaler"): | |
| gr.HTML(value=""" | |
| <h1><center>Upscaler</center></h1> | |
| """) | |
| with gr.Row(): | |
| us_input = gr.Image(show_label=False, interactive=True, scale=10) | |
| us_radio = gr.Radio(label="Upscale Levels", choices=[2, 4, 6, 8, 10], value=2, scale=5) | |
| us_button = gr.Button(value="Generate with Upscaler", variant="primary", scale=5) | |
| with gr.Column(scale=1): | |
| us_output = gr.Gallery(show_label=False, rows=2, preview=True, scale=1) | |
| send_to_img2img_us = gr.Button(value="Send UPSCALED IMAGE to img2img") | |
| us_past = gr.Image(interactive=False, visible=False) | |
| us_button.click(upscale_image, inputs=[us_input, us_radio], outputs=[us_output, us_past]) | |
| send_to_img2img_us.click(send_to_img2img_def, inputs=us_past, outputs=i2i_image_input) | |
| with gr.Tab("BLIP"): | |
| with gr.Tab("Base"): | |
| gr.load("models/Salesforce/blip-image-captioning-base", title="BLIP-base") | |
| with gr.Tab("Large"): | |
| gr.load("models/Salesforce/blip-image-captioning-large", title="BLIP-large") | |
| with gr.Tab("Classification"): | |
| gr.load("models/google/vit-base-patch16-224", title="ViT Classification") | |
| #with gr.Tab("Segmentation"): | |
| # gr.load("models/mattmdjaga/segformer_b2_clothes", title="SegFormer Segmentation") | |
| with gr.Tab("Visual Question Answering"): | |
| gr.load("models/dandelin/vilt-b32-finetuned-vqa", title="ViLT VQA") | |
    model.change(change_checkpoint, inputs=model, outputs=model)
    hf_model.change(change_checkpoint, inputs=hf_model, outputs=hf_model)

demo.queue(max_size=80, api_open=False).launch(max_threads=256, show_api=False)