import gradio as gr import os import sys from pathlib import Path import random import string import time from queue import Queue from threading import Thread import emoji text_gen=gr.Interface.load("spaces/Dao3/MagicPrompt-Stable-Diffusion") def get_prompts(prompt_text): if prompt_text: return text_gen("photo, " + prompt_text) else: return text_gen("") proc1=gr.Interface.load("models/dreamlike-art/dreamlike-photoreal-2.0") def restart_script_periodically(): while True: random_time = random.randint(540, 600) time.sleep(random_time) os.execl(sys.executable, sys.executable, *sys.argv) restart_thread = Thread(target=restart_script_periodically, daemon=True) restart_thread.start() queue = Queue() queue_threshold = 100 def add_random_noise(prompt, noise_level=0.00): if noise_level == 0: noise_level = 0.00 percentage_noise = noise_level * 5 num_noise_chars = int(len(prompt) * (percentage_noise/100)) noise_indices = random.sample(range(len(prompt)), num_noise_chars) prompt_list = list(prompt) noise_chars = list(string.ascii_letters + string.punctuation + ' ' + string.digits) noise_chars.extend(['😍', '💩', '😂', '🤔', '😊', '🤗', '😭', '🙄', '😷', '🤯', '🤫', '🥴', '😴', '🤩', '🥳', '😔', '😩', '🤪', '😇', '🤢', '😈', '👹', '👻', '🤖', '👽', '💀', '🎃', '🎅', '🎄', '🎁', '🎂', '🎉', '🎈', '🎊', '🎮', '❤️', '💔', '💕', '💖', '💗', '🐶', '🐱', '🐭', '🐹', '🦊', '🐻', '🐨', '🐯', '🦁', '🐘', '🔥', '🌧️', '🌞', '🌈', '💥', '🌴', '🌊', '🌺', '🌻', '🌸', '🎨', '🌅', '🌌', '☁️', '⛈️', '❄️', '☀️', '🌤️', '⛅️', '🌥️', '🌦️', '🌧️', '🌩️', '🌨️', '🌫️', '☔️', '🌬️', '💨', '🌪️', '🌈']) for index in noise_indices: prompt_list[index] = random.choice(noise_chars) return "".join(prompt_list) def send_it1(inputs, noise_level, proc1=proc1): prompt_with_noise = add_random_noise(inputs, noise_level) while queue.qsize() >= queue_threshold: time.sleep(2) queue.put(prompt_with_noise) output1 = proc1(prompt_with_noise) return output1 def send_it2(inputs, noise_level, proc1=proc1): prompt_with_noise = add_random_noise(inputs, noise_level) while queue.qsize() >= 
queue_threshold: time.sleep(2) queue.put(prompt_with_noise) output2 = proc1(prompt_with_noise) return output2 #def send_it3(inputs, noise_level, proc1=proc1): #prompt_with_noise = add_random_noise(inputs, noise_level) #while queue.qsize() >= queue_threshold: #time.sleep(2) #queue.put(prompt_with_noise) #output3 = proc1(prompt_with_noise) #return output3 #def send_it4(inputs, noise_level, proc1=proc1): #prompt_with_noise = add_random_noise(inputs, noise_level) #while queue.qsize() >= queue_threshold: #time.sleep(2) #queue.put(prompt_with_noise) #output4 = proc1(prompt_with_noise) #return output4 def txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width, height, generator, seed): print(f"{datetime.datetime.now()} txt_to_img, model: {current_model.name}") global last_mode global pipe global current_model_path if model_path != current_model_path or last_mode != "txt2img": current_model_path = model_path update_state(f"Loading {current_model.name} text-to-image model...") if is_colab or current_model == custom_model: pipe = StableDiffusionPipeline.from_pretrained( current_model_path, torch_dtype=torch.float16, scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"), safety_checker=lambda images, clip_input: (images, False) ) else: pipe = StableDiffusionPipeline.from_pretrained( current_model_path, torch_dtype=torch.float16, scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler") ) # pipe = pipe.to("cpu") # pipe = current_model.pipe_t2i if torch.cuda.is_available(): pipe = pipe.to("cuda") pipe.enable_xformers_memory_efficient_attention() last_mode = "txt2img" prompt = current_model.prefix + prompt result = pipe( prompt, negative_prompt = neg_prompt, num_images_per_prompt=n_images, num_inference_steps = int(steps), guidance_scale = guidance, width = width, height = height, generator = generator, callback=pipe_callback) # update_state(f"Done. 
Seed: {seed}") return replace_nsfw_images(result) def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance, steps, width, height, generator, seed): print(f"{datetime.datetime.now()} img_to_img, model: {model_path}") global last_mode global pipe global current_model_path if model_path != current_model_path or last_mode != "img2img": current_model_path = model_path update_state(f"Loading {current_model.name} image-to-image model...") if is_colab or current_model == custom_model: pipe = StableDiffusionImg2ImgPipeline.from_pretrained( current_model_path, torch_dtype=torch.float16, scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"), safety_checker=lambda images, clip_input: (images, False) ) else: pipe = StableDiffusionImg2ImgPipeline.from_pretrained( current_model_path, torch_dtype=torch.float16, scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler") ) # pipe = pipe.to("cpu") # pipe = current_model.pipe_i2i if torch.cuda.is_available(): pipe = pipe.to("cuda") pipe.enable_xformers_memory_efficient_attention() last_mode = "img2img" prompt = current_model.prefix + prompt ratio = min(height / img.height, width / img.width) img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS) result = pipe( prompt, negative_prompt = neg_prompt, num_images_per_prompt=n_images, image = img, num_inference_steps = int(steps), strength = strength, guidance_scale = guidance, # width = width, # height = height, generator = generator, callback=pipe_callback) # update_state(f"Done. Seed: {seed}") return replace_nsfw_images(result) def replace_nsfw_images(results): if is_colab: return results.images for i in range(len(results.images)): if results.nsfw_content_detected[i]: results.images[i] = Image.open("nsfw.png") return results.images with gr.Blocks(css='style.css') as demo: gr.HTML( """
差异程度: 用数值调节两张图的差异程度。数值越大,两张图的差异越大,反之越小。
❤️ 喜欢的话,就点上面的❤️吧~❤️
这个模型和幻梦显形的区别是:幻梦显形更虚幻,这个模型更真实,毕竟都"成真"了嘛。
安利:还有一个汉化项目:TiwenTi.chat,这是一个ChatGPT的中文案例库,按照工具用途和角色扮演用途做了分类,欢迎去看去分享~