Update app.py
app.py CHANGED
@@ -1,311 +1,139 @@
 import os
-import
-import
 import gradio as gr
-import numpy as np
-from PIL import Image
 import spaces
 import torch
-from
-from typing import Tuple

-footer {
-    visibility: hidden
-}
-'''

-DESCRIPTIONXX = """## TEXT 2 IMAGE🥠"""

-examples = [

 }

-BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1"))

 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

-        "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
-    },
-    {
-        "name": "HD+",
-        "prompt": "hyper-realistic 2K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
-        "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
-    },
-    {
-        "name": "Style Zero",
-        "prompt": "{prompt}",
-        "negative_prompt": "",
-    },
-]

-styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
-DEFAULT_STYLE_NAME = "3840 x 2160"
-STYLE_NAMES = list(styles.keys())

-def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str, str]:
-    if style_name in styles:
-        p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
-    else:
-        p, n = styles[DEFAULT_STYLE_NAME]

-        negative = ""
-    return p.replace("{prompt}", positive), n + negative

-def load_and_prepare_model(model_id):
-    pipe = StableDiffusionXLPipeline.from_pretrained(
-        model_id,
-        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
-        use_safetensors=True,
-        add_watermarker=False,
-    ).to(device)
-    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)

-    if USE_TORCH_COMPILE:
-        pipe.compile()

-    if ENABLE_CPU_OFFLOAD:
-        pipe.enable_model_cpu_offload()

-    return pipe

-# Preload and compile both models
-models = {key: load_and_prepare_model(value) for key, value in MODEL_OPTIONS.items()}

-MAX_SEED = np.iinfo(np.int32).max

-def save_image(img):
-    unique_name = str(uuid.uuid4()) + ".png"
-    img.save(unique_name)
-    return unique_name

-def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-    return seed

-@spaces.GPU(duration=60, enable_queue=True)
 def generate(
-)
-        "output_type": "pil",
-    }

-    if use_resolution_binning:
-        options["use_resolution_binning"] = True

-    images = []
-    for i in range(0, num_images, BATCH_SIZE):
-        batch_options = options.copy()
-        batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
-        if "negative_prompt" in batch_options:
-            batch_options["negative_prompt"] = options["negative_prompt"][i:i+BATCH_SIZE]
-        images.extend(pipe(**batch_options).images)

-    image_paths = [save_image(img) for img in images]
-    return image_paths, seed

-            label="Prompt",
-            show_label=False,
-            max_lines=1,
-            placeholder="Enter your prompt",
-            container=False,
-        )
-        run_button = gr.Button("Run", scale=0)
-        result = gr.Gallery(label="Result", columns=1, show_label=False)

-    with gr.Row():
-        model_choice = gr.Dropdown(
-            label="Model Selection⬇️",
-            choices=list(MODEL_OPTIONS.keys()),
-            value="LIGHTNING V5.0"
-        )

-            choices=STYLE_NAMES,
-            value=DEFAULT_STYLE_NAME,
-            label="Quality Style",
-        )
-        num_images = gr.Slider(
-            label="Number of Images",
             minimum=1,
-            maximum=
             step=1,
-            value=
-        )

             step=1,
-            value=
-        )

-            step=0.1,
-            value=3.0,
-        )
-        num_inference_steps = gr.Slider(
-            label="Number of inference steps",
-            minimum=1,
-            maximum=60,
-            step=1,
-            value=28,
-        )

-    gr.Examples(
-        examples=examples,
-        inputs=prompt,
-        cache_examples=False
-    )

-    use_negative_prompt.change(
-        fn=lambda x: gr.update(visible=x),
-        inputs=use_negative_prompt,
-        outputs=negative_prompt,
-        api_name=False,
-    )

-    gr.on(
-        triggers=[
-            prompt.submit,
-            negative_prompt.submit,
-            run_button.click,
-        ],
-        fn=generate,
-        inputs=[
-            model_choice,
-            prompt,
-            negative_prompt,
-            use_negative_prompt,
-            style_selection,
-            seed,
-            width,
-            height,
-            guidance_scale,
-            num_inference_steps,
-            randomize_seed,
-            num_images,
-        ],
-        outputs=[result, seed],
-    )

-    gr.Markdown(
-        """
-        <div style="text-align: justify;">
-        🥠Models used in the playground: <a href="https://huggingface.co/SG161222/RealVisXL_V5.0_Lightning">[LIGHTNING V5.0]</a>, <a href="https://huggingface.co/SG161222/RealVisXL_V4.0_Lightning">[LIGHTNING V4.0]</a>
-        for image generation. Stable Diffusion XL piped (SDXL) model HF. This is the demo space for generating images using the Stable Diffusion XL models, with multiple different variants available.
-        </div>
-        """
-    )

-    gr.Markdown(
-        """
-        <div style="text-align: justify;">
-        🥠This is the demo space for generating images using Stable Diffusion XL with quality styles, different models, and types. Try the sample prompts to generate higher quality images. Try the sample prompts for generating higher quality images.
-        <a href='https://huggingface.co/spaces/prithivMLmods/Top-Prompt-Collection' target='_blank'>Try prompts</a>.
-        </div>
-        """)

-    gr.Markdown(
-        """
-        <div style="text-align: justify;">
-        ⚠️ Users are accountable for the content they generate and are responsible for ensuring it meets appropriate ethical standards.
-        </div>
-        """)

 if __name__ == "__main__":
-    demo.queue(max_size=
 import os
+from collections.abc import Iterator
+from threading import Thread
 import gradio as gr
 import spaces
 import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

+DESCRIPTION = """
+# QwQ Distill
+"""

+css = '''
+h1 {
+  text-align: center;
+  display: block;
+}

+#duplicate-button {
+  margin: auto;
+  color: #fff;
+  background: #1565c0;
+  border-radius: 100vh;
 }
+'''

+MAX_MAX_NEW_TOKENS = 2048
+DEFAULT_MAX_NEW_TOKENS = 1024
+MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))

 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

+model_id = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = AutoModelForCausalLM.from_pretrained(
+    model_id,
+    device_map="auto",
+    torch_dtype=torch.bfloat16,
+)
+model.config.sliding_window = 4096
+model.eval()


+@spaces.GPU(duration=120)
 def generate(
+    message: str,
+    chat_history: list[dict],
+    max_new_tokens: int = 1024,
+    temperature: float = 0.6,
+    top_p: float = 0.9,
+    top_k: int = 50,
+    repetition_penalty: float = 1.2,
+) -> Iterator[str]:
+    conversation = chat_history.copy()
+    conversation.append({"role": "user", "content": message})
+
+    input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt")
+    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
+        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
+        gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
+    input_ids = input_ids.to(model.device)
+
+    streamer = TextIteratorStreamer(tokenizer, timeout=20.0, skip_prompt=True, skip_special_tokens=True)
+    generate_kwargs = dict(
+        {"input_ids": input_ids},
+        streamer=streamer,
+        max_new_tokens=max_new_tokens,
+        do_sample=True,
+        top_p=top_p,
+        top_k=top_k,
+        temperature=temperature,
+        num_beams=1,
+        repetition_penalty=repetition_penalty,
+    )
+    t = Thread(target=model.generate, kwargs=generate_kwargs)
+    t.start()

+    outputs = []
+    for text in streamer:
+        outputs.append(text)
+        yield "".join(outputs)


+demo = gr.ChatInterface(
+    fn=generate,
+    additional_inputs=[
+        gr.Slider(
+            label="Max new tokens",
             minimum=1,
+            maximum=MAX_MAX_NEW_TOKENS,
             step=1,
+            value=DEFAULT_MAX_NEW_TOKENS,
+        ),
+        gr.Slider(
+            label="Temperature",
+            minimum=0.1,
+            maximum=4.0,
+            step=0.1,
+            value=0.6,
+        ),
+        gr.Slider(
+            label="Top-p (nucleus sampling)",
+            minimum=0.05,
+            maximum=1.0,
+            step=0.05,
+            value=0.9,
+        ),
+        gr.Slider(
+            label="Top-k",
+            minimum=1,
+            maximum=1000,
             step=1,
+            value=50,
+        ),
+        gr.Slider(
+            label="Repetition penalty",
+            minimum=1.0,
+            maximum=2.0,
+            step=0.05,
+            value=1.2,
+        ),
+    ],
+    stop_btn=None,
+    examples=[
+        ["Write a Python function to reverses a string if it's length is a multiple of 4. def reverse_string(str1): if len(str1) % 4 == 0: return ''.join(reversed(str1)) return str1 print(reverse_string('abcd')) print(reverse_string('python')) "],
+        ["Rectangle $ABCD$ is the base of pyramid $PABCD$. If $AB = 10$, $BC = 5$, $\overline{PA}\perp \text{plane } ABCD$, and $PA = 8$, then what is the volume of $PABCD$?"],
+        ["Difference between List comprehension and Lambda in Python lst = [x ** 2 for x in range (1, 11) if x % 2 == 1] print(lst)"],
+        ["What happens when the sun goes down?"],
+    ],
+    cache_examples=False,
+    type="messages",
+    description=DESCRIPTION,
+    css=css,
+    fill_height=True,
+)


 if __name__ == "__main__":
+    demo.queue(max_size=20).launch()
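
The new app streams tokens by running model.generate on a background thread and reading partial text from TextIteratorStreamer, so generate yields progressively longer strings that gr.ChatInterface renders as they arrive. Below is a minimal, hypothetical smoke test of that generator outside the UI; it is not part of this commit and assumes the new file is saved as app.py, the model weights have downloaded, and the prompt text is only illustrative. Outside a ZeroGPU Space the @spaces.GPU decorator should behave as a pass-through, so the call runs on whatever device is available.

# Hypothetical smoke test for the streaming generator above (not part of the commit).
# Importing app loads the tokenizer and model, so the first run is slow.
from app import generate

last = ""
for partial in generate("Explain briefly what a tokenizer does.", chat_history=[], max_new_tokens=64):
    last = partial  # each yield is the full response generated so far
print(last)  # final text, the same string the chat UI would display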