adapt for llama 3.2
app.py CHANGED
@@ -35,16 +35,22 @@ def infer_cap(image):
     return result
 
 
-
+import torch
+from transformers import pipeline
+
+llm_model_id = "meta-llama/Llama-3.2-3B-Instruct"
 
-
-
+llm_pipe = pipeline(
+    "text-generation",
+    model=llm_model_id,
+    torch_dtype=torch.bfloat16,
+    device_map="auto",
+)
 
 # FLUX
 
 import numpy as np
 import random
-import torch
 from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, AutoencoderTiny, AutoencoderKL
 from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast
 from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
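For anyone adapting this change, a quick way to sanity-check the new pipeline is a one-off chat-style call, shown as a sketch below. It reuses the llm_pipe defined above and assumes a recent transformers release (one that accepts chat messages directly in a text-generation pipeline) and that the environment already has access to the gated meta-llama checkpoint.

# Sketch only (not part of this commit): one-off smoke test of llm_pipe,
# mirroring the chat-style call made in llama_gen_fragrance below.
test_messages = [
    {"role": "system", "content": "You are a poetic perfumer."},
    {"role": "user", "content": "A rainy pine forest at dawn."},
]
out = llm_pipe(test_messages, max_new_tokens=64)
print(out[0]["generated_text"][-1]["content"])  # assistant reply is the last message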
@@ -83,7 +89,7 @@ def infer_flux(prompt, seed=42, randomize_seed=True, width=1024, height=1024, gu
 @spaces.GPU
 def llama_gen_fragrance(scene):
 
-    instruction = """
+    instruction = """
 You are a poetic perfumer. Your role is to create the imaginary scent of a described scene.
 You must always respond using the following structure:
 ---
@@ -142,24 +148,27 @@ Always ensure that:
 – Never describe these narrative elements directly.
 – Each perfume feels unique and consistent.
 Here is the scene description to analyze:
-
+"""
 
 
-
-
-
-
-
-
-
-
-
+    messages = [
+        {"role": "system", "content": instruction},
+        {"role": "user", "content": scene},
+    ]
+    outputs = llm_pipe(
+        messages,
+        max_new_tokens=4096,
+    )
+    generated_response = outputs[0]["generated_text"][-1]['content']
+    print(generated_response)
+
+    return generated_response
 
 def extract_notes(text, section_name):
     import re
 
     # 1. Try block of bullets
-    pattern_block = rf'{section_name}:\s*\n((
+    pattern_block = rf'{section_name}:\s*\n((?:\s*\*.*(?:\n|$))+)'
     match_block = re.search(pattern_block, text, re.MULTILINE)
     if match_block:
         notes_text = match_block.group(1)
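A note on the indexing in the new generation code above: when a text-generation pipeline is called with a list of chat messages (recent transformers releases), each result's generated_text is the whole conversation with the model's reply appended as the last message, which is why [-1]['content'] returns the assistant text. Roughly, with made-up values:

# Illustrative shape of `outputs` for a chat-style pipeline call (values invented):
outputs = [
    {
        "generated_text": [
            {"role": "system", "content": "You are a poetic perfumer. ..."},
            {"role": "user", "content": "A sunlit lemon grove after rain."},
            {"role": "assistant", "content": "---\nName: ..."},  # the reply
        ]
    }
]
assert outputs[0]["generated_text"][-1]["content"].startswith("---")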
@@ -168,11 +177,15 @@ def extract_notes(text, section_name):
             bullet = line.strip().lstrip('*').strip()
             if ':' in bullet:
                 note, desc = bullet.split(':', 1)
-
+            elif ',' in bullet:
+                note, desc = bullet.split(',', 1)
             else:
-
+                note, desc = bullet, ''
+            notes.append({'note': note.strip(), 'description': desc.strip()})
+
         return notes
 
+
     # 2. Try inline bullet style: * Section: item1, item2, item3
     pattern_inline = rf'\* {section_name}:\s*(.+)'
     match_inline = re.search(pattern_inline, text)
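To make the new parsing behaviour concrete, here is a small standalone sketch of what pattern_block and the ':' / ',' splitting above do. The section name and bullets are invented for illustration and are not taken from this Space's prompt template.

# Illustrative only: replicates the regex and bullet-splitting logic above.
import re

sample = """Top Notes:
* Bergamot: bright citrus opening
* Sea salt, mineral freshness
* Green fig leaf
"""

section_name = "Top Notes"  # hypothetical section name
pattern_block = rf'{section_name}:\s*\n((?:\s*\*.*(?:\n|$))+)'
block = re.search(pattern_block, sample, re.MULTILINE).group(1)

notes = []
for line in block.strip().split('\n'):
    bullet = line.strip().lstrip('*').strip()
    if ':' in bullet:                      # "Bergamot: bright citrus opening"
        note, desc = bullet.split(':', 1)
    elif ',' in bullet:                    # "Sea salt, mineral freshness"
        note, desc = bullet.split(',', 1)
    else:                                  # "Green fig leaf" -> empty description
        note, desc = bullet, ''
    notes.append({'note': note.strip(), 'description': desc.strip()})

print(notes)
# [{'note': 'Bergamot', 'description': 'bright citrus opening'},
#  {'note': 'Sea salt', 'description': 'mineral freshness'},
#  {'note': 'Green fig leaf', 'description': ''}]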
@@ -382,10 +395,10 @@ def infer(image_input):
 
     llama_q = moondream_result
 
-    gr.Info('Calling
+    gr.Info('Calling Llama3.2 ...')
     result = llama_gen_fragrance(llama_q)
 
-    print(f"Llama2 result: {result}")
+    #print(f"Llama2 result: {result}")
     yield result, None, None, None
 
     parsed = parse_perfume_description(result)
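For context on the surrounding code: infer is a generator, and a Gradio Blocks handler that yields streams each partial result to the outputs as it arrives, which is why the Llama text can be shown before the FLUX images are ready. A minimal standalone sketch of that pattern (made-up component names, not code from app.py):

# Standalone sketch: how a generator handler streams intermediate results
# in Gradio Blocks, as infer() does with its yields.
import time
import gradio as gr

def stepwise(prompt):
    # Each yield updates the outputs immediately, so text can be shown
    # before slower results are ready.
    yield "drafting fragrance...", None
    time.sleep(1)
    yield "done", f"final result for {prompt!r}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="prompt")
    status = gr.Textbox(label="status")
    result = gr.Textbox(label="result")
    prompt.submit(stepwise, inputs=prompt, outputs=[status, result])

demo.launch()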
@@ -405,7 +418,7 @@ with gr.Blocks(css=css) as demo:
     gr.Markdown(
         """
         <h1 style="text-align: center">Image to Fragrance</h1>
-        <p style="text-align: center">Upload an image, get a pro fragrance idea made by
+        <p style="text-align: center">Upload an image, get a pro fragrance idea made by Llama !</p>
         """
     )
     with gr.Row():