import spaces
import gradio as gr
import re
import os
import json
from typing import Union
hf_token = os.environ.get('HF_TOKEN')
from gradio_client import Client, handle_file
clipi_client = Client("fffiloni/CLIP-Interrogator-2")
from transformers import AutoTokenizer, AutoModelForCausalLM
model_path = "meta-llama/Llama-2-7b-chat-hf"
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False, use_auth_token=hf_token)
model = AutoModelForCausalLM.from_pretrained(model_path, use_auth_token=hf_token).half().cuda()
#client = Client("https://fffiloni-test-llama-api-debug.hf.space/", hf_token=hf_token)
clipi_client = Client("https://fffiloni-clip-interrogator-2.hf.space/")
# FLUX
import numpy as np
import random
import torch
from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, AutoencoderTiny, AutoencoderKL
from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast
from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=dtype).to(device)
pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=dtype, vae=taef1).to(device)
torch.cuda.empty_cache()
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048
#pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
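# Note: the iterable live-preview call is imported but left disabled above; infer_flux()
# below calls the pipeline once and returns only the final image.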
@spaces.GPU
def infer_flux(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)

    img = pipe(
        prompt=prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
        output_type="pil",
        good_vae=good_vae,
    ).images[0]

    # Save to disk and return the file path expected by the gr.Image output
    img.save("flux-dev.png")
    return "flux-dev.png"
@spaces.GPU
def llama_gen_fragrance(scene):
instruction = """[INST] <<SYS>>\n
You are a poetic perfumer. Your role is to create the imaginary scent of a described scene.
You must always respond using the following structure:
---
Perfume Name:
An original, evocative, and unique name — in French or English.
Tagline:
A short, poetic sentence — like a perfume advertisement hook.
Poetic Olfactory Description:
A freeform and expressive description of the scent ambiance evoked by the scene. Use rich sensory, emotional, and metaphorical language. Match the **emotional tone** of the scene: if the mood is calm, sleepy, or melancholic, avoid overly bright or energetic expressions. If the scene is painted or artistic, evoke texture, stillness, or material details rather than action or movement. Be subtle and precise.
**Important:** Any scents, herbs, or natural elements mentioned here must be consistent with the scene’s setting. Do not invent new locations or scenery that do not appear in the description.
Olfactory Pyramid (technical):
- Top Notes:
List 3–4 real, concrete scent materials that would be perceived first. These must be plausible fragrance ingredients (e.g. herbs, resins, citrus peels, spices, aldehydes, etc.). Pick notes that reflect the **real mood, climate, and setting** of the scene. Do not add locations or elements that don’t appear in the scene. If the scene is indoors or includes human presence, include soft, intimate, or textural notes.
- Heart Notes:
List 3–4 real fragrance elements that give body and soul to the perfume. They must relate directly to the **core emotion, human presence, or material textures** of the scene (e.g. warm fabric, skin, dry flowers, books, wood, canvas). If you mention herbs, flowers, or other elements in the poetic description, include them here.
- Base Notes:
List 3–4 real, longer-lasting ingredients such as woods, musks, resins, or earthy accords. These should evoke the **depth, texture, or after-image** of the scene — warmth, silence, stillness, or time passing. Avoid generic bases unless they fit the mood. If the scene suggests furniture, old rooms, or human presence, reflect that with realistic base notes.
Consistency Rule:
The top, heart, and base notes must not introduce new ideas, plants, or places that were not in the poetic description or the scene. Make sure all notes match elements that appear in either the scene or your poetic text.
General Atmosphere:
Summarize the fragrance’s evolution and overall emotional impression. Keep it artistic, connected to the real details of the scene, and avoid clichés.
Image Description (for marketing visuals):
Describe an imagined marketing image that captures the perfume’s essence. **Begin your description by focusing exclusively on the perfume bottle as the single and main subject.**
Describe the bottle’s shape, glass texture, cap, and label — the label must clearly display the exact **Perfume Name** generated in this output.
Specify the typography style used on the label text, ensuring it reflects the perfume’s mood and story (for example, elegant script for romantic scents, bold sans-serif for modern ones, vintage serif for nostalgic fragrances).
The bottle must be visually dominant and centrally placed, occupying most of the image frame, shown in sharp focus and fine detail.
The background should be minimal, abstract, or atmospheric — such as gradients, soft light, fabric textures, or mist — with **no depiction of people, animals, symbols, or narrative scenes.**
Do not mention or describe any human figures, characters, or storytelling elements unrelated to the bottle design itself.
Use cinematic luxury advertising codes: refined shadows, soft directional lighting, elegant minimalism, and a product-hero perspective.
---
Always ensure that:
– The fragrance matches the mood and visual setting of the scene
– All ingredients are real, plausible, and fit together naturally
– No invented scenery or extra context is added
– The poetic description and pyramid share the same notes and details
– The image description **must mention the exact Perfume Name on the label**, and **focus exclusively on the perfume bottle as the main subject**
– No humans, animals, narrative scenes, or symbolic objects unrelated to the bottle design are described
– Each perfume feels unique and consistent
Here is the scene description to analyze:
\n<</SYS>>\n\n{} [/INST]"""
    prompt = instruction.format(scene)

    generate_ids = model.generate(tokenizer(prompt, return_tensors='pt').input_ids.cuda(), max_new_tokens=4096)
    output_text = tokenizer.decode(generate_ids[0], skip_special_tokens=True)

    # Strip the echoed [INST]...[/INST] prompt from the decoded output
    pattern = r'\[INST\].*?\[/INST\]'
    cleaned_text = re.sub(pattern, '', output_text, flags=re.DOTALL)

    return cleaned_text
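
# For reference: downstream parsing assumes the model reply roughly follows the section
# headers requested in the prompt above, e.g.
#
#   Perfume Name: <name>
#   Tagline: "<one-line hook>"
#   Poetic Olfactory Description: "<freeform text>"
#   Olfactory Pyramid (technical):
#   - Top Notes: <3-4 ingredients>
#   - Heart Notes: <3-4 ingredients>
#   - Base Notes: <3-4 ingredients>
#   General Atmosphere: <summary>
#   Image Description: "<bottle-only marketing visual>"
#
# The model does not always quote or bullet these sections the same way, which is why
# extract_notes() and parse_perfume_description() below try several fallback patterns.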
def extract_notes(text, section_name):
    # 1. Try a block of bullets under the section header
    pattern_block = rf'{section_name}:\s*\n((?:\*.*(?:\n|$))+)'
    match_block = re.search(pattern_block, text, re.MULTILINE)
    if match_block:
        notes_text = match_block.group(1)
        notes = []
        for line in notes_text.strip().splitlines():
            bullet = line.strip().lstrip('*').strip()
            if ':' in bullet:
                note, desc = bullet.split(':', 1)
                notes.append({'note': note.strip(), 'description': desc.strip()})
            else:
                notes.append({'note': bullet, 'description': ''})
        return notes

    # 2. Try inline bullet style: * Section: item1, item2, item3
    pattern_inline = rf'\* {section_name}:\s*(.+)'
    match_inline = re.search(pattern_inline, text)
    if match_inline:
        notes_raw = match_inline.group(1).strip()
        notes = []
        for item in notes_raw.split(','):
            notes.append({'note': item.strip(), 'description': ''})
        return notes

    # 3. Try plain line style: Section: item1, item2, item3 (no bullet)
    pattern_line = rf'^{section_name}:\s*(.+)$'
    match_line = re.search(pattern_line, text, re.MULTILINE)
    if match_line:
        notes_raw = match_line.group(1).strip()
        notes = []
        for item in notes_raw.split(','):
            notes.append({'note': item.strip(), 'description': ''})
        return notes

    return []
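
# Illustrative example (not taken from real model output): for a reply containing the line
# "Top Notes: bergamot, sea salt, white tea", extract_notes(reply, 'Top Notes') falls
# through to the plain-line pattern and returns
# [{'note': 'bergamot', 'description': ''},
#  {'note': 'sea salt', 'description': ''},
#  {'note': 'white tea', 'description': ''}]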
def parse_perfume_description(text: str) -> dict:
    # Perfume Name
    name_match = re.search(r'Perfume Name:\s*(.+)', text)
    perfume_name = name_match.group(1).strip() if name_match else ""

    # Tagline (quoted)
    tagline = re.search(r'Tagline:\s*"(.*?)"', text, re.DOTALL)
    tagline = tagline.group(1).strip() if tagline else ""

    # Poetic Olfactory Description: prefer a quoted block, otherwise read up to the next section
    poetic_desc_match = re.search(
        r'Poetic Olfactory Description:\s*"(.*?)"', text, re.DOTALL)
    if poetic_desc_match:
        poetic_desc = poetic_desc_match.group(1).strip()
    else:
        poetic_desc_match = re.search(
            r'Poetic Olfactory Description:\s*(.*?)\s*(Olfactory Pyramid:|Image Description:|General Atmosphere:)',
            text, re.DOTALL)
        poetic_desc = poetic_desc_match.group(1).strip() if poetic_desc_match else ""

    # General Atmosphere: stop at Image Description if present
    general_atmosphere_match = re.search(
        r'General Atmosphere:\s*(.*?)(?:\s*Image Description:|$)', text, re.DOTALL)
    general_atmosphere = general_atmosphere_match.group(1).strip() if general_atmosphere_match else ""

    # Image Description: prefer a quoted block, otherwise take everything to the end
    image_desc_match = re.search(
        r'Image Description:\s*"(.*?)"', text, re.DOTALL)
    if image_desc_match:
        image_desc = image_desc_match.group(1).strip()
    else:
        image_desc_match = re.search(
            r'Image Description:\s*(.*?)$', text, re.DOTALL)
        image_desc = image_desc_match.group(1).strip() if image_desc_match else ""

    # 🗂️ Smart bullet extractor
    top_notes = extract_notes(text, 'Top Notes')
    heart_notes = extract_notes(text, 'Heart Notes')
    base_notes = extract_notes(text, 'Base Notes')

    result = {
        'Perfume Name': perfume_name,
        'Tagline': tagline,
        'Poetic Olfactory Description': poetic_desc,
        'Image Description': image_desc,
        'Olfactory Pyramid': {
            'Top Notes': top_notes,
            'Heart Notes': heart_notes,
            'Base Notes': base_notes
        },
        'General Atmosphere': general_atmosphere
    }

    return result
def extract_field(data: Union[str, dict], field_name: str) -> str:
    """
    Extracts a specific field value from a JSON string or Python dict.

    Args:
        data (Union[str, dict]): The JSON string or dict to extract from.
        field_name (str): The exact field name to extract.

    Returns:
        str: The extracted field value as a string.
    """
    if isinstance(data, str):
        try:
            data = json.loads(data)
        except json.JSONDecodeError:
            raise ValueError("Invalid JSON string provided")

    if not isinstance(data, dict):
        raise TypeError("Input must be a dict or a valid JSON string")

    value = data.get(field_name) or data.get(field_name.lower()) or None
    if value is None:
        raise KeyError(f"No field named '{field_name}' found in the data")

    return str(value).strip()
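
# In infer() below, extract_field(parsed, "Image Description") pulls the bottle-only
# marketing prompt out of the parsed dict so it can be handed to FLUX.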
def get_text_after_colon(input_text):
    # Find the first occurrence of ":"
    colon_index = input_text.find(":")

    # Check if ":" exists in the input_text
    if colon_index != -1:
        # Extract the text after the colon
        result_text = input_text[colon_index + 1:].strip()
        return result_text
    else:
        # Return the original text if ":" is not found
        return input_text
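
# get_text_after_colon() is a small utility that is not called anywhere in the current
# pipeline; infer() relies on parse_perfume_description() and extract_field() instead.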
def infer(image_input):
    """Full pipeline: caption the image, ask Llama-2 for a fragrance concept,
    parse the reply, then render the bottle visual with FLUX."""
    gr.Info('Calling CLIP Interrogator ...')
    clipi_result = clipi_client.predict(
        image=handle_file(image_input),
        mode="best",
        best_max_flavors=4,
        api_name="/clipi2"
    )
    print(clipi_result)

    llama_q = clipi_result

    gr.Info('Calling Llama2 ...')
    result = llama_gen_fragrance(llama_q)
    print(f"Llama2 result: {result}")

    parsed = parse_perfume_description(result)
    image_desc = extract_field(parsed, "Image Description")
    print(image_desc)

    gen_bottle = infer_flux(image_desc)

    return result, parsed, gen_bottle
css="""
#col-container {max-width: 910px; margin-left: auto; margin-right: auto;}
"""
with gr.Blocks(css=css) as demo:
with gr.Column(elem_id="col-container"):
gr.Markdown(
"""
<h1 style="text-align: center">Image to Fragrance</h1>
<p style="text-align: center">Upload an image, get a pro fragrance idea made by Llama2 !</p>
"""
)
with gr.Row():
with gr.Column():
image_in = gr.Image(label="Image input", type="filepath", elem_id="image-in")
submit_btn = gr.Button('Give me a Fragrance')
with gr.Column():
#caption = gr.Textbox(label="Generated Caption")
fragrance = gr.Textbox(label="generated Fragrance", elem_id="fragrance")
json_res = gr.JSON(label="JSON")
bottle_res = gr.Image(label="Flacon")
submit_btn.click(fn=infer, inputs=[image_in], outputs=[fragrance, json_res, bottle_res])
demo.queue(max_size=12).launch(ssr_mode=False, mcp_server=True)