fffiloni commited on
Commit
5c013fe
·
verified ·
1 Parent(s): e2bd9cf

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +146 -0
app.py ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import re

import spaces
import gradio as gr
from gradio_client import Client, handle_file
from transformers import AutoTokenizer, AutoModelForCausalLM

# Hugging Face token (needed for the gated Llama-2 checkpoint).
hf_token = os.environ.get('HF_TOKEN')

# Remote CLIP-Interrogator Space used to caption the uploaded image.
# BUG FIX: this client was assigned twice in the original (once by Space id,
# once by direct URL — both point at the same Space); keep a single client.
clipi_client = Client("https://fffiloni-clip-interrogator-2.hf.space/")

model_path = "meta-llama/Llama-2-7b-chat-hf"

# Load Llama-2 chat in half precision on the GPU.
# NOTE(review): `use_auth_token` is deprecated in recent transformers in
# favour of `token`; kept as-is for compatibility with the pinned version.
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False, use_auth_token=hf_token)
model = AutoModelForCausalLM.from_pretrained(model_path, use_auth_token=hf_token).half().cuda()
@spaces.GPU
def llama_gen_fragrance(scene):
    """Ask Llama-2 to invent an imaginary perfume for the described scene.

    Parameters
    ----------
    scene : str
        Free-text description of a scene (here: a CLIP-Interrogator caption).

    Returns
    -------
    str
        The decoded model output with the echoed ``[INST]...[/INST]`` prompt
        stripped out.
    """
    # BUG FIX: this template must be a PLAIN string, not an f-string. The
    # empty ``{}`` placeholder is filled via ``str.format`` below, and an
    # empty ``{}`` inside an f-string is a SyntaxError at import time.
    instruction = """[INST] <<SYS>>\n
You are a poetic perfumer. Your role is to create the imaginary scent of a described scene.

You must always respond using the following structure:

---

**Perfume Name**:
[An original, evocative, and unique name — in French or English]

**Tagline**:
[A short, poetic sentence — like a perfume advertisement hook]

**Poetic Olfactory Description**:
[A freeform and expressive description of the scent ambiance evoked by the scene. Use sensory, emotional, and evocative language. You may include metaphors, seasonal references, or mood imagery.]

**Olfactory Pyramid (technical)**:

- **Top Notes**: [3–4 precise and vivid elements. Example: frozen pine, mandarin peel, cold ozone]
- **Heart Notes**: [3–4 elements suggesting warmth or the body of the fragrance. Example: smoked wood, soft leather, toasted bread]
- **Base Notes**: [3–4 deeper, earthier, or longer-lasting elements. Example: white musk, ash, ambergris]

**General Atmosphere**:
[A final synthesis that summarizes the overall impression of the fragrance — contrast, duration, evolution, or emotional tone. Keep it artistic.]

---

Generate one single imaginary perfume following these instructions.
Here is the scene to analyze:
\n<</SYS>>\n\n{} [/INST]"""

    # Substitute the scene into the ``{}`` slot of the system prompt.
    prompt = instruction.format(scene)

    # Tokenize on GPU and generate, then decode without special tokens.
    generate_ids = model.generate(tokenizer(prompt, return_tensors='pt').input_ids.cuda(), max_new_tokens=4096)
    output_text = tokenizer.decode(generate_ids[0], skip_special_tokens=True)
    # The decoded text echoes the full prompt; remove the [INST]...[/INST]
    # span (DOTALL so the multi-line system prompt is matched).
    pattern = r'\[INST\].*?\[/INST\]'
    cleaned_text = re.sub(pattern, '', output_text, flags=re.DOTALL)
    return cleaned_text
def get_text_after_colon(input_text):
    """Return the text after the first ``:`` in *input_text*, stripped.

    If the string contains no colon, it is returned unchanged.
    """
    # partition splits on the FIRST colon only; sep is empty when absent.
    _, sep, tail = input_text.partition(":")
    return tail.strip() if sep else input_text
def infer(image_input, audience=None):
    """Caption the image via the remote CLIP-Interrogator, then turn that
    caption into a fragrance concept with Llama-2.

    Parameters
    ----------
    image_input : str
        Filepath of the uploaded image (``gr.Image(type="filepath")``).
    audience : optional
        Unused. BUG FIX: the UI wires only ``image_in`` as input, so this
        parameter must have a default or every submit raises TypeError.
        Kept (with a default) for backward compatibility.

    Returns
    -------
    str
        Generated fragrance text with a blank line between paragraphs.
    """
    gr.Info('Calling CLIP Interrogator ...')

    clipi_result = clipi_client.predict(
        image=handle_file(image_input),
        mode="best",
        best_max_flavors=4,
        api_name="/clipi2"
    )
    print(clipi_result)

    llama_q = clipi_result

    gr.Info('Calling Llama2 ...')
    result = llama_gen_fragrance(llama_q)

    print(f"Llama2 result: {result}")

    # The model often prefixes a lead-in ("Sure, here is ...:"); drop it.
    result = get_text_after_colon(result)

    # Split the text into paragraphs based on actual line breaks
    paragraphs = result.split('\n')

    # Join the paragraphs back with an extra empty line between each paragraph
    formatted_text = '\n\n'.join(paragraphs)

    return formatted_text
# Page styling: centered column and a larger font for the result textbox.
css = """
#col-container {max-width: 910px; margin-left: auto; margin-right: auto;}
div#fragrance textarea {
    font-size: 1.5em;
    line-height: 1.4em;
}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(
            """
            <h1 style="text-align: center">Image to Fragrance</h1>
            <p style="text-align: center">Upload an image, get a pro fragrance idea made by Llama2 !</p>
            """
        )
        with gr.Row():
            with gr.Column():
                image_in = gr.Image(label="Image input", type="filepath", elem_id="image-in")

                submit_btn = gr.Button('Tell me a story')
            with gr.Column():
                fragrance = gr.Textbox(label="generated Fragrance", elem_id="fragrance")

        # BUG FIX: each example row must match ``inputs`` (one component).
        # The original rows carried a stale second value ("Children"/"Adult")
        # that does not correspond to any input component.
        gr.Examples(examples=[["./examples/crabby.png"], ["./examples/hopper.jpeg"]],
                    fn=infer,
                    inputs=[image_in],
                    outputs=[fragrance],
                    cache_examples=False
                    )

    submit_btn.click(fn=infer, inputs=[image_in], outputs=[fragrance])

demo.queue(max_size=12).launch(ssr_mode=False, mcp_server=True)