switch to moondream for caption
app.py CHANGED
@@ -8,7 +8,8 @@ hf_token = os.environ.get('HF_TOKEN')
 
 from gradio_client import Client, handle_file
 
-
+client = Client("fffiloni/moondream2", token=hf_token)
+
 
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
@@ -17,10 +18,6 @@ model_path = "meta-llama/Llama-2-7b-chat-hf"
 tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False, use_auth_token=hf_token)
 model = AutoModelForCausalLM.from_pretrained(model_path, use_auth_token=hf_token).half().cuda()
 
-#client = Client("https://fffiloni-test-llama-api-debug.hf.space/", hf_token=hf_token)
-
-clipi_client = Client("https://fffiloni-clip-interrogator-2.hf.space/")
-
 # FLUX
 
 import numpy as np
@@ -344,18 +341,16 @@ def find_best_perfumes_from_json(data: Union[str, dict], top_n: int = 5, threshold
 
 def infer(image_input):
 
-    gr.Info('Calling
+    gr.Info('Calling Moondream model for caption ...')
 
     yield None, None, None, None
-
+    moondream_result = client.predict(
         image=handle_file(image_input),
-
-        best_max_flavors=4,
-        api_name="/clipi2"
+        api_name="/infer"
     )
-    print(
+    print(moondream_result)
 
-    llama_q =
+    llama_q = moondream_result
 
     gr.Info('Calling Llama2 ...')
     result = llama_gen_fragrance(llama_q)
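For reference, the new caption path reduces to a single gradio_client call against the fffiloni/moondream2 Space. The sketch below is a minimal standalone version of that call, assuming (as the commit does) that the Space's /infer endpoint returns the caption as a plain string; the caption_image helper and the example image path are illustrative and not part of app.py.

import os

from gradio_client import Client, handle_file

# Same env var app.py reads for authenticated calls
hf_token = os.environ.get("HF_TOKEN")
client = Client("fffiloni/moondream2", token=hf_token)

def caption_image(image_path: str) -> str:
    # handle_file wraps a local path or URL so gradio_client can upload it to the Space
    caption = client.predict(
        image=handle_file(image_path),
        api_name="/infer",
    )
    print(caption)  # mirrors the print(moondream_result) in the diff
    return caption

if __name__ == "__main__":
    # "example.jpg" is a placeholder path for illustration only
    llama_q = caption_image("example.jpg")

In app.py the same result is assigned to llama_q and passed to llama_gen_fragrance(), so the Moondream caption now seeds the Llama-2 prompt where the CLIP-Interrogator output did before this commit.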