fffiloni committed on
Commit
1a4a9b7
·
verified ·
1 Parent(s): 944d8b9

switch to moondream for caption

Browse files
Files changed (1) hide show
  1. app.py +7 -12
app.py CHANGED
@@ -8,7 +8,8 @@ hf_token = os.environ.get('HF_TOKEN')
8
 
9
  from gradio_client import Client, handle_file
10
 
11
- clipi_client = Client("fffiloni/CLIP-Interrogator-2")
 
12
 
13
  from transformers import AutoTokenizer, AutoModelForCausalLM
14
 
@@ -17,10 +18,6 @@ model_path = "meta-llama/Llama-2-7b-chat-hf"
17
  tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False, use_auth_token=hf_token)
18
  model = AutoModelForCausalLM.from_pretrained(model_path, use_auth_token=hf_token).half().cuda()
19
 
20
- #client = Client("https://fffiloni-test-llama-api-debug.hf.space/", hf_token=hf_token)
21
-
22
- clipi_client = Client("https://fffiloni-clip-interrogator-2.hf.space/")
23
-
24
  # FLUX
25
 
26
  import numpy as np
@@ -344,18 +341,16 @@ def find_best_perfumes_from_json(data: Union[str, dict], top_n: int = 5, thresho
344
 
345
  def infer(image_input):
346
 
347
- gr.Info('Calling CLIP Interrogator ...')
348
 
349
  yield None, None, None, None
350
- clipi_result = clipi_client.predict(
351
  image=handle_file(image_input),
352
- mode="best",
353
- best_max_flavors=4,
354
- api_name="/clipi2"
355
  )
356
- print(clipi_result)
357
 
358
- llama_q = clipi_result
359
 
360
  gr.Info('Calling Llama2 ...')
361
  result = llama_gen_fragrance(llama_q)
 
8
 
9
  from gradio_client import Client, handle_file
10
 
11
+ client = Client("fffiloni/moondream2", token=hf_token)
12
+
13
 
14
  from transformers import AutoTokenizer, AutoModelForCausalLM
15
 
 
18
  tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False, use_auth_token=hf_token)
19
  model = AutoModelForCausalLM.from_pretrained(model_path, use_auth_token=hf_token).half().cuda()
20
 
 
 
 
 
21
  # FLUX
22
 
23
  import numpy as np
 
341
 
342
  def infer(image_input):
343
 
344
+ gr.Info('Calling Moondream model for caption ...')
345
 
346
  yield None, None, None, None
347
+ moondream_result = client.predict(
348
  image=handle_file(image_input),
349
+ api_name="/infer"
 
 
350
  )
351
+ print(moondream_result)
352
 
353
+ llama_q = moondream_result
354
 
355
  gr.Info('Calling Llama2 ...')
356
  result = llama_gen_fragrance(llama_q)