Update app.py
app.py
CHANGED
@@ -6,9 +6,9 @@ import torch
 from PIL import Image
 import gradio as gr
 import subprocess
-from
-from
-from
+from llava.model.builder import load_pretrained_model
+from llava.mm_utils import get_model_name_from_path
+from llava.eval.run_llava import eval_model

 # Load the LLaVA model and processor
 llava_model_path = "/My_new_LLaVA/llava-fine_tune_model"
@@ -32,30 +32,6 @@ class Args:
         self.num_beams = num_beams
         self.max_new_tokens = max_new_tokens

-# # Function to predict using Idefics2
-# def predict_idefics2(image, question, temperature, max_tokens):
-#     image = image.convert("RGB")
-#     images = [image]
-
-#     messages = [
-#         {
-#             "role": "user",
-#             "content": [
-#                 {"type": "image"},
-#                 {"type": "text", "text": question}
-#             ]
-#         }
-#     ]
-#     input_text = idefics2_processor.apply_chat_template(messages, add_generation_prompt=False).strip()
-
-#     inputs = idefics2_processor(text=[input_text], images=images, return_tensors="pt", padding=True).to("cuda:0")
-
-#     with torch.no_grad():
-#         outputs = idefics2_model.generate(**inputs, max_length=max_tokens, max_new_tokens=max_tokens, temperature=temperature)
-
-#     predictions = idefics2_processor.decode(outputs[0], skip_special_tokens=True)
-
-#     return predictions

 # Function to predict using LLaVA
 def predict_llava(image, question, temperature, max_tokens):
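The three imports added in this commit come from the LLaVA codebase (haotian-liu/LLaVA). Below is a minimal sketch of how they are typically combined with an Args-style container like the one partially visible above. Only num_beams and max_new_tokens appear in this diff, so the remaining fields and default values are assumptions based on LLaVA's eval_model interface, not code taken from this Space.

from llava.mm_utils import get_model_name_from_path
from llava.eval.run_llava import eval_model

llava_model_path = "/My_new_LLaVA/llava-fine_tune_model"

class Args:
    # Hypothetical field list mirroring what eval_model reads from its args object;
    # only num_beams and max_new_tokens are visible in the diff above.
    def __init__(self, model_path, image_file, query,
                 temperature=0.2, top_p=None, num_beams=1, max_new_tokens=512):
        self.model_path = model_path
        self.model_base = None
        self.model_name = get_model_name_from_path(model_path)
        self.image_file = image_file
        self.query = query
        self.conv_mode = None
        self.sep = ","
        self.temperature = temperature
        self.top_p = top_p
        self.num_beams = num_beams
        self.max_new_tokens = max_new_tokens

# eval_model loads the checkpoint (via load_pretrained_model), builds the
# conversation prompt, and runs generation for the given image and question.
args = Args(llava_model_path, image_file="example.jpg", query="Describe this image.")
eval_model(args)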
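predict_llava(image, question, temperature, max_tokens) is the callable that app.py exposes through Gradio (gradio is imported at the top of the file). The wiring below is a hypothetical sketch of such an interface; the component types, labels, and value ranges are assumptions, since the Gradio layout is not part of this diff.

import gradio as gr

# predict_llava is the function defined in app.py (signature shown in the diff above).
demo = gr.Interface(
    fn=predict_llava,
    inputs=[
        gr.Image(type="pil", label="Image"),
        gr.Textbox(label="Question"),
        gr.Slider(0.0, 1.0, value=0.2, label="Temperature"),
        gr.Slider(16, 1024, value=512, step=16, label="Max new tokens"),
    ],
    outputs=gr.Textbox(label="Answer"),
)

if __name__ == "__main__":
    demo.launch()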