Update app.py
app.py
CHANGED
@@ -2,8 +2,7 @@ import gradio as gr
 import torch
 from transformers import (
     Idefics2Processor, Idefics2ForConditionalGeneration,
-    Blip2Processor, Blip2ForConditionalGeneration
-    BitsAndBytesConfig
+    Blip2Processor, Blip2ForConditionalGeneration
 )
 from PIL import Image
 import time
@@ -27,7 +26,7 @@ models = {
         "model_id": "HuggingFaceM4/idefics2-8b",
         "processor_class": Idefics2Processor,
         "model_class": Idefics2ForConditionalGeneration,
-        "caption_prompt": "<image>Describe
+        "caption_prompt": "<image>Describe the image in detail"
     },
     "BLIP2": {
         "model_id": "Salesforce/blip2-opt-2.7b",
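The captioning branch that consumes `caption_prompt` is untouched by this commit, so it never appears in the hunks. As a reading aid, here is a minimal sketch of what it presumably looks like, mirroring the processor/generate pattern visible in the VQA branch further down; the exact call in app.py is not shown here, and `max_new_tokens=50` is an assumed value:

```python
# Hypothetical reconstruction of the unchanged captioning branch.
prompt = models[model_name].get("caption_prompt", "")
inputs = processor(images=image, text=prompt, return_tensors="pt").to(device)
start_time = time.time()
output_ids = model.generate(**inputs, max_new_tokens=50)  # assumed token budget
caption = processor.decode(output_ids[0], skip_special_tokens=True)
```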
@@ -37,32 +36,25 @@ models = {
     }
 }
 
-# Load models
+# Load models (pre-loaded to avoid delays)
 model_instances = {}
 for model_name, config in models.items():
-    quantization_config = BitsAndBytesConfig(load_in_4bit=True) if "IDEFICS2" in model_name else None
     processor = config["processor_class"].from_pretrained(config["model_id"])
-    model = config["model_class"].from_pretrained(config["model_id"]
+    model = config["model_class"].from_pretrained(config["model_id"]).to(device)
     model_instances[model_name] = (processor, model)
 
 # Predefined VQA questions
 vqa_questions = [
-    "
-    "
+    "Are there people in the image?",
+    "Which color predominates in the image?"
 ]
 
-# Generic reference for BLEU (you can
-
-# Placeholder: implement logic to map image_path to COCO captions
-# For now, we use an improved generic reference
-    return ["Una sala de estar con muebles y una chimenea"]  # example
+# Generic reference for BLEU (adjust as needed)
+reference_caption = ["An image with people and various objects"]
 
-
-results = []
-
-def infer_and_store(image, model_name, task, question=None):
+def infer(image, model_name, task, question=None):
     if image is None:
-        return "Por favor, sube una imagen.", None, None, None, None, None
+        return "Por favor, sube una imagen.", None, None, None, None, None
 
     # Open and prepare the image
     image = Image.open(image).convert("RGB")
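On the dropped quantization path: the old code built a `BitsAndBytesConfig(load_in_4bit=True)` for IDEFICS2, though the truncated `from_pretrained` line never shows it being passed through, while the new code loads both models in full precision and moves them with `.to(device)`. If the 8B IDEFICS2 checkpoint outgrows the available VRAM, a sketch of actually wiring 4-bit loading back in (assuming `bitsandbytes` and `accelerate` are installed) could look like:

```python
from transformers import BitsAndBytesConfig

# Sketch only: quantize the large IDEFICS2 checkpoint to 4 bits.
# A quantized model must not be moved again with .to(device).
bnb_config = BitsAndBytesConfig(load_in_4bit=True)

for model_name, config in models.items():
    processor = config["processor_class"].from_pretrained(config["model_id"])
    if model_name == "IDEFICS2":
        model = config["model_class"].from_pretrained(
            config["model_id"],
            quantization_config=bnb_config,  # 4-bit weights
            device_map="auto",               # let accelerate place layers
        )
    else:
        model = config["model_class"].from_pretrained(config["model_id"]).to(device)
    model_instances[model_name] = (processor, model)
```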
@@ -87,28 +79,13 @@ def infer_and_store(image, model_name, task, question=None):
         caption = processor.decode(output_ids[0], skip_special_tokens=True)
         inference_time = time.time() - start_time
 
-        #
-
-
-
-        # Store results
-        results.append({
-            "Imagen": image.name if hasattr(image, "name") else "desconocida",
-            "Modelo": model_name,
-            "Tarea": task,
-            "Subtítulo": caption,
-            "Tiempo Captioning (s)": inference_time,
-            "Pregunta VQA": None,
-            "Respuesta VQA": None,
-            "Tiempo VQA (s)": None,
-            "VRAM (GB)": vram,
-            "Puntuación BLEU": bleu_score
-        })
-
-        return (caption, inference_time, None, None, vram, bleu_score, f"Modelo: {model_name}\nTarea: Captioning\nSubtítulo: {caption}\nTiempo: {inference_time:.3f} s\nVRAM: {vram:.3f} GB\nBLEU: {bleu_score:.3f}")
+        # Compute BLEU (simplified, using a generic reference)
+        bleu_score = sentence_bleu([reference_caption[0].split()], caption.split()) if caption else 0.0
+
+        return (caption, inference_time, None, None, vram, bleu_score)
 
     elif task == "vqa" and question:
-        vqa_text = question if "BLIP2" in model_name else f"<image>
+        vqa_text = question if "BLIP2" in model_name else f"<image>Q: {question}"
         inputs = processor(images=image, text=vqa_text, return_tensors="pt").to(device)
         output_ids = model.generate(
             **inputs,
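`sentence_bleu` is called above, but its import never appears in this diff; it presumably comes from NLTK. A self-contained sketch of the assumed setup, with smoothing added because a short caption sharing no 4-grams with the single generic reference otherwise scores essentially zero:

```python
# Assumed import (not visible in the diff): NLTK's sentence-level BLEU.
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction

reference_caption = ["An image with people and various objects"]
caption = "a living room with furniture and a fireplace"

# method1 smoothing avoids the hard zero of unsmoothed BLEU on short texts.
smoothie = SmoothingFunction().method1
bleu = sentence_bleu(
    [reference_caption[0].split()],  # one tokenized reference
    caption.split(),                 # tokenized hypothesis
    smoothing_function=smoothie,
)
print(f"BLEU: {bleu:.3f}")
```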
@@ -119,53 +96,38 @@ def infer_and_store(image, model_name, task, question=None):
         vqa_answer = processor.decode(output_ids[0], skip_special_tokens=True)
         inference_time = time.time() - start_time
 
-
-        results.append({
-            "Imagen": image.name if hasattr(image, "name") else "desconocida",
-            "Modelo": model_name,
-            "Tarea": task,
-            "Subtítulo": None,
-            "Tiempo Captioning (s)": None,
-            "Pregunta VQA": question,
-            "Respuesta VQA": vqa_answer,
-            "Tiempo VQA (s)": inference_time,
-            "VRAM (GB)": vram,
-            "Puntuación BLEU": None
-        })
-
-        return (None, None, vqa_answer, inference_time, vram, None, f"Modelo: {model_name}\nTarea: VQA\nPregunta: {question}\nRespuesta: {vqa_answer}\nTiempo: {inference_time:.3f} s\nVRAM: {vram:.3f} GB")
+        return (None, None, vqa_answer, inference_time, vram, None)
 
-    return "Selecciona una tarea válida y, para VQA, una pregunta
+    return "Selecciona una tarea válida y, para VQA, una pregunta.", None, None, None, None, None
 
 # Gradio interface
-with gr.Blocks(title="
+with gr.Blocks(title="MLLM Benchmark Demo") as demo:
     gr.Markdown("# Benchmark para Modelos Multimodales (MLLMs)")
-    gr.Markdown("Sube una imagen, selecciona un modelo y una tarea, y obtén resultados de
+    gr.Markdown("Sube una imagen, selecciona un modelo y una tarea, y obtén resultados de captioning o VQA.")
 
     with gr.Row():
         with gr.Column():
             image_input = gr.Image(type="filepath", label="Subir Imagen")
             model_dropdown = gr.Dropdown(choices=["IDEFICS2", "BLIP2"], label="Seleccionar Modelo", value="IDEFICS2")
             task_dropdown = gr.Dropdown(choices=["captioning", "vqa"], label="Seleccionar Tarea", value="captioning")
-            question_input = gr.
+            question_input = gr.Textbox(label="Pregunta VQA (opcional, solo para VQA)", placeholder="Ej: Are there people in the image?")
             submit_btn = gr.Button("Generar")
 
         with gr.Column():
            caption_output = gr.Textbox(label="Subtítulo Generado")
            vqa_output = gr.Textbox(label="Respuesta VQA")
            metrics_output = gr.Textbox(label="Métricas (Tiempo, VRAM, BLEU)")
-           results_output = gr.Textbox(label="Resumen de Resultados", lines=10)
 
     submit_btn.click(
-        fn=
+        fn=infer,
         inputs=[image_input, model_dropdown, task_dropdown, question_input],
-        outputs=[caption_output, gr.Number(label="Tiempo Captioning (s)"), vqa_output, gr.Number(label="Tiempo VQA (s)"), gr.Number(label="VRAM (GB)"), gr.Number(label="
+        outputs=[caption_output, gr.Number(label="Tiempo Captioning (s)"), vqa_output, gr.Number(label="Tiempo VQA (s)"), gr.Number(label="VRAM (GB)"), gr.Number(label="BLEU Score")]
     )
 
     gr.Markdown("### Notas")
     gr.Markdown("""
-    -
-    - La
+    - Para mejorar la velocidad de inferencia, descarga en local y usa una GPU avanzada.
+    - La métrica BLEU usa una referencia genérica y puede no reflejar la calidad real.
     - Para más detalles, consulta el [repositorio del paper](https://huggingface.co/spaces/Pdro-ruiz/MLLM_Estado_del_Arte_Feb25/tree/main).
     """)
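One caveat in the new `.click()` wiring: the `gr.Number` outputs are instantiated inline inside `outputs=[...]`, so Gradio renders them wherever that call executes (below the two columns) rather than in the results column, and the declared `metrics_output` textbox is never wired to any output at all. A sketch of the more conventional wiring, with illustrative component names that do not exist in app.py:

```python
# Declare output components in the layout, then reference them in .click().
with gr.Column():
    caption_output = gr.Textbox(label="Subtítulo Generado")
    caption_time_output = gr.Number(label="Tiempo Captioning (s)")  # illustrative
    vqa_output = gr.Textbox(label="Respuesta VQA")
    vqa_time_output = gr.Number(label="Tiempo VQA (s)")             # illustrative
    vram_output = gr.Number(label="VRAM (GB)")                      # illustrative
    bleu_output = gr.Number(label="BLEU Score")                     # illustrative

submit_btn.click(
    fn=infer,
    inputs=[image_input, model_dropdown, task_dropdown, question_input],
    outputs=[caption_output, caption_time_output, vqa_output,
             vqa_time_output, vram_output, bleu_output],
)
```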
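Finally, every return path carries a `vram` value, yet nothing in this diff shows how it is computed. One plausible implementation, purely an assumption about what app.py does outside these hunks, reads CUDA's peak-allocation counter around `generate()`:

```python
import torch

# Assumed helper (not shown in the diff): peak GPU memory in GiB.
def measure_peak_vram_gb() -> float:
    if not torch.cuda.is_available():
        return 0.0
    return torch.cuda.max_memory_allocated() / 1024**3

# Usage sketch around generation:
# torch.cuda.reset_peak_memory_stats()
# output_ids = model.generate(**inputs)
# vram = measure_peak_vram_gb()
```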