import subprocess

import gradio as gr
import spaces
import torch
from PIL import Image
from transformers import AutoProcessor, Idefics3ForConditionalGeneration

# Install flash-attn at runtime; FLASH_ATTENTION_SKIP_CUDA_BUILD skips compiling
# the CUDA kernels from source so the install stays fast on the Space.
subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
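
# Load the processor and model once at startup; bfloat16 halves memory use
# compared to float32, and the model is moved to the GPU for inference.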
processor = AutoProcessor.from_pretrained("HuggingFaceM4/Idefics3-8B-Llama3")
model = Idefics3ForConditionalGeneration.from_pretrained(
    "HuggingFaceM4/Idefics3-8B-Llama3",
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
).to("cuda")

BAD_WORDS_IDS = processor.tokenizer(["<image>", "<fake_token_around_image>"], add_special_tokens=False).input_ids
EOS_WORDS_IDS = [processor.tokenizer.eos_token_id]
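

# On ZeroGPU Spaces, `@spaces.GPU` allocates a GPU for the duration of each
# call; on Spaces with a dedicated GPU it is a no-op.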
@spaces.GPU
def model_inference(
    images, text, assistant_prefix, decoding_strategy, temperature, max_new_tokens,
    repetition_penalty, top_p
):
    if text == "" and not images:
        raise gr.Error("Please input a query and optionally image(s).")

    if text == "" and images:
        raise gr.Error("Please input a text query along with the image(s).")

    # Keep the app on topic: only answer queries that mention a marketing-related keyword.
    marketing_keywords = ["product", "brand", "advertisement", "marketing", "strategy", "comparison", "analysis", "trend", "audience"]
    if not any(keyword in text.lower() for keyword in marketing_keywords):
        return "Your question is not in the marketing area. Please upload another image or try again."

    if isinstance(images, Image.Image):
        images = [images]

    # Apply the assistant prefix before the chat messages are built; prepending
    # it afterwards would have no effect on the prompt.
    if assistant_prefix:
        text = f"{assistant_prefix} {text}"

    resulting_messages = [
        {
            "role": "user",
            "content": [{"type": "image"}] + [
                {"type": "text", "text": text}
            ],
        }
    ]

    prompt = processor.apply_chat_template(resulting_messages, add_generation_prompt=True)
    inputs = processor(text=prompt, images=[images], return_tensors="pt")
    inputs = {k: v.to("cuda") for k, v in inputs.items()}
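
    # Base generation kwargs; strategy-specific options are added below.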
    generation_args = {
        "max_new_tokens": max_new_tokens,
        "repetition_penalty": repetition_penalty,
    }
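    # BAD_WORDS_IDS and EOS_WORDS_IDS are defined above but never passed to
    # `generate`; to keep the image placeholder tokens out of the output you
    # could add, e.g., generation_args["bad_words_ids"] = BAD_WORDS_IDS.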

    assert decoding_strategy in [
        "Greedy",
        "Top P Sampling",
    ]
    if decoding_strategy == "Greedy":
        generation_args["do_sample"] = False
    elif decoding_strategy == "Top P Sampling":
        generation_args["temperature"] = temperature
        generation_args["do_sample"] = True
        generation_args["top_p"] = top_p

    generation_args.update(inputs)

    generated_ids = model.generate(**generation_args)

    # Decode only the newly generated tokens, slicing off the prompt.
    generated_texts = processor.batch_decode(generated_ids[:, generation_args["input_ids"].size(1):], skip_special_tokens=True)
    return generated_texts[0]
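

# Build the Gradio UI: image and prompt inputs on top, with example rows and
# advanced generation parameters in an accordion below.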
with gr.Blocks(fill_height=True) as demo:
    gr.Markdown("## Marketing Vision App 📈")
    gr.Markdown("This app uses the [HuggingFaceM4/Idefics3-8B-Llama3](https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3) model to answer questions related to marketing. Upload an image and a text query, or try one of the examples.")
    gr.Markdown("**Disclaimer:** This app may not consistently follow prompts or handle complex tasks. However, adding a prefix to the assistant's response can significantly improve the output. You can also tune parameters such as the temperature when a sampling strategy is selected.")
    with gr.Column():
        image_input = gr.Image(label="Upload your Image", type="pil", scale=1)
        query_input = gr.Textbox(label="Prompt")
        assistant_prefix = gr.Textbox(label="Assistant Prefix", placeholder="Let's think step by step.")

        submit_btn = gr.Button("Submit")
        output = gr.Textbox(label="Output")

    with gr.Accordion(label="Example Inputs and Advanced Generation Parameters"):
        # Each row matches the `inputs` order used below: image, prompt,
        # assistant prefix, decoding strategy, temperature, max new tokens,
        # repetition penalty, top-p.
        examples = [
            ["example_images/iphone_vs_samsung.jpg", "I want to buy a smartphone with features similar to the one in the photo. Suggest models and provide a comparison.", None, "Top P Sampling", 0.5, 768, 1.1, 0.85],
            ["example_images/ad_analysis.png", "Analyze this advertisement and explain its marketing strategy.", None, "Top P Sampling", 0.5, 768, 1.1, 0.85],
            ["example_images/market_trends.png", "Analyze this marketing chart and explain the market trends it represents.", None, "Top P Sampling", 0.5, 768, 1.1, 0.85],
            ["example_images/social_media_post.png", "Critique this social media post about product quality and suggest improvements.", None, "Top P Sampling", 0.5, 768, 1.1, 0.85],
            ["example_images/brand_comparison.jpg", "Compare these three brands based on their price strategies.", None, "Top P Sampling", 0.5, 768, 1.1, 0.85],
            ["example_images/target_audience.jpg", "Analyze this image and suggest a target audience for the product.", None, "Top P Sampling", 0.5, 768, 1.1, 0.85]
        ]

        max_new_tokens = gr.Slider(
            minimum=8,
            maximum=1024,
            value=512,
            step=1,
            interactive=True,
            label="Maximum number of new tokens to generate",
        )
        repetition_penalty = gr.Slider(
            minimum=0.01,
            maximum=5.0,
            value=1.2,
            step=0.01,
            interactive=True,
            label="Repetition penalty",
            info="1.0 is equivalent to no penalty",
        )
        temperature = gr.Slider(
            minimum=0.0,
            maximum=5.0,
            value=0.4,
            step=0.1,
            interactive=True,
            label="Sampling temperature",
            info="Higher values will produce more diverse outputs.",
        )
        top_p = gr.Slider(
            minimum=0.01,
            maximum=0.99,
            value=0.8,
            step=0.01,
            interactive=True,
            label="Top P",
            info="Higher values sample more low-probability tokens.",
        )
        decoding_strategy = gr.Radio(
            [
                "Greedy",
                "Top P Sampling",
            ],
            value="Greedy",
            label="Decoding strategy",
            interactive=True,
            info="Greedy picks the most likely token at each step; Top P Sampling draws from the smallest set of tokens whose probabilities sum to top-p.",
        )
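
        # Temperature and top-p only affect sampling, so their sliders are shown
        # only when "Top P Sampling" is selected. Repetition penalty is applied
        # under both strategies, so its slider stays visible.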
        decoding_strategy.change(
            fn=lambda selection: gr.Slider(visible=(selection == "Top P Sampling")),
            inputs=decoding_strategy,
            outputs=temperature,
        )
        decoding_strategy.change(
            fn=lambda selection: gr.Slider(visible=(selection == "Top P Sampling")),
            inputs=decoding_strategy,
            outputs=top_p,
        )

        gr.Examples(
            examples=examples,
            inputs=[image_input, query_input, assistant_prefix, decoding_strategy, temperature,
                    max_new_tokens, repetition_penalty, top_p],
            outputs=output,
            fn=model_inference,
        )
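
    # Wire the Submit button to the inference function; the input order must
    # match the `model_inference` signature.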
    submit_btn.click(model_inference, inputs=[image_input, query_input, assistant_prefix, decoding_strategy, temperature,
                                              max_new_tokens, repetition_penalty, top_p], outputs=output)

demo.launch(debug=True)