CGQN committed on
Commit
7201b2d
Β·
verified Β·
1 Parent(s): ff121ef

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +92 -0
app.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import gradio as gr
3
+ from PIL import Image
4
+ from transformers import AutoModel, AutoTokenizer
5
+
6
+ # Initialize model and tokenizer
7
+ model = AutoModel.from_pretrained('openbmb/MiniCPM-V-4_5', trust_remote_code=True,
8
+ attn_implementation='sdpa', torch_dtype=torch.bfloat16)
9
+ model = model.eval().cuda()
10
+ tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM-V-4_5', trust_remote_code=True)
11
+
12
+ # Default chat parameters
13
+ enable_thinking = False
14
+
15
+ def chat_with_model(image, question, history):
16
+ """
17
+ Chat with the MiniCPM model.
18
+
19
+ Args:
20
+ image: PIL Image or None
21
+ question: str, user's text question
22
+ history: list of previous conversation turns
23
+
24
+ Returns:
25
+ str: model's response
26
+ list: updated conversation history
27
+ """
28
+ # Build messages with history context
29
+ msgs = []
30
+ for h in history:
31
+ msgs.append({"role": "user", "content": h[0]})
32
+ msgs.append({"role": "assistant", "content": h[1]})
33
+
34
+ # Add current user message
35
+ if image is not None:
36
+ msgs.append({"role": "user", "content": [image, question]})
37
+ else:
38
+ msgs.append({"role": "user", "content": question})
39
+
40
+ # Generate model response
41
+ answer = model.chat(
42
+ msgs=msgs,
43
+ tokenizer=tokenizer,
44
+ enable_thinking=enable_thinking
45
+ )
46
+
47
+ # Update history
48
+ history.append((question if image is None else [image, question], answer))
49
+ return answer, history
50
+
51
+ # Create Gradio interface
52
+ with gr.Blocks() as demo:
53
+ gr.Markdown("# MiniCPM-V-4.5 Chat Interface")
54
+ gr.Markdown("Upload an image and ask questions, or chat without an image")
55
+
56
+ # Store conversation history
57
+ chat_history = gr.State([])
58
+
59
+ with gr.Row():
60
+ with gr.Column():
61
+ image_input = gr.Image(type="pil", label="Upload Image (optional)")
62
+ question_input = gr.Textbox(label="Your Question", placeholder="Enter your question here...")
63
+ submit_btn = gr.Button("Submit")
64
+ clear_btn = gr.Button("Clear History")
65
+
66
+ with gr.Column():
67
+ response_output = gr.Textbox(label="Model Response", interactive=False)
68
+ chat_display = gr.Chatbot(label="Conversation History")
69
+
70
+ # Handle submit action
71
+ submit_btn.click(
72
+ fn=chat_with_model,
73
+ inputs=[image_input, question_input, chat_history],
74
+ outputs=[response_output, chat_history]
75
+ ).then(
76
+ fn=lambda history: history,
77
+ inputs=[chat_history],
78
+ outputs=[chat_display]
79
+ )
80
+
81
+ # Clear history
82
+ def clear_history():
83
+ return [], [], ""
84
+
85
+ clear_btn.click(
86
+ fn=clear_history,
87
+ outputs=[chat_history, chat_display, response_output]
88
+ )
89
+
90
+ # Launch demo
91
+ if __name__ == "__main__":
92
+ demo.launch()