Update app.py
app.py
CHANGED
@@ -5,11 +5,9 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 import gradio as gr
 import matplotlib.pyplot as plt
 
-
-device = torch.device('cpu') # Use 'cuda' if available
+device = torch.device('cpu')
 model_name_or_path = 'GoodBaiBai88/M3D-LaMed-Phi-3-4B'
 
-# Load model and tokenizer
 model = AutoModelForCausalLM.from_pretrained(
     model_name_or_path,
     torch_dtype=torch.float32,
@@ -24,7 +22,6 @@ tokenizer = AutoTokenizer.from_pretrained(
     trust_remote_code=True
 )
 
-# Storage
 chat_history = []
 current_image = None
 
@@ -45,6 +42,14 @@ def extract_and_display_images(image_path):
     plt.close()
     return output_path
 
+def upload_image(image):
+    global current_image
+    if image is None:
+        return "", None
+    current_image = image.name
+    preview_path = extract_and_display_images(current_image)
+    return "Image uploaded successfully!", preview_path
+
 def process_question(question):
     global current_image
     if current_image is None:
@@ -60,88 +65,132 @@ def process_question(question):
     generated_texts = tokenizer.batch_decode(generation, skip_special_tokens=True)
     return generated_texts[0]
 
-def upload_image(image):
-    global current_image
-    current_image = image.name
-    preview_path = extract_and_display_images(current_image)
-    return "Image uploaded successfully!", preview_path
-
 def chat_with_model(user_message):
     global chat_history
+    if not user_message.strip():
+        return chat_history
     response = process_question(user_message)
     chat_history.append((user_message, response))
     return chat_history
 
-#
+# Function to export chat history to a text file
+def export_chat_history():
+    history_text = ""
+    for user_msg, model_reply in chat_history:
+        history_text += f"User: {user_msg}\nAI: {model_reply}\n\n"
+    with open("chat_history.txt", "w") as f:
+        f.write(history_text)
+    return "Chat history exported as chat_history.txt"
+
+# UI
 with gr.Blocks(css="""
 body {
-    background:
-    font-family: '
-    color:
-}
-
-.gr-box {
-    border-radius: 16px;
-    background: rgba(255,255,255,0.1);
-    padding: 20px;
-    backdrop-filter: blur(10px);
-    box-shadow: 0 8px 32px 0 rgba( 31, 38, 135, 0.37 );
+    background: #f5f5f5;
+    font-family: 'Inter', sans-serif;
+    color: #333333;
 }
 
 h1 {
     text-align: center;
-    font-size:
+    font-size: 2em;
     margin-bottom: 20px;
-    color: #
+    color: #222;
+}
+
+.gr-box {
+    background: #ffffff;
+    padding: 20px;
+    border-radius: 10px;
+    box-shadow: 0px 4px 10px rgba(0, 0, 0, 0.1);
 }
 
 .gr-chatbot-container {
     overflow-y: auto;
     max-height: 500px;
+    scroll-behavior: smooth;
 }
 
 .gr-chatbot-message {
-    margin-bottom:
-    padding:
-    border-radius:
-    background:
-
-}
-
-.gr-chatbot-message:hover {
-    transform: scale(1.02);
-    background: rgba(255,255,255,0.1);
+    margin-bottom: 10px;
+    padding: 8px;
+    border-radius: 8px;
+    background: #f5f5f5;
+    animation: fadeIn 0.5s ease-out;
 }
 
 .gr-button {
-    background-color: #
-    border: none;
-    padding: 10px 20px;
-    border-radius: 20px;
+    background-color: #4CAF50;
     color: white;
-
-
+    border: none;
+    padding: 8px 16px;
+    border-radius: 6px;
+    cursor: pointer;
 }
 
 .gr-button:hover {
-    background-color: #
-
+    background-color: #45a049;
+}
+
+.gr-upload-btn {
+    background-color: #4CAF50;
+    color: white;
+    border-radius: 50%;
+    width: 50px;
+    height: 50px;
+    font-size: 24px;
+    display: flex;
+    align-items: center;
+    justify-content: center;
+    cursor: pointer;
+    border: none;
+    margin-top: 10px;
+}
+
+.gr-spinner {
+    margin: auto;
+    display: block;
+}
+
+@keyframes fadeIn {
+    0% { opacity: 0; }
+    100% { opacity: 1; }
 }
 """) as app:
-    gr.Markdown("#
+    gr.Markdown("# AI Powered Medical Image Analysis System")
 
     with gr.Row():
-        with gr.Column(scale=1
-
+        with gr.Column(scale=1):
+            with gr.Box(elem_id="chat_box", label="Chat Area", visible=True):
+                chatbot_ui = gr.Chatbot(value=[], label="Chat History")
         with gr.Column(scale=2):
-
-
-
-
-
-
-
-
-
+            with gr.Box(elem_id="upload_box", label="Upload Section"):
+                upload_button = gr.Button("+", elem_id="upload_btn", visible=True, interactive=True)
+                upload_section = gr.File(label="Upload NPY Image", type="filepath", visible=False)
+                upload_status = gr.Textbox(label="Status", interactive=False)
+                preview_img = gr.Image(label="Image Preview", interactive=False)
+                message_input = gr.Textbox(placeholder="Type your question here...", label="Your Message")
+                send_button = gr.Button("Send")
+                export_button = gr.Button("Export Chat History")
+                loading_spinner = gr.Spinner(visible=False)
+
+    # Handle file upload when "+" button is clicked
+    upload_button.click(lambda: upload_section.update(visible=True), None, upload_section)
+
+    # Display loading spinner when uploading an image
+    upload_section.upload(lambda *args: loading_spinner.update(visible=True), upload_section, None)
+    upload_section.upload(upload_image, upload_section, [upload_status, preview_img, loading_spinner.update(visible=False)])
+
+    # Display loading spinner while processing question
+    send_button.click(lambda *args: loading_spinner.update(visible=True), None, None)
+    send_button.click(chat_with_model, message_input, chatbot_ui)
+    send_button.click(lambda *args: loading_spinner.update(visible=False), None, None)
+    message_input.submit(chat_with_model, message_input, chatbot_ui)
+
+    # Export chat history functionality
+    export_button.click(export_chat_history)

+    # Auto-focus typing box and scroll to bottom after message sent
+    message_input.submit(lambda: gr.update(focus=True), None, message_input)
+    send_button.click(lambda: gr.update(focus=True), None, message_input)
 
 app.launch()
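upload_image reads image.name, but upload_section is declared with type="filepath", and in that mode Gradio hands the handler a plain string path, so the first upload would raise AttributeError. A sketch of a handler that tolerates both file modes (the isinstance branch is illustrative, not from this commit):

    def upload_image(image):
        global current_image
        if image is None:
            return "", None
        # type="filepath" delivers a str; the older type="file" delivers
        # a tempfile wrapper whose path is exposed as .name
        current_image = image if isinstance(image, str) else image.name
        preview_path = extract_and_display_images(current_image)
        return "Image uploaded successfully!", preview_path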
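Gradio ships no gr.Spinner component, so loading_spinner = gr.Spinner(visible=False) raises AttributeError at startup, and the outputs argument of an event listener must name components, not the result of an update call, so the third entry in the upload outputs list is also invalid. Gradio already overlays its own loading animation on the output components while a handler runs, so the spinner plumbing can simply be dropped; a sketch of the reduced wiring under that assumption:

    # The built-in loading overlay covers the "working" state, so no
    # manual show/hide events are needed.
    upload_section.upload(upload_image, upload_section, [upload_status, preview_img])
    send_button.click(chat_with_model, message_input, chatbot_ui)

The same reduction removes the three spinner-related send_button.click bindings. Note also that upload_section.update(visible=True) works in Gradio 3.x, but gr.update(visible=True) is the portable spelling.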
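chat_history and current_image are module-level globals, so on a shared Space every visitor reads and writes the same conversation and image. gr.State keeps the history per browser session; a sketch of the chat path only (history_state is a hypothetical name, not from the commit):

    history_state = gr.State([])  # one conversation per session

    def chat_with_model(user_message, history):
        if not user_message.strip():
            return history, history
        response = process_question(user_message)
        history.append((user_message, response))
        return history, history

    send_button.click(chat_with_model, [message_input, history_state],
                      [chatbot_ui, history_state])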
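export_chat_history returns a confirmation string, but export_button.click(export_chat_history) names no outputs, so the message is discarded and the user gets no feedback. Routing it into the existing status textbox makes it visible (reusing upload_status is an illustration choice):

    export_button.click(export_chat_history, None, upload_status)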
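focus is not a documented update attribute for gr.Textbox, so the two gr.update(focus=True) bindings will not focus the box and, depending on the Gradio version, may raise instead. Clearing the input after a send is a supported pattern that gives similar feedback; a sketch:

    # Empty the message box once the reply has been requested
    send_button.click(lambda: gr.update(value=""), None, message_input)
    message_input.submit(lambda: gr.update(value=""), None, message_input)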