Update app.py
app.py (CHANGED)
@@ -12,20 +12,30 @@ model_name_or_path = 'GoodBaiBai88/M3D-LaMed-Phi-3-4B'
 proj_out_num = 256
 
 # Load model and tokenizer
-model = AutoModelForCausalLM.from_pretrained(
-    model_name_or_path,
-    torch_dtype=dtype,
-    device_map=device,
-    trust_remote_code=True
-)
+try:
+    model = AutoModelForCausalLM.from_pretrained(
+        model_name_or_path,
+        torch_dtype=dtype,
+        device_map=device,
+        trust_remote_code=True
+    )
+    print("Model loaded successfully!")
+except Exception as e:
+    print(f"Error loading model: {e}")
+    raise
 
-tokenizer = AutoTokenizer.from_pretrained(
-    model_name_or_path,
-    model_max_length=512,
-    padding_side="right",
-    use_fast=False,
-    trust_remote_code=True
-)
+try:
+    tokenizer = AutoTokenizer.from_pretrained(
+        model_name_or_path,
+        model_max_length=512,
+        padding_side="right",
+        use_fast=False,
+        trust_remote_code=True
+    )
+    print("Tokenizer loaded successfully!")
+except Exception as e:
+    print(f"Error loading tokenizer: {e}")
+    raise
 
 # Chat history storage
 chat_history = []
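Both `from_pretrained` calls assume that `dtype`, `device`, and the `transformers` auto classes are defined near the top of app.py. A minimal sketch of that assumed preamble follows; only `model_name_or_path` and `proj_out_num` are confirmed by the hunk, while the imports and the `dtype`/`device` definitions are illustrative guesses based on the keyword arguments used above.

```python
# Assumed preamble for app.py. Only model_name_or_path and proj_out_num
# appear in the diff; the imports and the dtype/device choices are guesses
# inferred from the torch_dtype= and device_map= arguments used above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"       # assumed
dtype = torch.float16 if device == "cuda" else torch.float32  # assumed

model_name_or_path = 'GoodBaiBai88/M3D-LaMed-Phi-3-4B'
proj_out_num = 256
```

Printing and then re-raising in each `except` block keeps the Space's startup logs informative while still failing fast if either the model or the tokenizer cannot be loaded.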
@@ -117,10 +127,7 @@ def clear_chat():
 
 # Gradio UI
 with gr.Blocks(theme=gr.themes.Soft()) as chat_ui:
-    #
-    with gr.Row():
-        gr.Markdown("# 🏥 Medical Image Analysis Chatbot")
-        search_bar = gr.Textbox(label="Search", placeholder="Search...", interactive=True)
+    gr.Markdown("# 🏥 Medical Image Analysis Chatbot")
 
     # File upload section
     with gr.Row():
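The second hunk removes the search bar and its wrapping `gr.Row`, leaving a single Markdown title directly inside the `Blocks` context. A minimal runnable sketch of the resulting header is below; the file-upload widget is a placeholder assumption, not code taken from the diff.

```python
import gradio as gr

with gr.Blocks(theme=gr.themes.Soft()) as chat_ui:
    # Title now sits directly inside Blocks instead of inside a Row
    # that also held a search bar.
    gr.Markdown("# 🏥 Medical Image Analysis Chatbot")

    # File upload section
    with gr.Row():
        file_input = gr.File(label="Upload a medical image")  # placeholder widget

if __name__ == "__main__":
    chat_ui.launch()
```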