Update src/streamlit_app.py

src/streamlit_app.py  CHANGED  (+7 -4)
```diff
@@ -74,13 +74,16 @@ st.write("Dataset columns:", data.features.keys())
 text_field = "text" if "text" in data.features else list(data.features.keys())[0]
 
 # Then use dynamic access:
-text_embeddings = embed_texts(data[text_field])
+#text_embeddings = embed_texts(data[text_field])
 
 # ========== 🧠 Embedding Function ==========
 @st.cache_data(show_spinner=False)
 def embed_texts(texts):
     return text_model.encode(texts, convert_to_tensor=True)
 
+# Pick which text column to use
+TEXT_COLUMN = "complaints"  # or "general_complaint", depending on your needs
+
 # ========== 🧑‍⚕️ App UI ==========
 st.title("🩺 Multimodal Medical Chatbot")
 
```
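This hunk fixes an ordering bug: the old line 77 called `embed_texts` before the function is defined at line 81, so the script raised a `NameError` as soon as it ran (a Streamlit script executes top to bottom on every rerun). The call is commented out here and re-issued later, inside the query handler. The new `TEXT_COLUMN` constant replaces the dynamic `text_field` guess with an explicit column choice. Because it is hard-coded, a guard against schema drift is worth considering; the sketch below is a suggestion rather than part of the commit, and assumes the `data` dataset and `st` import from earlier in the file:

```python
# Suggested guard (not in the commit): fail fast if the hard-coded
# column is missing from the dataset schema.
TEXT_COLUMN = "complaints"

if TEXT_COLUMN not in data.features:
    st.error(
        f"Column '{TEXT_COLUMN}' not found. "
        f"Available columns: {list(data.features.keys())}"
    )
    st.stop()
```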
```diff
@@ -88,7 +91,7 @@ query = st.text_input("Enter your medical question or symptom description:")
 
 if query:
     with st.spinner("Searching medical cases..."):
-        text_embeddings = embed_texts(data[text_field])
+        text_embeddings = embed_texts(data[TEXT_COLUMN])
         query_embedding = embed_texts([query])[0]
 
     # Compute similarity
```
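Moving the corpus embedding inside the query handler is cheap after the first search because `embed_texts` is wrapped in `@st.cache_data`: the first query pays for encoding the whole column, and later reruns hit the cache as long as the argument (for a Hugging Face dataset column, a plain list of strings) hashes the same. The hunk ends at `# Compute similarity`; a minimal sketch of that step using the sentence-transformers helper `util.cos_sim`, assuming the `text_embeddings` and `query_embedding` tensors from above:

```python
from sentence_transformers import util

# Cosine similarity between the query and every case text.
# cos_sim returns a (1, N) tensor; argmax picks the closest case.
scores = util.cos_sim(query_embedding, text_embeddings)
best_idx = int(scores.argmax())
selected = data[best_idx]  # dataset row: the image plus TEXT_COLUMN
```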
```diff
@@ -101,11 +104,11 @@ if query:
     st.image(selected['image'], caption="Most relevant medical image", use_column_width=True)
 
     # Show Text
-    st.markdown(f"**Case Description:** {selected[text_field]}")
+    st.markdown(f"**Case Description:** {selected[TEXT_COLUMN]}")
 
     # GPT Explanation
     if openai.api_key:
-        prompt = f"Explain this case in plain English: {selected[text_field]}"
+        prompt = f"Explain this case in plain English: {selected[TEXT_COLUMN]}"
         response = openai.ChatCompletion.create(
             model="gpt-4",
             messages=[{"role": "user", "content": prompt}],
```
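Two version caveats beyond this commit's scope: `use_column_width` is deprecated in recent Streamlit releases in favor of `use_container_width`, and `openai.ChatCompletion.create` is the legacy pre-1.0 interface of the `openai` package, which raises an error on `openai>=1.0`. If the Space pins a modern `openai`, the same request would look roughly like this sketch (same model and prompt as in the diff):

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

response = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": prompt}],
)
# Display the explanation; the diff cuts off before this point.
st.write(response.choices[0].message.content)
```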