Arjun Moorthy committed · Commit 7d7d479 · 1 Parent(s): 46bd96a

Transform into OncoLife Symptom & Triage Assistant with structured workflow and safety protocols
README.md CHANGED

```diff
@@ -1,5 +1,5 @@
 ---
-title:
+title: OncoLife Symptom & Triage Assistant
 emoji: 🏥
 colorFrom: blue
 colorTo: green
@@ -9,27 +9,53 @@ app_file: app.py
 pinned: false
 ---

-#
+# OncoLife Symptom & Triage Assistant

-A medical chatbot
+A specialized medical chatbot that performs both symptom assessment and clinical triage for chemotherapy patients. The assistant guides users through structured symptom reporting and determines whether responses require escalation to their care team.

 ## Features

 - 🤖 Powered by BioMistral-7B medical foundation model
--
--
--
+- 🏥 Oncology-specific symptom assessment and triage
+- 📊 CTCAE and UKONS severity grading
+- 🚨 Red flag detection and immediate escalation
+- 💬 Structured conversation workflow
+- ⚡ Real-time triage recommendations
+- 🛡️ Safety protocols and legal disclaimers
+
+## Workflow
+
+1. **Symptom Collection**: Ask for and identify patient symptoms
+2. **Severity Assessment**: Rate each symptom (mild/moderate/severe)
+3. **Red Flag Detection**: Check for immediate escalation needs
+4. **Grading**: Apply CTCAE or UKONS severity criteria
+5. **Targeted Questions**: Ask utility-based follow-up questions
+6. **Triage Decision**: Determine if care team contact is needed
+7. **Summary**: Provide structured assessment with recommendations
+
+## Safety Protocols
+
+- **No Medical Advice**: Never provide treatment recommendations
+- **Oncology Team Redirect**: Always refer to care team for medical decisions
+- **Immediate Escalation**: Escalate dangerous symptoms immediately
+- **Legal Disclaimers**: Include required legal language at session end
+- **Emergency Protocols**: Handle suicidal ideation and dangerous language

 ## Usage

-Simply
+Simply describe your symptoms or concerns, and the assistant will guide you through a structured assessment to determine if you need to contact your care team.

 ## Examples

-- "
-- "
-- "
-- "I
+- "I'm feeling nauseous and tired"
+- "I have a fever of 101"
+- "My neuropathy is getting worse"
+- "I'm having trouble eating"
+- "I feel dizzy and lightheaded"
+
+## Important Disclaimers
+
+This assistant is designed to help assess symptoms and determine if escalation to your care team is needed. It cannot provide medical advice or treatment recommendations. Always contact your oncology team for medical decisions.

 ---
```
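The Workflow section above describes a linear, phase-based protocol, which the app enforces through the model's system prompt rather than explicit control flow (see the app.py diff below). As a rough sketch under that reading, the seven steps could equally be modeled as an explicit phase machine; every name and threshold here is hypothetical, not part of the commit:

```python
# Hypothetical phase machine mirroring the seven README workflow steps.
PHASES = ["collect", "assess_severity", "check_red_flags",
          "grade", "follow_up", "triage", "summarize"]

RED_FLAGS = {"chest pain", "trouble breathing", "uncontrolled bleeding"}  # illustrative only

def next_phase(current: str, symptom: str = "", severity: str = "") -> str:
    """Advance one step; a red flag or severe symptom jumps straight to triage."""
    if severity == "severe" or symptom.lower() in RED_FLAGS:
        return "triage"
    i = PHASES.index(current)
    return PHASES[min(i + 1, len(PHASES) - 1)]
```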
|
app.py CHANGED

```diff
@@ -1,13 +1,14 @@
 #!/usr/bin/env python3
 """
-
-
+OncoLife Symptom & Triage Assistant
+A medical chatbot that performs both symptom assessment and clinical triage for chemotherapy patients.
 Updated: Using BioMistral-7B base model for medical conversations.
 REBUILD: Simplified to use only base model, no adapters.
 """

 import gradio as gr
 import os
+import json
 from transformers import AutoTokenizer, MistralForCausalLM
 import torch
 from spaces import GPU
@@ -18,12 +19,12 @@ def force_gpu_detection():
     """Force GPU detection for Hugging Face Spaces"""
     return torch.cuda.is_available()

-class HFMedicalChatbot:
+class OncoLifeAssistant:
     def __init__(self):
         # BioMistral base model configuration
         BASE = "BioMistral/BioMistral-7B"

-        print("🚀 Initializing
+        print("🚀 Initializing OncoLife Symptom & Triage Assistant")
         print(f"📦 Loading base model: {BASE}")

         # Force GPU detection first
@@ -35,6 +36,15 @@ class HFMedicalChatbot:
         gpu_available = torch.cuda.is_available()

         self._load_model(BASE, gpu_available)
+
+        # Initialize conversation state
+        self.conversation_state = {
+            "symptoms": [],
+            "asked_ids": [],
+            "answers": {},
+            "current_symptom": None,
+            "conversation_phase": "initial"  # initial, symptom_assessment, triage, summary
+        }

     def _load_model(self, model_id, gpu_available):
         """Load the BioMistral base model"""
@@ -79,22 +89,48 @@ class HFMedicalChatbot:
             self.model = None
             self.tokenizer = None

-    def
+    def generate_oncolife_response(self, user_input, conversation_history):
+        """Generate response using OncoLife Symptom & Triage Assistant protocol"""
         try:
             if self.model is None or self.tokenizer is None:
                 return """❌ **Model Loading Error**

-The
+The OncoLife assistant model failed to load. This could be due to:
 1. Model not available
 2. Memory constraints
 3. Network issues

 Please check the Space logs for details."""

-            print(f"🔄 Generating
+            print(f"🔄 Generating OncoLife response for: {user_input}")

-            # Create
-
+            # Create OncoLife-specific prompt
+            system_prompt = """You are the OncoLife Symptom & Triage Assistant, a medical chatbot that performs both symptom assessment and clinical triage for chemotherapy patients. Your task is to guide users through structured symptom reporting and decide whether any responses require escalation to their care team.
+
+Follow this workflow:
+1. Ask for symptoms if none provided
+2. For each symptom, ask severity rating (mild/moderate/severe)
+3. Check for red flags and immediate escalation needs
+4. Grade severity using CTCAE or UKONS criteria
+5. Ask targeted questions based on utility scoring
+6. Provide structured summary with triage recommendations
+
+Safety protocols:
+- Never provide medical advice or treatment recommendations
+- Always redirect to oncology team for medical decisions
+- Escalate immediately for dangerous symptoms
+- Add legal disclaimer at session end
+
+Current conversation state: {conversation_state}"""
+
+            # Format conversation history
+            history_text = ""
+            if conversation_history:
+                for entry in conversation_history:
+                    history_text += f"User: {entry['user']}\nAssistant: {entry['assistant']}\n\n"
+
+            # Create full prompt
+            prompt = f"{system_prompt}\n\nConversation History:\n{history_text}\nUser: {user_input}\nAssistant:"

             # Tokenize
             inputs = self.tokenizer(prompt, return_tensors="pt", padding=True)
@@ -117,7 +153,7 @@ Please check the Space logs for details."""
             try:
                 outputs = self.model.generate(
                     **inputs,
-                    max_new_tokens=
+                    max_new_tokens=512,  # Longer responses for detailed medical assessment
                     temperature=0.7,
                     do_sample=True,
                     top_p=0.9,
@@ -135,7 +171,7 @@ Please check the Space logs for details."""

                 outputs = self.model.generate(
                     **inputs,
-                    max_new_tokens=
+                    max_new_tokens=512,
                     temperature=0.7,
                     do_sample=True,
                     top_p=0.9,
@@ -148,17 +184,21 @@ Please check the Space logs for details."""
             # Decode response
             response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)

-            # Extract just the
-            if "
-                answer = response.split("
+            # Extract just the assistant's response
+            if "Assistant:" in response:
+                answer = response.split("Assistant:")[-1].strip()
             else:
                 answer = response.strip()

-
+            # Add legal disclaimer if this appears to be end of session
+            if any(keyword in user_input.lower() for keyword in ['done', 'finished', 'complete', 'summary']):
+                answer += "\n\n" + self._get_legal_disclaimer()
+
+            print("✅ OncoLife response generated successfully")
             return answer

         except Exception as e:
-            print(f"❌ Error generating
+            print(f"❌ Error generating OncoLife response: {e}")
             return f"""❌ **Generation Error**

 Error: {str(e)}
@@ -168,33 +208,52 @@ This could be due to:
 2. Memory constraints
 3. Input format problems

-Please try a simpler
+Please try a simpler question or check the logs for more details."""

-    def
-
-
-
+    def _get_legal_disclaimer(self):
+        """Return the legal disclaimer as specified in the instructions"""
+        return """**Legal Disclaimer:**
+
+Patient verbalizes agreement with plan of care and understanding of the information we have gone over today and has no further comments, questions or concerns at this time. Will follow up with Doctor or ONN if symptoms worsen, do not improve, or any other symptoms develop. Agrees to seek emergency care if pt believes is needed, including for increased dizziness, depression, or any thoughts of SI.
+
+**Important:** I cannot provide medical advice or treatment recommendations. Please call your oncology team to confirm what's appropriate for your specific situation."""
+
+    def chat(self, message, history):
+        """Main chat interface for OncoLife Assistant"""
+        if not message.strip():
+            return "Please describe your symptoms or concerns."
+
+        # Convert history to the format expected by generate_oncolife_response
+        conversation_history = []
+        for user_msg, assistant_msg in history:
+            conversation_history.append({
+                "user": user_msg,
+                "assistant": assistant_msg
+            })
+
+        # Generate response using OncoLife protocol
+        response = self.generate_oncolife_response(message, conversation_history)
+
+        return response

 # Create interface
-
-interface = gr.
-    fn=
-
-
-    title="🏥 BioMistral Medical Chatbot",
-    description="Ask medical questions and get AI-powered responses using the BioMistral-7B medical foundation model.",
+assistant = OncoLifeAssistant()
+interface = gr.ChatInterface(
+    fn=assistant.chat,
+    title="🏥 OncoLife Symptom & Triage Assistant",
+    description="I'm here to help assess your symptoms and determine if you need to contact your care team. Please describe your symptoms or concerns.",
     examples=[
-        ["
-        ["
-        ["
-        ["I
-        ["
+        ["I'm feeling nauseous and tired"],
+        ["I have a fever of 101"],
+        ["My neuropathy is getting worse"],
+        ["I'm having trouble eating"],
+        ["I feel dizzy and lightheaded"]
     ],
     theme=gr.themes.Soft()
 )

 if __name__ == "__main__":
     print("=" * 60)
-    print("
+    print("OncoLife Symptom & Triage Assistant")
     print("=" * 60)
     interface.launch(server_name="0.0.0.0", server_port=7860)
```
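One thing the diff adds but never exercises: `conversation_state` is initialized in `__init__` and echoed in the system prompt's placeholder, yet nothing updates it between turns. A minimal sketch of how it could be advanced as symptoms come in, using a hypothetical helper that is not in the commit:

```python
def record_symptom(state: dict, symptom: str, severity: str) -> None:
    """Fold one reported symptom and its severity rating into the state dict."""
    state["symptoms"].append(symptom)
    state["answers"][symptom] = {"severity": severity}
    state["current_symptom"] = symptom
    if state["conversation_phase"] == "initial":
        state["conversation_phase"] = "symptom_assessment"

# Example with the exact dict shape the commit defines:
state = {"symptoms": [], "asked_ids": [], "answers": {},
         "current_symptom": None, "conversation_phase": "initial"}
record_symptom(state, "nausea", "moderate")
```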
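Relatedly, `system_prompt` is a plain triple-quoted string rather than an f-string, so its trailing `{conversation_state}` placeholder reaches the model as literal text, and the newly added `import json` is never used. One way to actually fill the placeholder, as a sketch rather than the committed behavior:

```python
import json

def render_system_prompt(template: str, state: dict) -> str:
    # Serialize the live conversation state into the placeholder that the
    # committed prompt currently leaves verbatim.
    return template.replace("{conversation_state}", json.dumps(state))
```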
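Both `generate()` calls raise `max_new_tokens` to 512 and keep the sampling settings. Shown standalone, the call looks like the sketch below; the pad-token line is an assumption, since Mistral-family tokenizers typically ship without a pad token and `padding=True` refuses to run until one exists, which the diff itself does not address:

```python
from transformers import AutoTokenizer, MistralForCausalLM

tokenizer = AutoTokenizer.from_pretrained("BioMistral/BioMistral-7B")
model = MistralForCausalLM.from_pretrained("BioMistral/BioMistral-7B")
tokenizer.pad_token = tokenizer.pad_token or tokenizer.eos_token  # assumed fix

prompt = "User: I have a fever of 101\nAssistant:"
inputs = tokenizer(prompt, return_tensors="pt", padding=True)
outputs = model.generate(
    **inputs,
    max_new_tokens=512,  # room for a detailed, structured assessment
    temperature=0.7,     # moderate sampling randomness
    do_sample=True,
    top_p=0.9,           # nucleus sampling
    pad_token_id=tokenizer.eos_token_id,
)
```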
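The extraction step splits the decoded text on the last `"Assistant:"` marker, which works because the prompt ends with one, but it silently truncates the reply if the model emits another `Assistant:` line of its own. A common, more robust alternative is to decode only the newly generated tokens; this continues the generation sketch above and is not what the commit does:

```python
# Decode only the tokens produced after the prompt, so no prompt text
# (or stray "Assistant:" marker) can distort the extracted answer.
prompt_len = inputs["input_ids"].shape[1]
answer = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True).strip()
print(answer)
```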
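The end-of-session check is a plain substring test, so a message like "my treatment was abandoned" triggers the disclaimer because it contains "done", as does "incomplete" via "complete". A word-boundary match tightens this; again a sketch, not the committed check:

```python
import re

# Match the trigger words only as whole words.
END_WORDS = re.compile(r"\b(done|finished|complete|summary)\b", re.IGNORECASE)

def session_is_ending(user_input: str) -> bool:
    return bool(END_WORDS.search(user_input))

assert session_is_ending("I'm done for today")
assert not session_is_ending("my treatment was abandoned")
```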
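Finally, `gr.ChatInterface` calls `fn` once per turn with the new message and the running history. With the tuple-style history that `chat()` unpacks here (the long-standing default; newer Gradio releases also offer an OpenAI-style messages format), a single turn looks like this, with illustrative values:

```python
# Hypothetical single turn, mirroring what gr.ChatInterface passes to fn.
assistant = OncoLifeAssistant()  # loads BioMistral-7B; needs the Space's GPU
reply = assistant.chat(
    "I have a fever of 101",
    history=[["I'm feeling nauseous and tired",
              "I'm sorry you're feeling that way. Would you rate the nausea "
              "as mild, moderate, or severe?"]],
)
print(reply)
```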