Spaces: Runtime error

Update app.py

app.py CHANGED
@@ -1,15 +1,33 @@
 import gradio as gr
-from …
-import torch
-
-# …
-patient …
+from smolagents import CodeAgent, HfApiModel
+
+# Define system prompts for the agents
+patient_system_prompt = """
+You are a patient describing your symptoms to a physician. You are here to talk about a health issue.
+Be concise and provide relevant information about your symptoms.
+"""
+
+physician_system_prompt = """
+You are a physician responding to a patient's symptoms.
+Ask relevant questions to understand the patient's condition and provide appropriate advice.
+"""
+
+# Load the models for the agents
+patient_model = HfApiModel(model_id="HuggingFaceTB/SmolLM2-1.7B-Instruct")
+physician_model = HfApiModel(model_id="HuggingFaceTB/SmolLM2-1.7B-Instruct")
+
+# Initialize the agents
+patient_agent = CodeAgent(
+    model=patient_model,
+    system_prompt=patient_system_prompt,
+    planning_interval=1  # Allow the agent to plan after each turn
+)
+
+physician_agent = CodeAgent(
+    model=physician_model,
+    system_prompt=physician_system_prompt,
+    planning_interval=1  # Allow the agent to plan after each turn
+)
 
 def generate_conversation(topic, turns):
     conversation = []
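A note on the new API: in the smolagents releases I know, CodeAgent also requires a tools list (even an empty one), and newer versions configure the system prompt through prompt_templates rather than a system_prompt keyword, so the constructor calls above may fail depending on the installed version. A minimal sketch of the setup this hunk is aiming at, under those assumptions:

import gradio as gr  # unused here; kept to mirror the app's imports
from smolagents import CodeAgent, HfApiModel

# Sketch only: tools=[] is assumed to be required; the diff omits it.
model = HfApiModel(model_id="HuggingFaceTB/SmolLM2-1.7B-Instruct")
patient_agent = CodeAgent(
    tools=[],             # no extra tools; the agent only generates text
    model=model,
    planning_interval=1,  # plan after each step, as in the diff
)

reply = patient_agent.run("I'm here to talk about persistent headaches.")
print(reply)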
@@ -18,16 +36,9 @@ def generate_conversation(topic, turns):
     patient_tokens = 0
 
     # Initial prompt for the patient
-    patient_input = f"…"
+    patient_input = f"I'm here to talk about {topic}."
     print(f"Patient Initial Input: {patient_input}")  # Debugging
-    patient_response = …(
-        patient_input,
-        max_new_tokens=50,  # Allow the model to generate up to 50 new tokens
-        num_return_sequences=1,
-        truncation=True,  # Explicitly enable truncation
-        do_sample=True,  # Enable sampling
-        temperature=0.7  # Control randomness
-    )[0]['generated_text']
+    patient_response = patient_agent.run(patient_input)
     print(f"Patient Response: {patient_response}")  # Debugging
     patient_tokens += len(patient_response.split())
     conversation.append({"role": "patient", "message": patient_response, "tokens": len(patient_response.split())})
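The removed call matches the transformers text-generation pipeline pattern (note the kwargs and the [0]['generated_text'] indexing), though the pipeline variable and model name are truncated in the diff. A hypothetical sketch of what the old code likely looked like:

from transformers import pipeline

# Hypothetical names: the actual pipeline variable and model are not
# visible in the diff.
generator = pipeline("text-generation", model="gpt2")

patient_input = "I'm here to talk about persistent headaches."
patient_response = generator(
    patient_input,
    max_new_tokens=50,       # allow up to 50 new tokens
    num_return_sequences=1,
    truncation=True,         # explicitly enable truncation
    do_sample=True,          # enable sampling
    temperature=0.7          # control randomness
)[0]["generated_text"]
print(patient_response)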
@@ -35,28 +46,14 @@
     for turn in range(turns):
         # Physician's turn
         print(f"Physician Turn {turn} Prompt: {patient_response}")  # Debugging
-        physician_response = …(
-            f"Physician: {patient_response}",
-            max_new_tokens=50,  # Allow the model to generate up to 50 new tokens
-            num_return_sequences=1,
-            truncation=True,  # Explicitly enable truncation
-            do_sample=True,  # Enable sampling
-            temperature=0.7  # Control randomness
-        )[0]['generated_text']
+        physician_response = physician_agent.run(patient_response)
         print(f"Physician Response: {physician_response}")  # Debugging
         physician_tokens += len(physician_response.split())
         conversation.append({"role": "physician", "message": physician_response, "tokens": len(physician_response.split())})
 
         # Patient's turn
         print(f"Patient Turn {turn} Prompt: {physician_response}")  # Debugging
-        patient_response = …(
-            f"Patient: {physician_response}",
-            max_new_tokens=50,  # Allow the model to generate up to 50 new tokens
-            num_return_sequences=1,
-            truncation=True,  # Explicitly enable truncation
-            do_sample=True,  # Enable sampling
-            temperature=0.7  # Control randomness
-        )[0]['generated_text']
+        patient_response = patient_agent.run(physician_response)
         print(f"Patient Response: {patient_response}")  # Debugging
         patient_tokens += len(patient_response.split())
         conversation.append({"role": "patient", "message": patient_response, "tokens": len(patient_response.split())})
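The loop alternates the two agents, feeding each response to the other and counting whitespace-separated tokens. A self-contained sketch of that pattern, with plain functions standing in for the agents so it runs without any model or API access:

def make_echo_agent(name):
    # Stand-in for agent.run(); swap in a real smolagents agent here.
    return lambda prompt: f"{name} reply to: {prompt}"

def generate_conversation(topic, turns, patient_run, physician_run):
    conversation = []
    patient_response = patient_run(f"I'm here to talk about {topic}.")
    conversation.append({"role": "patient", "message": patient_response,
                         "tokens": len(patient_response.split())})
    for _ in range(turns):
        physician_response = physician_run(patient_response)
        conversation.append({"role": "physician", "message": physician_response,
                             "tokens": len(physician_response.split())})
        patient_response = patient_run(physician_response)
        conversation.append({"role": "patient", "message": patient_response,
                             "tokens": len(patient_response.split())})
    return conversation

print(generate_conversation("headaches", 2,
                            make_echo_agent("patient"),
                            make_echo_agent("physician")))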
@@ -102,4 +99,4 @@ with gr.Blocks() as demo:
         outputs=output_json
     )
 
-demo.launch()
+demo.launch()
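Only the tail of the Gradio UI is visible in the diff (outputs=output_json and demo.launch()). A minimal sketch of the Blocks wiring those lines imply; every component name other than output_json is assumed, and the stub stands in for the diff's generate_conversation:

import gradio as gr

def generate_conversation(topic, turns):
    # Stub standing in for the diff's function.
    msg = f"I'm here to talk about {topic}."
    return [{"role": "patient", "message": msg, "tokens": len(msg.split())}]

with gr.Blocks() as demo:
    topic = gr.Textbox(label="Topic")                         # assumed input
    turns = gr.Slider(1, 10, value=3, step=1, label="Turns")  # assumed input
    output_json = gr.JSON(label="Conversation")
    generate_btn = gr.Button("Generate")                      # assumed trigger
    generate_btn.click(
        fn=generate_conversation,
        inputs=[topic, turns],
        outputs=output_json
    )

demo.launch()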