Spaces: Runtime error
Update app.py
app.py CHANGED

@@ -6,6 +6,10 @@ import torch
 physician = pipeline("text-generation", model="HuggingFaceTB/SmolLM2-1.7B-Instruct")
 patient = pipeline("text-generation", model="HuggingFaceTB/SmolLM2-1.7B-Instruct")
 
+# System prompts to define roles
+patient_system_prompt = "You are a patient describing your symptoms to a physician."
+physician_system_prompt = "You are a physician responding to a patient's symptoms."
+
 def generate_conversation(topic, turns):
     conversation = []
     total_tokens = 0
@@ -13,10 +17,10 @@ def generate_conversation(topic, turns):
     patient_tokens = 0
 
     # Initial prompt for the patient
-    patient_prompt = f"
+    patient_prompt = f"I'm here to talk about {topic}."
     print(f"Patient Initial Prompt: {patient_prompt}")  # Debugging
     patient_response = patient(
-        patient_prompt,
+        f"{patient_system_prompt} {patient_prompt}",
         max_length=50,  # Reduce max_length for faster responses
         num_return_sequences=1,
         truncation=True,  # Explicitly enable truncation
@@ -29,10 +33,9 @@ def generate_conversation(topic, turns):
 
     for turn in range(turns):
         # Physician's turn
-
-        print(f"Physician Turn {turn} Prompt: {physician_prompt}")  # Debugging
+        print(f"Physician Turn {turn} Prompt: {patient_response}")  # Debugging
         physician_response = physician(
-
+            f"{physician_system_prompt} Patient says: {patient_response}",
             max_length=50,  # Reduce max_length for faster responses
             num_return_sequences=1,
             truncation=True,  # Explicitly enable truncation
@@ -44,10 +47,9 @@ def generate_conversation(topic, turns):
         conversation.append({"role": "physician", "message": physician_response, "tokens": len(physician_response.split())})
 
         # Patient's turn
-
-        print(f"Patient Turn {turn} Prompt: {patient_prompt}")  # Debugging
+        print(f"Patient Turn {turn} Prompt: {physician_response}")  # Debugging
         patient_response = patient(
-
+            f"{patient_system_prompt} Physician says: {physician_response}",
             max_length=50,  # Reduce max_length for faster responses
             num_return_sequences=1,
             truncation=True,  # Explicitly enable truncation
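These hunks show only the changed lines, and the Space still reports a runtime error. One detail worth flagging: a transformers text-generation pipeline returns a list of dicts rather than a string, so unless the unshown lines between each call and the following conversation.append(...) unpack the result, physician_response.split() raises AttributeError: 'list' object has no attribute 'split', and the next turn's prompt interpolates a list repr. The sketch below reassembles the updated function with that unpacking made explicit; the [0]["generated_text"] indexing, the patient-side append, and the return statement are assumptions (those lines are not visible in this diff), and the token-counter variables are omitted for brevity.

from transformers import pipeline

physician = pipeline("text-generation", model="HuggingFaceTB/SmolLM2-1.7B-Instruct")
patient = pipeline("text-generation", model="HuggingFaceTB/SmolLM2-1.7B-Instruct")

# System prompts to define roles
patient_system_prompt = "You are a patient describing your symptoms to a physician."
physician_system_prompt = "You are a physician responding to a patient's symptoms."

def generate_conversation(topic, turns):
    conversation = []

    # Initial prompt for the patient. The pipeline returns
    # [{"generated_text": ...}], so index into it to get the string
    # (assumed unpacking -- these lines are not visible in the diff).
    patient_prompt = f"I'm here to talk about {topic}."
    patient_response = patient(
        f"{patient_system_prompt} {patient_prompt}",
        max_length=50,
        num_return_sequences=1,
        truncation=True,
    )[0]["generated_text"]

    for turn in range(turns):
        # Physician responds to whatever the patient last said
        physician_response = physician(
            f"{physician_system_prompt} Patient says: {patient_response}",
            max_length=50,
            num_return_sequences=1,
            truncation=True,
        )[0]["generated_text"]
        conversation.append({"role": "physician", "message": physician_response,
                             "tokens": len(physician_response.split())})

        # Patient responds to the physician (this append is assumed by symmetry)
        patient_response = patient(
            f"{patient_system_prompt} Physician says: {physician_response}",
            max_length=50,
            num_return_sequences=1,
            truncation=True,
        )[0]["generated_text"]
        conversation.append({"role": "patient", "message": patient_response,
                             "tokens": len(patient_response.split())})

    return conversation  # assumed return value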
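A quick smoke test of that sketch might look like this; the topic string and turn count are purely illustrative:

dialogue = generate_conversation("persistent headaches", turns=2)  # hypothetical inputs
for entry in dialogue:
    print(f"{entry['role']}: {entry['message'][:80]}")

Two pipeline details are also worth checking when debugging the runtime error: max_length=50 budgets the prompt tokens plus the newly generated tokens, so once the accumulated prompt passes roughly 50 tokens there is no room left to generate (max_new_tokens bounds only the new tokens); and generated_text echoes the input prompt by default, so each turn's prompt snowballs unless return_full_text=False is passed to the pipeline call.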