Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -10,6 +10,7 @@ patient = pipeline("text-generation", model="HuggingFaceTB/SmolLM2-1.7B-Instruct
 patient_system_prompt = "You are a patient describing your symptoms to a physician."
 physician_system_prompt = "You are a physician responding to a patient's symptoms."
 
+
 def generate_conversation(topic, turns):
     conversation = []
     total_tokens = 0
@@ -20,8 +21,8 @@ def generate_conversation(topic, turns):
     patient_input = f"Patient: I'm here to talk about {topic}."
     print(f"Patient Initial Input: {patient_input}")  # Debugging
     patient_response = patient(
-        patient_input,
-
+        patient_input,
+        max_new_tokens=50,  # Allow the model to generate up to 50 new tokens
         num_return_sequences=1,
         truncation=True,  # Explicitly enable truncation
         do_sample=True,  # Enable sampling
@@ -35,8 +36,8 @@ def generate_conversation(topic, turns):
         # Physician's turn
         print(f"Physician Turn {turn} Prompt: {patient_response}")  # Debugging
         physician_response = physician(
-            f"Physician: {patient_response}",
-
+            f"Physician: {patient_response}",
+            max_new_tokens=50,  # Allow the model to generate up to 50 new tokens
             num_return_sequences=1,
             truncation=True,  # Explicitly enable truncation
             do_sample=True,  # Enable sampling
@@ -49,8 +50,8 @@ def generate_conversation(topic, turns):
         # Patient's turn
         print(f"Patient Turn {turn} Prompt: {physician_response}")  # Debugging
         patient_response = patient(
-            f"Patient: {physician_response}",
-
+            f"Patient: {physician_response}",
+            max_new_tokens=50,  # Allow the model to generate up to 50 new tokens
             num_return_sequences=1,
             truncation=True,  # Explicitly enable truncation
             do_sample=True,  # Enable sampling