ari7cr commited on
Commit
a5f7bd5
·
verified ·
1 Parent(s): 11038f2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -5
app.py CHANGED
@@ -2,9 +2,9 @@ import gradio as gr
2
  from transformers import pipeline
3
  import torch
4
 
5
- # Load the physician and patient models via Hugging Face Model Hub
6
- physician = pipeline("text-generation", model="HuggingFaceTB/SmolLM2-1.7B") # Replace with actual medical model
7
- patient = pipeline("text-generation", model="HuggingFaceTB/SmolLM2-1.7B") # General conversational model
8
 
9
  def generate_conversation(topic, turns):
10
  conversation = []
@@ -14,20 +14,26 @@ def generate_conversation(topic, turns):
14
 
15
  # Initial prompt for the patient
16
  patient_prompt = f"I'm here to talk about {topic}."
 
17
  patient_response = patient(patient_prompt, max_length=50, num_return_sequences=1)[0]['generated_text']
 
18
  patient_tokens += len(patient_response.split())
19
  conversation.append({"role": "patient", "message": patient_response, "tokens": len(patient_response.split())})
20
 
21
  for turn in range(turns):
22
  # Physician's turn
23
  physician_prompt = f"As a physician, how would you respond to: {patient_response}"
 
24
  physician_response = physician(physician_prompt, max_length=50, num_return_sequences=1)[0]['generated_text']
 
25
  physician_tokens += len(physician_response.split())
26
  conversation.append({"role": "physician", "message": physician_response, "tokens": len(physician_response.split())})
27
 
28
  # Patient's turn
29
  patient_prompt = f"As a patient, how would you respond to: {physician_response}"
 
30
  patient_response = patient(patient_prompt, max_length=50, num_return_sequences=1)[0]['generated_text']
 
31
  patient_tokens += len(patient_response.split())
32
  conversation.append({"role": "patient", "message": patient_response, "tokens": len(patient_response.split())})
33
 
@@ -54,7 +60,7 @@ with gr.Blocks() as demo:
54
  gr.Markdown("## 👨‍⚕️ Synthetic Data Generation: Physician-Patient Role-Play 👤")
55
  with gr.Row():
56
  topic_input = gr.Textbox(label="Enter Disease/Topic", placeholder="e.g., chest pain")
57
- turns_input = gr.Number(label="Number of Turns", value=5)
58
  submit_button = gr.Button("🚀 Start Interaction")
59
  output_json = gr.JSON(label="Generated Conversation")
60
 
@@ -72,4 +78,4 @@ with gr.Blocks() as demo:
72
  outputs=output_json
73
  )
74
 
75
- demo.launch()
 
2
  from transformers import pipeline
3
  import torch
4
 
5
+ # Load the smaller models
6
+ physician = pipeline("text-generation", model="HuggingFaceTB/SmolLM2-1.7B")
7
+ patient = pipeline("text-generation", model="HuggingFaceTB/SmolLM2-1.7B")
8
 
9
  def generate_conversation(topic, turns):
10
  conversation = []
 
14
 
15
  # Initial prompt for the patient
16
  patient_prompt = f"I'm here to talk about {topic}."
17
+ print(f"Patient Initial Prompt: {patient_prompt}") # Debugging
18
  patient_response = patient(patient_prompt, max_length=50, num_return_sequences=1)[0]['generated_text']
19
+ print(f"Patient Response: {patient_response}") # Debugging
20
  patient_tokens += len(patient_response.split())
21
  conversation.append({"role": "patient", "message": patient_response, "tokens": len(patient_response.split())})
22
 
23
  for turn in range(turns):
24
  # Physician's turn
25
  physician_prompt = f"As a physician, how would you respond to: {patient_response}"
26
+ print(f"Physician Turn {turn} Prompt: {physician_prompt}") # Debugging
27
  physician_response = physician(physician_prompt, max_length=50, num_return_sequences=1)[0]['generated_text']
28
+ print(f"Physician Response: {physician_response}") # Debugging
29
  physician_tokens += len(physician_response.split())
30
  conversation.append({"role": "physician", "message": physician_response, "tokens": len(physician_response.split())})
31
 
32
  # Patient's turn
33
  patient_prompt = f"As a patient, how would you respond to: {physician_response}"
34
+ print(f"Patient Turn {turn} Prompt: {patient_prompt}") # Debugging
35
  patient_response = patient(patient_prompt, max_length=50, num_return_sequences=1)[0]['generated_text']
36
+ print(f"Patient Response: {patient_response}") # Debugging
37
  patient_tokens += len(patient_response.split())
38
  conversation.append({"role": "patient", "message": patient_response, "tokens": len(patient_response.split())})
39
 
 
60
  gr.Markdown("## 👨‍⚕️ Synthetic Data Generation: Physician-Patient Role-Play 👤")
61
  with gr.Row():
62
  topic_input = gr.Textbox(label="Enter Disease/Topic", placeholder="e.g., chest pain")
63
+ turns_input = gr.Number(label="Number of Turns", value=1) # Default to 1 turn for debugging
64
  submit_button = gr.Button("🚀 Start Interaction")
65
  output_json = gr.JSON(label="Generated Conversation")
66
 
 
78
  outputs=output_json
79
  )
80
 
81
+ demo.launch()