ari7cr commited on
Commit
56a8be9
·
verified ·
1 Parent(s): 363fe21

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +74 -0
app.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
from transformers import pipeline

# Load the physician and patient models via Hugging Face Model Hub.
# NOTE(review): "distilmed" does not look like a real Hub model id — this
# pipeline() call will raise at startup unless it is replaced with an
# actual medical text-generation model, as the inline comment already says.
physician = pipeline("text-generation", model="distilmed") # Replace with actual medical model
patient = pipeline("text-generation", model="distilgpt2") # General conversational model
8
def generate_conversation(topic, turns):
    """Simulate a physician-patient dialogue about *topic*.

    Parameters
    ----------
    topic : str
        Disease or subject the conversation revolves around.
    turns : int | float
        Number of physician/patient exchange rounds. Gradio's ``Number``
        component delivers a float, so the value is coerced to a
        non-negative int here (the original passed it straight to
        ``range()``, which raises TypeError on floats).

    Returns
    -------
    tuple[list[dict], dict]
        The conversation as ``{"role", "message", "tokens"}`` entries and
        a summary with whitespace-token totals per role.
    """
    # gr.Number yields a float; range() needs a non-negative int.
    turns = max(int(turns), 0)
    conversation = []
    physician_tokens = 0
    patient_tokens = 0

    def record(role, message):
        # Token counts are approximated by whitespace-splitting the text.
        tokens = len(message.split())
        conversation.append({"role": role, "message": message, "tokens": tokens})
        return tokens

    # Initial prompt for the patient
    patient_prompt = f"I'm here to talk about {topic}."
    patient_response = patient(patient_prompt, max_length=50, num_return_sequences=1)[0]['generated_text']
    patient_tokens += record("patient", patient_response)

    for _ in range(turns):
        # Physician's turn: respond to the patient's last message.
        physician_prompt = f"As a physician, how would you respond to: {patient_response}"
        physician_response = physician(physician_prompt, max_length=50, num_return_sequences=1)[0]['generated_text']
        physician_tokens += record("physician", physician_response)

        # Patient's turn: respond to the physician's last message.
        patient_prompt = f"As a patient, how would you respond to: {physician_response}"
        patient_response = patient(patient_prompt, max_length=50, num_return_sequences=1)[0]['generated_text']
        patient_tokens += record("patient", patient_response)

    # Summarize the conversation by role.
    summary = {
        "total_tokens": physician_tokens + patient_tokens,
        "physician_tokens": physician_tokens,
        "patient_tokens": patient_tokens,
    }

    return conversation, summary
41
+
42
def app_interface(topic, turns):
    """Run one role-play session and package the result for the JSON view.

    Parameters
    ----------
    topic : str
        Disease/topic entered in the UI.
    turns : int | float
        Turn count from the UI's Number widget (arrives as a float);
        coerced to int so the echoed input and downstream range() use
        a clean integer.

    Returns
    -------
    dict
        ``{"input": ..., "conversation": ..., "summary": ...}`` ready for
        a gr.JSON component.
    """
    turns = int(turns)  # gr.Number delivers floats (e.g. 5.0)
    conversation, summary = generate_conversation(topic, turns)
    return {
        "input": {"topic": topic, "turns": turns},
        "conversation": conversation,
        "summary": summary,
    }
50
+
51
# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("## 👨‍⚕️ Synthetic Data Generation: Physician-Patient Role-Play 👤")
    with gr.Row():
        topic_input = gr.Textbox(label="Enter Disease/Topic", placeholder="e.g., chest pain")
        # precision=0 makes the component deliver an integer turn count.
        turns_input = gr.Number(label="Number of Turns", value=5, precision=0)
    submit_button = gr.Button("🚀 Start Interaction")
    output_json = gr.JSON(label="Generated Conversation")

    # Download button for the conversation
    download_button = gr.Button("📥 Download Conversation")
    download_file = gr.File(label="Conversation JSON")

    def _save_conversation(data):
        """Write the JSON payload to a temp file and return its path.

        The original handler called ``gr.File.download``, which is not a
        Gradio API; a gr.File output takes a filesystem path as its value.
        """
        import json
        import tempfile

        with tempfile.NamedTemporaryFile(
            mode="w", suffix=".json", delete=False, encoding="utf-8"
        ) as fh:
            json.dump(data, fh, ensure_ascii=False, indent=2)
            return fh.name

    download_button.click(
        fn=_save_conversation,
        inputs=output_json,
        outputs=download_file,
    )

    submit_button.click(
        fn=app_interface,
        inputs=[topic_input, turns_input],
        outputs=output_json,
    )

demo.launch()