# vocca_ai/ai_response.py
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Load the FLAN-T5 summarization model and its tokenizer once at import time.
model_name = "google/flan-t5-large"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)


def generate_call_summary(transcript):
    """
    Generate a structured, useful summary of a medical call transcript.
    """
    # Frame the transcript as a summarization prompt for FLAN-T5.
    input_text = f"Summarize this medical call conversation:\n{transcript}"
    inputs = tokenizer(input_text, return_tensors="pt", truncation=True)
    # Beam search with a length penalty favors a concise but complete summary.
    outputs = model.generate(**inputs, max_length=100, min_length=20, length_penalty=2.0, num_beams=5)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


if __name__ == "__main__":
    sample_text = "Patient: Hi, I need to schedule an appointment as soon as possible. I’ve been feeling really weak and dizzy for the past few days."
    print(f"Call Summary: {generate_call_summary(sample_text)}")