import gradio as gr
from transformers import AutoModelForQuestionAnswering, AutoTokenizer
import torch

# Load the BioBERT QA model once at import time so every request reuses it.
model_name = "dmis-lab/biobert-v1.1"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForQuestionAnswering.from_pretrained(model_name)
model.eval()  # inference only — disable dropout etc.


def chatbot_response(user_message: str) -> str:
    """Answer a user's medical question with BioBERT's QA head.

    Args:
        user_message: Raw text typed by the user.

    Returns:
        A single string (the Gradio ``Textbox`` output expects ``str``,
        not ``list``): a Bangla warning for empty input, otherwise the
        analysis banner plus the decoded answer span when one is found.
    """
    # Guard against empty or whitespace-only input.
    if not user_message or not user_message.strip():
        return "⚠️ অনুগ্রহ করে একটি বৈধ প্রশ্ন লিখুন।"

    # Tokenize with truncation so over-long input cannot exceed the
    # model's maximum sequence length and crash the forward pass.
    inputs = tokenizer(
        user_message,
        return_tensors="pt",
        truncation=True,
        max_length=512,
    )

    # Inference only — no gradients needed.
    with torch.no_grad():
        output = model(**inputs)

    # Decode the highest-scoring answer span from the QA head's logits.
    # (The original computed `output` but never used it.)
    start_idx = int(torch.argmax(output.start_logits))
    end_idx = int(torch.argmax(output.end_logits))

    answer = ""
    if end_idx >= start_idx:  # an inverted span means no usable answer
        span_ids = inputs["input_ids"][0][start_idx : end_idx + 1]
        answer = tokenizer.decode(span_ids, skip_special_tokens=True).strip()

    banner = "✅ নিরাময় AI আপনার প্রশ্ন বিশ্লেষণ করেছে..."
    return f"{banner}\n{answer}" if answer else banner


# Build the Gradio interface (flagging disabled via the current
# `flagging_mode` parameter, replacing the deprecated `allow_flagging`).
iface = gr.Interface(
    fn=chatbot_response,
    inputs=gr.Textbox(label="User Message"),
    outputs=gr.Textbox(label="Chatbot Response"),
    title="🔹 Niramoy Bangla Medical Chatbot 🔹",
    flagging_mode="never",
)

# Launch only when run as a script, not when imported.
if __name__ == "__main__":
    iface.launch(share=True)