# Light Medical Chatbot — RAG over "Medical_Book.pdf" with Flan-T5, FAISS and Gradio.
# (Removed non-code residue from a web/blob-view export: file-size banner,
# commit hashes, and a copied gutter of line numbers that made the file
# invalid Python.)
import os
import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline
from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS
from langchain_huggingface import HuggingFaceEmbeddings
from langchain.llms import HuggingFacePipeline
from langchain.chains import RetrievalQA

# --- Build the retrieval pipeline (runs once, at module import) -------------
# NOTE(review): everything below does heavy work at import time (PDF parsing,
# model downloads, embedding the whole book) — acceptable for a demo script.

# Load the source PDF; PyPDFLoader yields one Document per page.
loader = PyPDFLoader("Medical_Book.pdf")
documents = loader.load()

# Split pages into ~1000-character chunks with a 20-character overlap so
# text that straddles a chunk boundary is not lost entirely.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=20)
all_splits = text_splitter.split_documents(documents)

# Embed every chunk and index the vectors in an in-memory FAISS store.
embedding_model = "sentence-transformers/all-MiniLM-L6-v2"
embeddings = HuggingFaceEmbeddings(model_name=embedding_model)
vectorstore = FAISS.from_documents(all_splits, embeddings)

# Load lightweight seq2seq model (Flan-T5) used to generate answers.
model_name = "google/flan-t5-base"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# LangChain wrapper around a HF text2text pipeline; max_new_tokens=256 is the
# default generation length baked into the pipeline at creation time.
pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer, max_new_tokens=256)
llm = HuggingFacePipeline(pipeline=pipe)

# RetrievalQA chain: retrieve relevant chunks, stuff them into the prompt,
# and return only the answer string (no chat history, no source documents).
qa_chain = RetrievalQA.from_chain_type(llm=llm, retriever=vectorstore.as_retriever(), return_source_documents=False)

# Gradio app
def chatbot_response(question, max_tokens, temperature):
    """Answer *question* from the indexed Medical Book.

    Args:
        question: the user's free-text medical question.
        max_tokens: upper bound on generated tokens (from the Gradio slider).
        temperature: sampling temperature (from the Gradio slider).

    Returns:
        The chain's answer with surrounding whitespace stripped.

    BUG FIX: the original wrote ``max_new_tokens``/``temperature`` onto
    ``pipe.model.config``, which the pipeline's generate call never reads —
    the ``max_new_tokens=256`` kwarg captured when the pipeline was created
    always won, so both sliders were no-ops. We instead update the pipeline's
    stored per-call generate kwargs. Additionally, transformers silently
    ignores ``temperature`` unless sampling is enabled, so ``do_sample`` is
    switched on here.
    """
    # NOTE(review): _forward_params is where a HF pipeline keeps the generate
    # kwargs it was constructed with (e.g. max_new_tokens=256) — confirm this
    # private attribute is stable across the pinned transformers version.
    pipe._forward_params["max_new_tokens"] = int(max_tokens)
    pipe._forward_params["temperature"] = float(temperature)
    pipe._forward_params["do_sample"] = True  # temperature has no effect without sampling
    result = qa_chain.run(question)
    return result.strip()

# --- Gradio UI --------------------------------------------------------------
# Build the input widgets first, then assemble the Interface from them.
question_box = gr.Textbox(
    label="Your Medical Question",
    placeholder="e.g. What are the symptoms of pneumonia?",
)
max_tokens_slider = gr.Slider(label="Max Tokens", minimum=10, maximum=512, value=256, step=1)
temperature_slider = gr.Slider(label="Temperature", minimum=0.1, maximum=1.0, value=0.7, step=0.01)

interface = gr.Interface(
    fn=chatbot_response,
    inputs=[question_box, max_tokens_slider, temperature_slider],
    outputs=gr.Textbox(label="Answer"),
    title="🩺 Light Medical Chatbot",
    description="Ask medical questions answered from the Medical Book using a lightweight Flan-T5 model.",
)

# share=True exposes a temporary public URL in addition to the local server.
interface.launch(share=True)