# MediGuide ChatBot — RAG assistant over a drugs.com side-effects dataset.
# (Removed non-Python residue from the hosting page: status text, commit
# hashes, and line-number gutter that were captured along with the source.)
# --- Data, LLM, embedding, and vector-store setup ---------------------------
import pandas as pd
# Drug reference dataset (uses, side effects, etc.) scraped from drugs.com.
context_data = pd.read_csv("./drugs_side_effects_drugs_com.csv")
import os
from langchain_groq import ChatGroq
# Groq-hosted Llama 3.1 70B is the answering LLM; key is read from the env.
llm = ChatGroq(model="llama-3.1-70b-versatile",api_key=os.environ.get("GROQ_API_KEY"))
## Embedding model!
from langchain_huggingface import HuggingFaceEmbeddings
embed_model = HuggingFaceEmbeddings(model_name="mixedbread-ai/mxbai-embed-large-v1")
# create vector store!
from langchain_chroma import Chroma
vectorstore = Chroma(
collection_name="medical_dataset_store",
embedding_function=embed_model,
)
# Add data to the vector store.
# BUG FIX: iterating a pandas DataFrame yields its *column labels*, so the
# original `add_texts(context_data)` embedded only the CSV header names and
# none of the actual drug records.  Serialize each row into one text document
# (all cells joined, stringified) so the real data gets indexed.
documents = context_data.astype(str).apply(" | ".join, axis=1).tolist()
vectorstore.add_texts(documents)
retriever = vectorstore.as_retriever()
from langchain_core.prompts import PromptTemplate
# Grounding prompt: the {context} and {question} placeholders are filled by
# the LCEL chain below; the model is told to answer only from the context.
template = ("""You are a medical expert.
Use the provided context to answer the question.
If you don't know the answer, say so. Explain your answer in detail.
Do not discuss the context in your response; just provide the answer directly.
Context: {context}
Question: {question}
Answer:""")
rag_prompt = PromptTemplate.from_template(template)
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
# LCEL pipeline: the incoming question is sent both to the retriever (which
# fetches matching documents for {context}) and passed through unchanged as
# {question}; the filled prompt goes to the LLM, whose output is parsed to a
# plain string.  Supports .stream() for token-by-token output (used below).
rag_chain = (
{"context": retriever, "question": RunnablePassthrough()}
| rag_prompt
| llm
| StrOutputParser()
)
import gradio as gr
# Canned replies for simple greetings.  Module-level constant so the dict is
# built once, not on every chat turn.
GREETING_RESPONSES = {
    "hello": "Hello! How can I assist you today?",
    "hi": "Hi there! How can I help you?",
    "good morning": "Good morning! How can I assist you?",
    "good afternoon": "Good afternoon! What can I help you with?",
    "good evening": "Good evening! Do you have any questions for me?",
}


def rag_memory_stream(message, history):
    """Stream a chat reply for Gradio's ChatInterface.

    Args:
        message: The user's latest message as a plain string.
        history: Prior conversation turns (required by Gradio's fn signature;
            not used — each question is answered independently).

    Yields:
        Progressively longer partial answer strings so the UI can render the
        response as it streams.
    """
    # Normalize for greeting lookup: lowercase, trim whitespace, and drop
    # trailing punctuation so variants like "Hello!" still match.
    normalized = message.strip().lower().rstrip("!.?,")
    if normalized in GREETING_RESPONSES:
        # Short-circuit: answer greetings directly without invoking the LLM.
        yield GREETING_RESPONSES[normalized]
        return
    # Non-greeting: stream the RAG chain's answer chunk by chunk.
    partial_text = ""
    for chunk in rag_chain.stream(message):
        partial_text += chunk
        yield partial_text
# Example prompts shown beneath the chat box.
# (Fixed typo: "Acnes" -> "Acne".)
examples = [
    "What is Aspirin",
    "Can Doxycycline Treat Acne",
]
# User-facing description.  Fixed grammar/typos from the original
# ("ChatBot,AI-powered", "facilitate ... to make informed decision-making").
description = (
    "Hello! Welcome to MediGuide ChatBot, an AI-powered assistant designed to "
    "help healthcare providers make informed decisions by providing reliable "
    "information about various medical drugs, including their uses, side "
    "effects, contraindications and classification."
)
title = "MediGuide ChatBot"

# Streaming chat UI; type="messages" uses the openai-style role/content
# history format, which rag_memory_stream accepts (and ignores).
demo = gr.ChatInterface(
    fn=rag_memory_stream,
    type="messages",
    title=title,
    description=description,
    fill_height=True,
    examples=examples,
    theme=gr.themes.Soft(),
)

if __name__ == "__main__":
    demo.launch()