eudoxie committed on
Commit
e4e87d3
·
verified ·
1 Parent(s): 0a32c0d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +38 -40
app.py CHANGED
@@ -1,54 +1,52 @@
1
  import pandas as pd
 
 
 
2
  import os
3
  from langchain_groq import ChatGroq
4
- from langchain_huggingface import HuggingFaceEmbeddings
5
- from langchain_chroma import Chroma
6
- from langchain_core.prompts import PromptTemplate
7
- from langchain_core.output_parsers import StrOutputParser
8
- from langchain_core.runnables import RunnablePassthrough
9
- import gradio as gr
10
 
11
- # Load data
12
- context_data = pd.read_csv("drugs_side_effects_drugs_com.csv")
13
 
14
- # Initialize LLM
15
- llm = ChatGroq(
16
- model="llama-3.1-70b-versatile",
17
- api_key=os.environ.get("GROQ_API_KEY")
18
- )
19
 
20
- # Initialize embedding model
21
- embed_model = HuggingFaceEmbeddings(
22
- model_name="mixedbread-ai/mxbai-embed-large-v1"
23
- )
24
 
25
- # Create and populate vector store
26
  vectorstore = Chroma(
27
  collection_name="medical_dataset_store",
28
  embedding_function=embed_model,
29
  persist_directory="./",
30
  )
31
- vectorstore.add_texts(texts=context_data.values.tolist()) # Convert DataFrame to list of texts
 
 
 
32
  retriever = vectorstore.as_retriever()
33
 
34
- # Set up RAG prompt template
35
- template = """You are a medical expert. Use the provided context to answer the question.
36
- If you don't know the answer, say so. Explain your answer in detail.
37
- Do not discuss the context in your response; just provide the answer directly.
38
 
39
- Context: {context}
40
- Question: {question}
41
- Answer:"""
 
 
 
 
42
 
43
  rag_prompt = PromptTemplate.from_template(template)
44
 
45
- # Create RAG chain
 
 
46
  rag_chain = (
47
  {"context": retriever, "question": RunnablePassthrough()}
48
  | rag_prompt
49
  | llm
50
  | StrOutputParser()
51
  )
 
52
 
53
  def rag_memory_stream(message, history):
54
  partial_text = ""
@@ -56,26 +54,26 @@ def rag_memory_stream(message, history):
56
  partial_text += new_text
57
  yield partial_text
58
 
59
- # Gradio interface setup
60
- greetings_message = """Hello! Welcome to MediGuide ChatBot. I'm here to provide you with quick and
61
- accurate information on medical drugs. Whether you need details on usage, side effects, etc. feel
62
- free to ask. Let's enhance patient care together!"""
 
 
63
 
64
- initial_history = [("", greetings_message)]
65
 
66
- # Create Gradio interface
67
  demo = gr.Interface(
68
- title="MediGuide ChatBot",
69
  fn=rag_memory_stream,
70
  inputs=[
71
- gr.Chatbot(value=initial_history, label="Chat History"),
72
- gr.Textbox(label="Your Message", placeholder="Type your message here...")
73
  ],
74
- outputs=gr.Chatbot(label="Chat History"),
75
  allow_flagging="never",
76
- theme="glass"
 
77
  )
78
 
 
79
  if __name__ == "__main__":
80
- # Launch with sharing enabled to avoid localhost issues
81
- demo.launch(share=True)
 
1
"""MediGuide ChatBot: RAG app over a drugs/side-effects dataset (Groq LLM + Chroma)."""

# Imports consolidated at the top (PEP 8) instead of being scattered
# between statements as in the original.
import os

import pandas as pd
from langchain_groq import ChatGroq
from langchain_huggingface import HuggingFaceEmbeddings

# Drug/side-effect reference table; each row describes one drug.
# NOTE(review): assumes the CSV sits in the working directory — confirm
# deployment layout.
context_data = pd.read_csv("drugs_side_effects_drugs_com.csv")

# Groq-hosted chat model. The API key comes from the GROQ_API_KEY
# environment variable; os.environ.get returns None when it is unset,
# which ChatGroq will reject at request time rather than here.
llm = ChatGroq(
    model="llama-3.1-70b-versatile",
    api_key=os.environ.get("GROQ_API_KEY"),
)

# Sentence-embedding model used to vectorize the CSV rows for retrieval.
embed_model = HuggingFaceEmbeddings(
    model_name="mixedbread-ai/mxbai-embed-large-v1"
)
 
 
13
 
14
# --- Vector store ---------------------------------------------------------
from langchain_chroma import Chroma

# Chroma collection persisted in the working directory so embeddings can be
# reused across restarts instead of being recomputed.
vectorstore = Chroma(
    collection_name="medical_dataset_store",
    embedding_function=embed_model,
    persist_directory="./",
)

# BUG FIX: iterating a pandas DataFrame yields its *column names*, so
# `vectorstore.add_texts(context_data)` only indexed the header labels,
# not the data. Flatten each row into one "column: value" text chunk and
# index those instead.
row_texts = [
    " | ".join(f"{col}: {val}" for col, val in row.items())
    for _, row in context_data.iterrows()
]
vectorstore.add_texts(texts=row_texts)

retriever = vectorstore.as_retriever()
27
 
28
# --- Prompt ---------------------------------------------------------------
from langchain_core.prompts import PromptTemplate

# Answer-only medical prompt: the retriever fills {context} and the user's
# message fills {question}.
template = """You are a medical expert.
Use the provided context to answer the question.
If you don't know the answer, say so. Explain your answer in detail.
Do not discuss the context in your response; just provide the answer directly.
Context: {context}
Question: {question}
Answer:"""

rag_prompt = PromptTemplate.from_template(template)
39
 
40
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

# LCEL RAG pipeline: fan the incoming question into the retriever (bound to
# {context}) while passing it through unchanged (bound to {question}), render
# the prompt, call the LLM, and reduce the reply to a plain string.
_chain_inputs = {"context": retriever, "question": RunnablePassthrough()}
rag_chain = _chain_inputs | rag_prompt | llm | StrOutputParser()

import gradio as gr
50
 
51
def rag_memory_stream(message, history):
    """Stream the RAG answer incrementally for the Gradio UI.

    Args:
        message: The user's latest question.
        history: Prior chat turns supplied by Gradio (not used by the chain).

    Yields:
        str: The progressively accumulated answer so the UI updates live.
    """
    partial_text = ""
    # NOTE(review): the diff hunk hides this loop's header line in both file
    # versions; reconstructed from the accumulate-and-yield body as a stream
    # over the RAG chain — confirm against the full file.
    for new_text in rag_chain.stream(message):
        partial_text += new_text
        yield partial_text
56
 
57
# Assistant's opening message, shown before the user types anything.
greetingsmessage = """Hello! Welcome to MediGuide ChatBot. I'm here to provide you with quick and accurate information on medical drugs.
Whether you need details on usage, side effects , etc feel free to ask. Let's enhance patient care together!"""

# Seed the chat with a single (user, assistant) turn: an empty user message
# followed by the greeting above.
_opening_turn = ("", greetingsmessage)
initial_history = [_opening_turn]

# Interface title shown above the chat window.
title = "MediGuide ChatBot"
63
 
 
64
 
 
65
# --- UI -------------------------------------------------------------------
# BUG FIX: gr.Interface maps `inputs` positionally onto the function's
# parameters, so inputs=[gr.Chatbot(...), "text"] fed the chat *history*
# into `message` and the typed text into `history`. It also declared two
# outputs while rag_memory_stream yields a single string. gr.ChatInterface
# is the component designed for a `(message, history)` streaming function
# and fixes both problems (flagging is off by default, so the old
# allow_flagging="never" is preserved implicitly).
demo = gr.ChatInterface(
    fn=rag_memory_stream,
    title=title,
    chatbot=gr.Chatbot(value=initial_history, label="Chat History"),
    fill_height=True,
    theme="glass",
)

if __name__ == "__main__":
    # Start the Gradio server only when run as a script, not on import.
    demo.launch()