from dotenv import load_dotenv
import os
from docx import Document
from llama_index.llms.together import TogetherLLM
from llama_index.core.llms import ChatMessage, MessageRole
from Bio import Entrez
import ssl
from transformers import AutoModelForSequenceClassification, AutoTokenizer
import streamlit as st
from googleapiclient.discovery import build
from typing import List, Optional
load_dotenv()
# 995d5f1a8de125c5b39bb48c2613e85f57d53c0e498a87d1ff33f0ec89a26ec7
os.environ["TOGETHER_API"] = os.getenv("TOGETHER_API")
os.environ["GOOGLE_SEARCH_API_KEY"] = os.getenv("GOOGLE_SEARCH_API_KEY")
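# The environment setup above expects a .env file (or preset environment variables)
# defining TOGETHER_API (Together AI key) and GOOGLE_SEARCH_API_KEY (Google Custom
# Search key); if either is missing, os.getenv returns None and the API calls below
# will fail.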
def search_pubmed(query: str) -> Optional[List[str]]:
    """
    Searches PubMed for a given query and returns a list of formatted results
    (or None if no results are found).
    """
    Entrez.email = "harisellahi888@gmail.com"  # Replace with your email
    try:
        ssl._create_default_https_context = ssl._create_unverified_context
        handle = Entrez.esearch(db="pubmed", term=query, retmax=3)
        record = Entrez.read(handle)
        id_list = record["IdList"]
        if not id_list:
            return None
        handle = Entrez.efetch(db="pubmed", id=id_list, retmode="xml")
        articles = Entrez.read(handle)
        results = []
        for article in articles['PubmedArticle']:
            try:
                medline_citation = article['MedlineCitation']
                article_data = medline_citation['Article']
                title = article_data['ArticleTitle']
                abstract = article_data.get('Abstract', {}).get('AbstractText', [""])[0]
                result = f"**Title:** {title}\n**Abstract:** {abstract}\n"
                result += f"**Link:** https://pubmed.ncbi.nlm.nih.gov/{medline_citation['PMID']} \n\n"
                results.append(result)
            except KeyError as e:
                print(f"Error parsing article: {article}, Error: {e}")
        return results
    except Exception as e:
        print(f"Error accessing PubMed: {e}")
        return None
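# Illustrative usage sketch for search_pubmed (requires network access to NCBI
# Entrez; the query below is just an example):
#   hits = search_pubmed("covid-19 vaccine efficacy")
#   if hits:
#       for hit in hits:
#           print(hit)  # "**Title:** ...\n**Abstract:** ...\n**Link:** ..."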
def chat_with_pubmed(article_text, article_link):
    """
    Engages in a chat-like interaction with a PubMed article using TogetherLLM.
    """
    try:
        llm = TogetherLLM(model="Qwen/Qwen1.5-14B-Chat", api_key=os.environ['TOGETHER_API'])
        messages = [
            ChatMessage(role=MessageRole.SYSTEM, content="You are a helpful AI assistant summarizing and answering questions about the following medical research article: " + article_link),
            ChatMessage(role=MessageRole.USER, content=article_text)
        ]
        response = llm.chat(messages)
        return str(response) if response else "I'm sorry, I couldn't generate a summary for this article."
    except Exception as e:
        print(f"Error in chat_with_pubmed: {e}")
        return "An error occurred while generating a summary."
def search_web(query: str, num_results: int = 3) -> Optional[List[str]]:
    """
    Searches the web using the Google Search API and returns a list of formatted results
    (or None if no results are found).
    """
    try:
        service = build("customsearch", "v1", developerKey=os.environ["GOOGLE_SEARCH_API_KEY"])
        # Execute the search request
        res = service.cse().list(q=query, cx="e31a5857f45ef4d2a", num=num_results).execute()
        if "items" not in res:
            return None
        results = []
        for item in res["items"]:
            title = item["title"]
            link = item["link"]
            snippet = item["snippet"]
            result = f"**Title:** {title}\n**Link:** {link} \n**Snippet:** {snippet}\n\n"
            results.append(result)
        return results
    except Exception as e:
        print(f"Error performing web search: {e}")
        return None
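# Illustrative usage sketch for search_web (assumes GOOGLE_SEARCH_API_KEY is valid
# and the hard-coded Custom Search Engine id above is active; the query is an example):
#   web_hits = search_web("WHO guidance on hypertension", num_results=2)
#   if web_hits:
#       print("\n".join(web_hits))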
from together import Together
def medmind_chatbot(user_input, chat_history=None):
    """
    Processes user input, interacts with various resources, and generates a response.
    Handles potential errors and maintains chat history.
    """
    if chat_history is None:
        chat_history = []
    response_parts = []  # Collect responses from different sources
    final_response = ""
    try:
        # PubMed Search and Chat
        pubmed_results = search_pubmed(user_input)
        if pubmed_results:
            for article_text in pubmed_results:
                title, abstract, link = article_text.split("\n")[:3]
                # print(article_text)
                response_parts.append(f"{title}\n{abstract}\n{link}\n")
        else:
            response_parts.append("No relevant PubMed articles found.")
        # Web Search
        web_results = search_web(user_input)
        if web_results:
            response_parts.append("\n\n**Web Search Results:**")
            response_parts.extend(web_results)
        else:
            response_parts.append("No relevant web search results found.")
        # Combine response parts into a single string
        response_text = "\n\n".join(response_parts)
        prompt = f"""You are a Health Assistant AI designed to provide detailed responses to health-related questions.
Based on the information retrieved from the PubMed and web searches below, answer the user's query appropriately.
- If the user's query is health-related, provide a detailed and helpful response based on the retrieved information. If there is previous conversation, take it into account when answering.
- If the query is a general greeting (e.g., 'Hello', 'Hi'), respond as a friendly assistant.
- If the query is irrelevant or unrelated to health, respond with: 'I am a health assistant. Please ask only health-related questions.'
- Do not mention where the information was retrieved from.
Previous Conversation:
{chat_history}
User's Query: {user_input}
Information retrieved from PubMed and Web Search:
{response_text}
Your response:"""
        client = Together(api_key=os.environ.get('TOGETHER_API'))
        response = client.chat.completions.create(
            model="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
            messages=[{"role": "user", "content": prompt}],
        )
        final_response = response.choices[0].message.content
    except Exception as e:
        print(f"Error in chatbot: {e}")
        final_response = "An error occurred. Please try again later."
    chat_history.append((user_input, final_response))
    return final_response, chat_history
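# Illustrative multi-turn sketch showing how the returned history is threaded back
# in (each call hits the Together AI and search APIs, so run sparingly):
#   answer, history = medmind_chatbot("What are the symptoms of COVID-19?")
#   follow_up, history = medmind_chatbot("Which of those need urgent care?", history)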
medmind_chatbot("What are the symptoms of COVID-19?")
import gradio as gr
def show_info_popup():
    info = """
**HealthHive is an AI-powered chatbot designed to assist with medical information.**
...
"""
    return info
def main():
    # Initialize Gradio Interface
    with gr.Blocks() as demo:
        gr.Markdown("# HealthHive Chatbot")
        gr.Markdown("Ask your medical questions and get reliable information!")
        # Example Questions (Sidebar)
        gr.Markdown("### Example Questions")
        example_questions = [
            "What are the symptoms of COVID-19?",
            "How can I manage my diabetes?",
            "What are the potential side effects of ibuprofen?",
            "What lifestyle changes can help prevent heart disease?"
        ]
        for question in example_questions:
            gr.Markdown(f"- {question}")
        # Chat History and User Input
        with gr.Row():
            user_input = gr.Textbox(label="You:", placeholder="Type your medical question here...", lines=2)
            chat_history = gr.State([])
        # Output Container
        with gr.Row():
            response = gr.Textbox(label="HealthHive:", placeholder="Response will appear here...", interactive=False, lines=10)
        # Info panel populated after each response
        info_box = gr.Markdown()

        def clear_chat():
            return "", ""

        # Define function to update chat history, response, and info panel
        def on_submit(user_input, chat_history):
            result, updated_history = medmind_chatbot(user_input, chat_history)
            info = show_info_popup()
            return result, updated_history, info

        # Link the submit button to the chatbot function
        gr.Button("Submit").click(on_submit, inputs=[user_input, chat_history], outputs=[response, chat_history, info_box])
        # gr.Button("Start New Chat").click(lambda: [], outputs=[chat_history])
        gr.Button("Start New Chat").click(clear_chat, outputs=[user_input, response])
    demo.launch()

if __name__ == "__main__":
    main()