Spaces:
Sleeping
Sleeping
import os
import time

import bs4
import google.generativeai as genai
import nest_asyncio
import streamlit as st
from langchain import LLMChain
from langchain.document_loaders import WebBaseLoader
from langchain.prompts import PromptTemplate
from langchain_google_genai import ChatGoogleGenerativeAI
from PyPDF2 import PdfReader
from streamlit_chat import message
# Allow a nested asyncio event loop: WebBaseLoader.aload() runs inside
# Streamlit's already-running loop.
nest_asyncio.apply()

# Fail fast with a clear message when the key is missing.  The original
# assigned os.getenv(...) (possibly None) into os.environ, which raises a
# confusing TypeError instead of explaining what is wrong.
_api_key = os.getenv("GOOGLE_API_KEY")
if not _api_key:
    raise RuntimeError("GOOGLE_API_KEY environment variable is not set.")
os.environ["GOOGLE_API_KEY"] = _api_key
genai.configure(api_key=_api_key)

# Gemini chat model; a low temperature keeps answers focused.
llm = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.4)
# Prompt rendered for the LLM on every turn.  The original wording was
# garbled ("who give clear an well having a conversation"); fixed here.
# The placeholder names must match `input_variables` below — note that
# "provied_url_extracted_text" keeps the original (misspelled) label the
# rest of the prompt refers to.
template = """You are a friendly chatbot called "Chatto" who gives clear and helpful answers while having a conversation with a human, and you were created by Suriya, an AI enthusiast. If the user asks anything about a link, try to use the "provied_url_extracted_text" content.
provied_url_extracted_text:
{extracted_text}
provided document:
{provided_docs}
previous_chat:
{chat_history}
Human: {human_input}
Chatbot:"""

prompt = PromptTemplate(
    input_variables=["chat_history", "provided_docs", "extracted_text", "human_input"],
    template=template,
)
# Conversation chain that renders the prompt and calls the Gemini model.
llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=True)

# Module-level accumulators shared with conversational_chat().
previous_response = ""
provided_docs = ""
extracted_text = ""
def conversational_chat(query):
    """Answer *query* using the chat history, uploaded PDFs and URL text.

    Appends the (query, answer) pair to ``st.session_state['history']``
    and returns the answer string.
    """
    global previous_response, provided_docs, extracted_text
    # Rebuild the transcript from scratch on every call.  The original code
    # kept concatenating onto the module-level string, so each call re-added
    # the entire history and earlier turns appeared duplicated, growing the
    # prompt quadratically.
    previous_response = "".join(
        f"Human: {pair[0]}\n Chatbot: {pair[1]}\n"
        for pair in st.session_state["history"]
        if pair is not None
    )
    provided_docs = "".join(st.session_state["docs"])
    extracted_text = "".join(st.session_state["extracted_text"])
    result = llm_chain.predict(
        chat_history=previous_response,
        human_input=query,
        provided_docs=provided_docs,
        extracted_text=extracted_text,
    )
    st.session_state["history"].append((query, result))
    return result
# --- Page header -----------------------------------------------------------
st.title("Chat Bot:")
st.text("I am Chatto Your Friendly Assitant")
st.markdown("Built by [Suriya❤️](https://github.com/theSuriya)")

# One-time initialisation of the per-session stores.
for _key in ("history", "docs", "extracted_text"):
    if _key not in st.session_state:
        st.session_state[_key] = []
def get_pdf_text(pdf_docs):
    """Concatenate the extracted text of every page of every uploaded PDF.

    Parameters
    ----------
    pdf_docs : iterable of file-like objects accepted by ``PyPDF2.PdfReader``.

    Returns
    -------
    str
        All page text joined together ("" when *pdf_docs* is empty).
    """
    parts = []
    for pdf in pdf_docs:
        reader = PdfReader(pdf)
        for page in reader.pages:
            # extract_text() can return None for image-only pages; the
            # original `text += page.extract_text()` then raised TypeError.
            parts.append(page.extract_text() or "")
    return "".join(parts)
def response_streaming(text):
    """Yield *text* one character at a time with a small delay.

    Gives ``st.write_stream`` a typewriter effect.
    """
    # Local import: the original file never imported `time`, so the first
    # call to this generator raised NameError at the sleep line.
    import time

    for ch in text:
        yield ch
        time.sleep(0.01)
def get_url_text(url_link):
    """Download *url_link* and return its page text, or "" on any error."""
    try:
        loader = WebBaseLoader(url_link)
        loader.requests_per_second = 1
        pages = loader.aload()
        return "".join(page.page_content for page in pages)
    except Exception as e:
        # Best-effort: log to stdout and fall back to an empty string so the
        # UI keeps working even when the fetch fails.
        print(f"Error fetching or processing URL: {e}")
        return ""
# --- Sidebar: ingest PDFs and/or a URL into Chatto's memory ----------------
with st.sidebar:
    st.title("Add a file for Chatto memory:")
    uploaded_files = st.file_uploader(
        "Upload your PDF Files and Click on the Submit & Process Button",
        accept_multiple_files=True,
    )
    uploaded_url = st.text_input("Please upload a URL:")
    if st.button("Submit & Process"):
        if uploaded_files or uploaded_url:
            with st.spinner("Processing..."):
                if uploaded_files:
                    # Parse once and store the whole string.  The original
                    # called get_pdf_text() twice (double parse) and used
                    # `list += str`, which extended the session list one
                    # character at a time.
                    pdf_text = get_pdf_text(uploaded_files)
                    st.session_state["docs"].append(pdf_text)
                if uploaded_url:
                    # Same fix: fetch the URL once, append the text whole.
                    url_text = get_url_text(uploaded_url)
                    st.session_state["extracted_text"].append(url_text)
                st.success("Processing complete!")
        else:
            st.error("Please upload at least one PDF file or provide a URL.")
# --- Chat transcript and input ---------------------------------------------
if "messages" not in st.session_state:
    st.session_state.messages = [
        {"role": "assistant", "content": "I'm Here to help you questions"}
    ]

# Replay the stored transcript.  Loop variable renamed from `message`, which
# shadowed the streamlit_chat import of the same name.
for entry in st.session_state.messages:
    with st.chat_message(entry["role"]):
        st.write(entry["content"])

user_input = st.chat_input("Ask Your Questions 👉..")
if user_input:
    st.session_state.messages.append({"role": "user", "content": user_input})
    with st.chat_message("user"):
        st.write(user_input)

    response = conversational_chat(user_input)
    # stream = response_streaming(response)
    with st.chat_message("assistant"):
        full_response = st.write_stream(response_streaming(response))
    st.session_state.messages.append({"role": "assistant", "content": response})