# Spaces:
# Sleeping
# Sleeping
# (Hugging Face Spaces status-badge text captured during extraction — kept as comments.)
from search import SemanticSearch, GoogleSearch, Document
import streamlit as st
from model import RAGModel, load_configs
def run_on_start():
    """Populate session state with the app configs and the RAG model, once per session.

    Any failure is surfaced in the UI via ``st.error`` and then re-raised so the
    Streamlit run stops instead of continuing with a half-initialized state.
    """
    try:
        if "configs" not in st.session_state:
            loaded = load_configs(config_file="rag.configs.yml")
            st.session_state.configs = loaded
            st.write("Configs Loaded:", loaded)
        if "model" not in st.session_state:
            st.session_state.model = RAGModel(st.session_state.configs)
            st.write("RAGModel Initialized Successfully")
    except Exception as e:
        st.error(f"Initialization Error: {e}")
        raise
run_on_start() | |
def search(query):
    """Run a Google search for *query*, parse the pages, and cache the result.

    The parsed document collection is stored in ``st.session_state.doc`` for
    later semantic retrieval. Errors are shown in the UI and re-raised.
    """
    try:
        engine = GoogleSearch(query)
        pages = engine.all_page_data
        st.write("Google Search Data:", pages)  # Debug GoogleSearch output
        min_len = st.session_state.configs["document"]["min_char_length"]
        parsed = Document(pages, min_char_len=min_len)
        st.session_state.doc = parsed.doc()
        st.write("Document Created Successfully")
    except Exception as e:
        st.error(f"Search Error: {e}")
        raise
# --- Chat UI -----------------------------------------------------------------
st.title("Search Here Instead of Google")

# Session-state defaults: chat history, the cached document corpus, and a flag
# that limits the (expensive) Google scrape to the first query of the session.
if "messages" not in st.session_state:
    st.session_state.messages = []
if "doc" not in st.session_state:
    st.session_state.doc = None
if "refresh" not in st.session_state:
    st.session_state.refresh = True

# Replay the conversation so far — Streamlit reruns this script on every event.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("Search Here instead of Google"):
    st.chat_message("user").markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Scrape + build the corpus only on the first query of the session.
    # NOTE(review): later queries run semantic search against the FIRST
    # query's corpus — confirm this caching is intended.
    if st.session_state.refresh:
        st.session_state.refresh = False
        search(prompt)

    try:
        s = SemanticSearch(
            st.session_state.doc,
            # Key spelled "embeding_model" to match rag.configs.yml — do not "fix" here.
            st.session_state.configs["model"]["embeding_model"],
            st.session_state.configs["model"]["device"],
        )
        # Second return value was never used; discard it explicitly.
        topk, _ = s.semantic_search(query=prompt, k=32)
        st.write("Semantic Search Results:", topk)
        response = st.session_state.model.answer_query(query=prompt, topk_items=topk)
        with st.chat_message("assistant"):
            st.markdown(response)
        st.session_state.messages.append({"role": "assistant", "content": response})
    except Exception as e:
        st.error(f"Error in Semantic Search or Model Response: {e}")
        raise