import os

import streamlit as st
from dotenv import load_dotenv
import google.generativeai as genai
from llama_index.core import Settings, VectorStoreIndex
from llama_index.core.node_parser import SentenceSplitter
from llama_index.embeddings.gemini import GeminiEmbedding
from llama_index.llms.gemini import Gemini
from llama_index.readers.web import SimpleWebPageReader

st.title("LLM-Powered QA System")

# Load the Gemini API key from a .env file.
load_dotenv()
api_key = os.getenv("GOOGLE_API_KEY")


def validate_api_key():
    """Stop the app early if no API key is configured."""
    if not api_key:
        st.error("GOOGLE_API_KEY environment variable not found!")
        st.stop()
    genai.configure(api_key=api_key)


validate_api_key()

model = Gemini(model_name="models/gemini-pro")

url = st.text_input("Enter Webpage URL to Process:")
question = st.text_input("Enter your question from the webpage:")

if st.button("Process Webpage"):
    if url:
        try:
            st.info("Fetching webpage content...")
            # Download the page and convert its HTML to plain text.
            reader = SimpleWebPageReader(html_to_text=True)
            documents = reader.load_data(urls=[url])

            # Configure the global LlamaIndex Settings used for indexing and querying.
            embed_model = GeminiEmbedding(model_name="models/embedding-001")
            Settings.llm = model
            Settings.embed_model = embed_model
            Settings.node_parser = SentenceSplitter(chunk_size=512, chunk_overlap=20)
            Settings.num_output = 512
            Settings.context_window = 3900

            # Build the vector index; it picks up the global Settings automatically,
            # so no settings argument needs to be passed here.
            index = VectorStoreIndex.from_documents(documents)
            query_engine = index.as_query_engine()
            response = query_engine.query(question)
            st.write("Response: " + response.response)
        except Exception as e:
            st.error(f"Error occurred: {str(e)}")
    else:
        st.warning("Please enter a URL to process.")
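
# To run the app (a minimal sketch, assuming the script is saved as app.py and a
# .env file containing GOOGLE_API_KEY=<your key> sits in the same directory):
#   streamlit run app.py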