# Stars / app.py — Starfinder Hugging Face Space
# (Hugging Face page chrome removed from this header: "Aarnaburji's picture ·
#  Update app.py · 9fb3dd5 verified · raw · history blame · 7.5 kB" — these
#  lines were scrape residue, not Python, and prevented the file from parsing.)
import os
import re

import gradio as gr
import openai
from sentence_transformers import SentenceTransformer, util
# Earlier experiment: return a location-specific picture when the user mentions "LA" or "Los Angeles".
# URL or path to your image file
#PICTURE_URL = "Stars/sf.png"
#def respond(user_input):
# if "los angeles" in user_input.lower() or "la" in user_input.lower():
#return f"Here's a picture of Los Angeles!", PICTURE_URL
# else:
# return "How can I help you with astronomy?", None
# Define the Gradio interface
#iface = gr.Interface(
# fn=respond,
# inputs="text",
# outputs=["text", "image"]
#)
# Launch the interface
#iface.launch()
# Disable tokenizer parallelism to avoid fork-related warnings from HF tokenizers
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# Initialize paths and model identifiers for easy configuration and maintenance
filename = "output_topic_details.txt"  # Path to the file storing astronomy topic details used for retrieval
retrieval_model_name = 'output/sentence-transformer-finetuned/'  # locally fine-tuned sentence-transformer checkpoint
# Define paths to images (local copies; query_model actually returns hosted URLs)
path_to_sf_image = "Stars/sf.png"
path_to_sacramento_image = "Stars/sacramento.png"
path_to_la_image = "Stars/la.png"
# NOTE(review): raises KeyError at import time if OPENAI_API_KEY is unset — confirm the Space always defines this secret
openai.api_key = os.environ["OPENAI_API_KEY"]
system_message = "You are an astronomy chatbot named Starfinder specialized in providing information on stargazing, astronomical events, and outer space."
# Initial system message to set the behavior of the assistant; this list grows as the conversation proceeds
messages = [{"role": "system", "content": system_message}]
# Attempt to load the necessary models and provide feedback on success or failure
try:
    retrieval_model = SentenceTransformer(retrieval_model_name)
    print("Models loaded successfully.")
except Exception as e:
    print(f"Failed to load models: {e}")
def load_and_preprocess_text(filename):
    """
    Load and preprocess text from a file.

    Reads *filename* as UTF-8, drops empty lines, and strips surrounding
    whitespace from each remaining line.

    Args:
        filename: Path to the text file to read.

    Returns:
        list[str]: Cleaned, non-empty lines; an empty list if the file
        cannot be read or decoded.
    """
    try:
        with open(filename, 'r', encoding='utf-8') as file:
            segments = [line.strip() for line in file if line.strip()]
        # Narrowed from bare Exception: only I/O and decoding failures are expected here,
        # so programming errors are no longer silently swallowed.
        print("Text loaded and preprocessed successfully.")
        return segments
    except (OSError, UnicodeDecodeError) as e:
        print(f"Failed to load or preprocess text: {e}")
        return []
# Corpus of retrievable text segments, loaded once at startup (empty list on failure)
segments = load_and_preprocess_text(filename)
def find_relevant_segment(user_query, segments):
    """
    Return the segment from *segments* most semantically similar to *user_query*.

    The query is lowercased, both query and segments are embedded with the
    retrieval model, and the segment with the highest cosine similarity wins.

    Args:
        user_query: Raw question text from the user.
        segments: List of candidate text segments.

    Returns:
        str: The best-matching segment, or "" if anything goes wrong.
    """
    try:
        # Lowercase the query so matching is case-insensitive
        normalized_query = user_query.lower()
        # Embed the query and all candidate segments
        query_vec = retrieval_model.encode(normalized_query)
        segment_vecs = retrieval_model.encode(segments)
        # Cosine similarity of the query against every segment
        scores = util.pytorch_cos_sim(query_vec, segment_vecs)[0]
        # Pick the highest-scoring segment
        return segments[scores.argmax()]
    except Exception as e:
        print(f"Error in finding relevant segment: {e}")
        return ""
def generate_response(user_query, relevant_segment):
    """
    Generate a chat completion grounded in the retrieved segment.

    Appends a user turn containing *relevant_segment* to the shared
    conversation history, queries gpt-3.5-turbo, records the assistant's
    reply in the history, and returns it.

    Args:
        user_query: The user's original question (kept for signature
            compatibility; the prompt is built from the segment).
        relevant_segment: Retrieved context to ground the answer.

    Returns:
        str: The assistant's reply, or an error string on failure.
    """
    try:
        prompt = f"Here's the information on outer space: {relevant_segment}"
        # Record the grounded user turn in the shared conversation history
        messages.append({"role": "user", "content": prompt})
        completion = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=messages,
            max_tokens=150,
            temperature=0.2,
            top_p=1,
            frequency_penalty=0,
            presence_penalty=0
        )
        reply = completion['choices'][0]['message']['content'].strip()
        # Keep the assistant turn so later calls retain context
        messages.append({"role": "assistant", "content": reply})
        return reply
    except Exception as e:
        print(f"Error in generating response: {e}")
        return f"Error in generating response: {e}"
def query_model(question):
    """
    Process a question, find relevant information, and generate a response.

    Hard-coded city branches return a canned stargazing list plus an image
    URL; everything else goes through retrieval + the chat model.

    Args:
        question: The user's question (may be empty).

    Returns:
        tuple[str, str | None]: (response text, image URL or None).
    """
    if question == "":
        return "Welcome to Starfinder! Ask me anything about outer space, stargazing, and upcoming astronomical events.", None
    lower_question = question.lower()
    if "san francisco" in lower_question:
        return "There are many locations near San Francisco where you can stargaze: Lick Observatory (Mount Hamilton), Chabot Space & Science Center (Oakland) , Twin Peaks (SF), Sibley Volcanic National Reserve (Oakland), Mount Tamalpais (Marin), San Francisco State University Observatory (SF), Mount Diablo (East Bay)!", "https://huggingface.co/spaces/Starfinders/Stars/resolve/main/sf.png"
    if "sacramento" in lower_question:
        return "There are many locations near Sacramento where you can stargaze: Kalithea Park, Northstar Park, Curtis Park, Lake Theodore, Casa Bella Verde, McKinley Park, Tiscornia Park, Old Sacramento Waterfront.", "https://huggingface.co/spaces/Starfinders/Stars/resolve/main/sacramento.png"
    # BUG FIX: the original used `"la" in question.lower()`, which substring-matched
    # "la" inside unrelated words ("solar", "flare", "place"), hijacking those
    # questions into the Los Angeles branch. Match "la" as a whole word instead.
    if "los angeles" in lower_question or re.search(r"\bla\b", lower_question):
        return "There are many locations near Los Angeles where you can stargaze: Leo Carrillo State Beach (Malibu), Malibu Creek State Park (Malibu), Griffith Observatory (Griffith Park), Mount Wilson Observatory (Angeles Crest)", "https://huggingface.co/spaces/Starfinders/Stars/resolve/main/la.png"
    relevant_segment = find_relevant_segment(question, segments)
    if not relevant_segment:
        return "Could not find specific information. Please refine your question.", None
    response = generate_response(question, relevant_segment)
    return response, None
# Markdown shown at the top of the UI: title plus creator credits
welcome_message = """
# ✧ Welcome to Starfinder!
## Your AI-driven assistant for all astronomy-related queries. Created by Aarna, Aditi, and Anastasia of the 2024 Kode With Klossy SF Camp.
"""
# Markdown list of supported topics, rendered in the left column of the UI
topics = """
### Feel Free to ask me anything from the topics below!
- The Night sky
- Outer space insights
- Light pollution
- Stargazing spots
- Celestial events
- Astronomy tips
"""
# Setup the Gradio Blocks interface with custom layout components
with gr.Blocks(theme='earneleh/paris') as demo:
    # BUG FIX: the original first line was
    #   gr.Image(display_title(), show_label=False, ...)
    # but display_title is not defined anywhere in this file, so the app
    # crashed at startup with a NameError. The banner image line is removed.
    gr.Markdown(welcome_message)  # Display the formatted welcome message
    with gr.Row():
        with gr.Column():
            gr.Markdown(topics)  # Show the topics on the left side
    with gr.Row():
        with gr.Column():
            question = gr.Textbox(label="Your question", placeholder="What do you want to ask about?")
            answer = gr.Textbox(label="StarFinder Response", placeholder="StarFinder will respond here...", interactive=False, lines=10)
            image_output = gr.Image(label="Image Output")  # Optional location image returned by query_model
            submit_button = gr.Button("Submit")
            # Wire the button to query_model; outputs feed both the text box and the image
            submit_button.click(fn=query_model, inputs=question, outputs=[answer, image_output])

# Launch the Gradio app to allow user interaction
demo.launch(share=True)