import streamlit as st
from openai import OpenAI
import google.generativeai as genai
# -----------------------------------------------------
# Retrieve API keys from Hugging Face Secrets
# -----------------------------------------------------
OPENAI_API_KEY = st.secrets["OPENAI_API_KEY"]
GEMINI_API_KEY = st.secrets["GEMINI_API_KEY"]
# -----------------------------------------------------
# Initialize OpenAI & Gemini
# -----------------------------------------------------
client = OpenAI(api_key=OPENAI_API_KEY) # OpenAI client
genai.configure(api_key=GEMINI_API_KEY)
gemini_model = genai.GenerativeModel("gemini-2.0-flash")
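# If a hard-coded model name ever stops resolving, the Gemini SDK can list
# what this key may call. A debugging sketch (genai.list_models() is part of
# the google-generativeai package; run it once locally rather than on every
# Streamlit rerun):
#
#     for m in genai.list_models():
#         print(m.name, m.supported_generation_methods)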
# -----------------------------------------------------
# Configure Streamlit Page
# -----------------------------------------------------
st.set_page_config(page_title="AI Model Comparator", layout="wide")
st.title("🤖 AI Model Comparator")
st.subheader("Compare responses across multiple LLMs.")
# -----------------------------------------------------
# User Input: Prompt
# -----------------------------------------------------
user_prompt = st.text_area("✏️ Enter your prompt:", "Explain quantum computing in simple terms.")
# -----------------------------------------------------
# Sidebar: Model Settings
# -----------------------------------------------------
st.sidebar.header("⚙️ Model Parameters")
temperature = st.sidebar.slider("🎛 Temperature", 0.0, 1.5, 0.7)
max_tokens = st.sidebar.slider("📏 Max Tokens", 50, 1000, 500)
# -----------------------------------------------------
# Sidebar Footer: Future Work Section
# -----------------------------------------------------
with st.sidebar:
    st.markdown("---")
    st.markdown("## 🔮 Future Work: 'Prompt Tester'")
    st.write("""
🚀 The next phase of this project will focus on **prompt testing & optimization**.
Features will include:
- Measuring prompt effectiveness (coherence, conciseness, accuracy)
- Generating alternative prompt variations
- Ranking and evaluating LLM responses
- Providing prompt improvement suggestions

Stay tuned for 'Prompt Tester'!
""")
# -----------------------------------------------------
# API Request Functions
# -----------------------------------------------------
def get_openai_response(prompt):
    try:
        completion = client.chat.completions.create(
            model="gpt-4o",
            messages=[{"role": "user", "content": prompt}],
            temperature=temperature,
            max_tokens=max_tokens,
        )
        return completion.choices[0].message.content
    except Exception as e:
        return f"❌ OpenAI error: {e}"
def get_gemini_response(prompt):
    try:
        response = gemini_model.generate_content(
            prompt,
            generation_config=genai.types.GenerationConfig(
                temperature=temperature,
                max_output_tokens=max_tokens,
                candidate_count=1,
            ),
        )
        return response.text
    except Exception as e:
        return f"❌ Gemini error: {e}"
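# Every click of the button below re-bills both APIs, even for an unchanged
# prompt. A sketch of memoizing a helper with Streamlit's built-in cache
# (st.cache_data is a standard Streamlit decorator; the slider values are
# passed as arguments purely so they participate in the cache key):
#
#     @st.cache_data(show_spinner=False)
#     def cached_openai(prompt, temperature, max_tokens):
#         return get_openai_response(prompt)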
# -----------------------------------------------------
# Generate Responses when Button is Clicked
# -----------------------------------------------------
if st.button("🚀 Generate Responses"):
    with st.spinner("Fetching responses..."):
        openai_text = get_openai_response(user_prompt)
        gemini_text = get_gemini_response(user_prompt)
    # -----------------------------------------------------
    # Approximate output token counts (word count as a rough proxy)
    # -----------------------------------------------------
    openai_tokens = len(openai_text.split())
    gemini_tokens = len(gemini_text.split())
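    # Word counts understate real tokenization. A minimal sketch of an exact
    # count for the OpenAI side, assuming the optional tiktoken package is
    # installed (tiktoken ships no Gemini tokenizer, so the proxy stays there):
    try:
        import tiktoken  # local import so the app still runs without it
        encoding = tiktoken.encoding_for_model("gpt-4o")
        openai_tokens = len(encoding.encode(openai_text))
    except Exception:
        pass  # keep the word-count approximation if tiktoken is unavailable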
    # -----------------------------------------------------
    # Display responses in 2 columns with token count info boxes
    # -----------------------------------------------------
    col1, col2 = st.columns(2)
    with col1:
        st.markdown("### 🧠 OpenAI GPT-4o")
        st.info(f"Output Tokens: {openai_tokens}")
        st.write(f"📝 {openai_text}")
    with col2:
        st.markdown("### 🌐 Gemini 2.0 Flash")
        st.info(f"Output Tokens: {gemini_tokens}")
        st.write(f"📝 {gemini_text}")
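# To run locally (standard Streamlit invocation; both API keys must then live
# in .streamlit/secrets.toml, which st.secrets also reads, instead of the
# Hugging Face Spaces secrets UI):
#
#     streamlit run app.py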