import streamlit as st
from openai import OpenAI
import google.generativeai as genai

# -----------------------------------------------------
# Retrieve API keys from Hugging Face Secrets
# -----------------------------------------------------
OPENAI_API_KEY = st.secrets["OPENAI_API_KEY"]
GEMINI_API_KEY = st.secrets["GEMINI_API_KEY"]
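# For local runs outside the Space, Streamlit reads st.secrets from
# .streamlit/secrets.toml instead, e.g.:
#
#   OPENAI_API_KEY = "sk-..."
#   GEMINI_API_KEY = "..."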

# -----------------------------------------------------
# Initialize OpenAI & Gemini
# -----------------------------------------------------
client = OpenAI(api_key=OPENAI_API_KEY)  # OpenAI client
genai.configure(api_key=GEMINI_API_KEY)
gemini_model = genai.GenerativeModel("gemini-2.0-flash")
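# Other model IDs (e.g. "gemini-1.5-pro", "gemini-1.5-flash") can be
# swapped in here; availability depends on the API key's access.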

# -----------------------------------------------------
# Configure Streamlit Page
# -----------------------------------------------------
st.set_page_config(page_title="AI Model Comparator", layout="wide")
st.title("🤖 AI Model Comparator")
st.subheader("Compare responses across multiple LLMs.")

# -----------------------------------------------------
# User Input: Prompt
# -----------------------------------------------------
user_prompt = st.text_area("✍️ Enter your prompt:", "Explain quantum computing in simple terms.")

# -----------------------------------------------------
# Sidebar: Model Settings
# -----------------------------------------------------
st.sidebar.header("⚙️ Model Parameters")
temperature = st.sidebar.slider("🎛 Temperature", 0.0, 1.5, 0.7)
max_tokens = st.sidebar.slider("📏 Max Tokens", 50, 1000, 500)
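# temperature: higher values yield more varied output; max_tokens caps
# the length of each model's reply (both are passed to the calls below).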

# -----------------------------------------------------
# Sidebar Footer: Future Work Section
# -----------------------------------------------------
with st.sidebar:
    st.markdown("---")
    st.markdown("## 🔮 Future Work: 'Prompt Tester'")
    st.write("""
    🚀 The next phase of this project will focus on **prompt testing & optimization**.  
    Features will include:
    - Measuring prompt effectiveness (coherence, conciseness, accuracy)
    - Generating alternative prompt variations
    - Ranking and evaluating LLM responses
    - Providing prompt improvement suggestions  
    Stay tuned for 'Prompt Tester'!
    """)

# -----------------------------------------------------
# API Request Functions
# -----------------------------------------------------

def get_openai_response(prompt):
    try:
        completion = client.chat.completions.create(
            model="gpt-4o",
            messages=[{"role": "user", "content": prompt}],
            temperature=temperature,
            max_tokens=max_tokens
        )
        return completion.choices[0].message.content
    except Exception as e:
        return f"❌ OpenAI error: {e}"

def get_gemini_response(prompt):
    try:
        response = gemini_model.generate_content(
            prompt,
            generation_config=genai.types.GenerationConfig(
                temperature=temperature,
                max_output_tokens=max_tokens,
                candidate_count=1
            )
        )
        return response.text
    except Exception as e:
        return f"❌ Gemini error: {e}"
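
# A minimal sketch (not wired in): the two API calls are independent, so
# the button handler below could fetch them concurrently instead of
# sequentially, e.g.:
#
#   from concurrent.futures import ThreadPoolExecutor
#
#   with ThreadPoolExecutor(max_workers=2) as pool:
#       openai_future = pool.submit(get_openai_response, user_prompt)
#       gemini_future = pool.submit(get_gemini_response, user_prompt)
#       openai_text = openai_future.result()
#       gemini_text = gemini_future.result()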

# -----------------------------------------------------
# Generate Responses when Button is Clicked
# -----------------------------------------------------
if st.button("🚀 Generate Responses"):
    with st.spinner("Fetching responses..."):
        openai_text = get_openai_response(user_prompt)
        gemini_text = get_gemini_response(user_prompt)

        # -----------------------------------------------------
        # Calculate output token counts (using a simple word count)
        # -----------------------------------------------------
        openai_tokens = len(openai_text.split())
        gemini_tokens = len(gemini_text.split())
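
        # Note: whitespace splitting only approximates real token usage.
        # A sketch, assuming the tiktoken package is installed: exact
        # counts for the OpenAI side could be computed with
        #
        #   import tiktoken
        #   enc = tiktoken.encoding_for_model("gpt-4o")
        #   openai_tokens = len(enc.encode(openai_text))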

        # -----------------------------------------------------
        # Display responses in 2 columns with token count info boxes
        # -----------------------------------------------------
        col1, col2 = st.columns(2)

        with col1:
            st.markdown("### 🧠 OpenAI GPT-4o")
            st.info(f"Output Tokens: {openai_tokens}")
            st.write(f"📝 {openai_text}")

        with col2:
            st.markdown("### 🌐 Gemini AI")
            st.info(f"Output Tokens: {gemini_tokens}")
            st.write(f"📝 {gemini_text}")
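
# -----------------------------------------------------
# Dependencies (a sketch of requirements.txt, inferred from the imports above)
# -----------------------------------------------------
#   streamlit
#   openai
#   google-generativeai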