Update app.py
Browse files
app.py
CHANGED
@@ -9,76 +9,87 @@ OPENAI_API_KEY = st.secrets["OPENAI_API_KEY"]
|
|
9 |
GEMINI_API_KEY = st.secrets["GEMINI_API_KEY"]
|
10 |
|
11 |
# -----------------------------------------------------
|
12 |
-
# Initialize OpenAI
|
13 |
# -----------------------------------------------------
|
14 |
-
client = OpenAI(api_key=OPENAI_API_KEY) #
|
15 |
-
|
16 |
-
# Initialize Gemini Model
|
17 |
genai.configure(api_key=GEMINI_API_KEY)
|
18 |
-
gemini_model = genai.GenerativeModel("gemini-
|
19 |
|
20 |
# -----------------------------------------------------
|
21 |
# Configure Streamlit Page
|
22 |
# -----------------------------------------------------
|
23 |
-
st.set_page_config(page_title="AI
|
24 |
-
st.title("
|
25 |
-
st.subheader("
|
26 |
|
27 |
# -----------------------------------------------------
|
28 |
-
#
|
29 |
# -----------------------------------------------------
|
30 |
-
|
31 |
|
32 |
# -----------------------------------------------------
|
33 |
-
#
|
34 |
# -----------------------------------------------------
|
35 |
-
|
|
|
|
|
36 |
|
37 |
# -----------------------------------------------------
|
38 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
39 |
# -----------------------------------------------------
|
40 |
-
|
|
|
|
|
41 |
with st.spinner("Fetching responses..."):
|
|
|
42 |
# --- OpenAI GPT-4o ---
|
43 |
try:
|
44 |
-
|
45 |
model="gpt-4o",
|
46 |
-
messages=[{"role": "user", "content": user_prompt}]
|
|
|
|
|
47 |
)
|
48 |
-
openai_text =
|
49 |
except Exception as e:
|
50 |
-
openai_text = f"OpenAI error: {e}"
|
51 |
|
52 |
# --- Gemini AI ---
|
53 |
try:
|
54 |
response = gemini_model.generate_content(
|
55 |
user_prompt,
|
56 |
generation_config=genai.types.GenerationConfig(
|
57 |
-
temperature=
|
58 |
-
max_output_tokens=
|
59 |
-
candidate_count=1
|
60 |
)
|
61 |
)
|
62 |
-
gemini_text = response.text
|
63 |
except Exception as e:
|
64 |
-
gemini_text = f"Gemini error: {e}"
|
65 |
|
66 |
# -----------------------------------------------------
|
67 |
-
# Display responses in
|
68 |
# -----------------------------------------------------
|
69 |
-
with
|
70 |
-
st.markdown("
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
st.write(gemini_text)
|
76 |
-
|
77 |
# -----------------------------------------------------
|
78 |
-
#
|
79 |
# -----------------------------------------------------
|
80 |
-
st.
|
81 |
-
st.
|
82 |
-
|
83 |
-
st.markdown("### Ollama")
|
84 |
-
st.write("Ollama integration will be added later..")
|
|
|
GEMINI_API_KEY = st.secrets["GEMINI_API_KEY"]

# -----------------------------------------------------
# Initialize OpenAI & Gemini
# -----------------------------------------------------
# One client per provider; both read their keys from Streamlit secrets.
client = OpenAI(api_key=OPENAI_API_KEY)  # OpenAI client

genai.configure(api_key=GEMINI_API_KEY)
# NOTE(review): "gemini-pro" is a legacy model id that Google has retired on
# current API versions — confirm and migrate (e.g. to a gemini-1.5-* id).
gemini_model = genai.GenerativeModel("gemini-pro")
|
# -----------------------------------------------------
# Configure Streamlit Page
# -----------------------------------------------------
# set_page_config must be the first Streamlit call rendered on the page.
# NOTE(review): the emoji in these labels look mojibake-damaged by the page
# scrape — restore the original characters from version control.
st.set_page_config(page_title="AI Prompt Tester", layout="wide")
st.title("π§ͺ AI Prompt Tester")
st.subheader("Fine-tune your prompts & compare LLM responses.")

# -----------------------------------------------------
# User Input: Prompt
# -----------------------------------------------------
# Pre-filled with an example so the app is usable with zero typing.
user_prompt = st.text_area(
    "βοΈ Enter your prompt:",
    "Explain quantum computing in simple terms.",
)
|
# -----------------------------------------------------
# New Feature 1: Temperature & Max Tokens Sliders
# -----------------------------------------------------
# Sidebar controls shared by every model call on the page.
st.sidebar.header("βοΈ Model Parameters")
temperature = st.sidebar.slider(
    "π Temperature (0 = predictable, 1 = creative)",
    min_value=0.0,
    max_value=1.0,
    value=0.7,
)
max_tokens = st.sidebar.slider(
    "π Max Tokens (limits response length)",
    min_value=50,
    max_value=1000,
    value=500,
)
|
# -----------------------------------------------------
# New Feature 2: "Reword My Prompt"
# -----------------------------------------------------
# Asks GPT-4o for an alternative phrasing of the user's prompt.
if st.button("β»οΈ Reword My Prompt"):
    try:
        reworded_prompt = client.chat.completions.create(
            model="gpt-4o",
            messages=[
                {"role": "user", "content": f"Reword this prompt differently: {user_prompt}"}
            ],
            # FIX: honor the sidebar temperature here too — previously this
            # call ignored it, unlike the "Generate Responses" call below.
            temperature=temperature,
        )
        st.write("π **Alternative Prompt:**", reworded_prompt.choices[0].message.content)
    except Exception as e:
        # Broad catch is deliberate at this UI boundary: surface any API
        # failure to the user instead of crashing the whole app.
        st.error(f"Error in rewording: {e}")
|
# -----------------------------------------------------
# Generate Responses from OpenAI & Gemini
# -----------------------------------------------------
if st.button("π Generate Responses"):
    with st.spinner("Fetching responses..."):
        # --- OpenAI GPT-4o ---
        try:
            openai_response = client.chat.completions.create(
                model="gpt-4o",
                messages=[{"role": "user", "content": user_prompt}],
                temperature=temperature,
                max_tokens=max_tokens,
            )
            openai_text = openai_response.choices[0].message.content
        except Exception as e:
            # Keep the error as the displayed "response" so one provider
            # failing never hides the other provider's answer.
            openai_text = f"β OpenAI error: {e}"

        # --- Gemini AI ---
        try:
            response = gemini_model.generate_content(
                user_prompt,
                generation_config=genai.types.GenerationConfig(
                    temperature=temperature,
                    max_output_tokens=max_tokens,
                    candidate_count=1,
                ),
            )
            gemini_text = response.text
        except Exception as e:
            gemini_text = f"β Gemini error: {e}"

    # BUG FIX: persist the results. Previously the expanders and the
    # feedback radio lived inside this `if st.button(...)` block, so the
    # first click on the radio triggered a Streamlit rerun in which the
    # button returned False and everything (responses included) vanished.
    st.session_state["openai_text"] = openai_text
    st.session_state["gemini_text"] = gemini_text

# -----------------------------------------------------
# New Feature 3: Display responses in Expanders
# -----------------------------------------------------
# Rendered on every rerun once responses exist, so they survive widget
# interactions below.
if "openai_text" in st.session_state:
    with st.expander("π§ OpenAI GPT-4o Response"):
        st.markdown(f"**GPT-4o says:**\n{st.session_state['openai_text']}")

    with st.expander("π Gemini AI Response"):
        st.markdown(f"**Gemini says:**\n{st.session_state['gemini_text']}")

    # -----------------------------------------------------
    # New Feature 4: User Feedback Rating
    # -----------------------------------------------------
    rating = st.radio("π Which model gave the best response?", ["OpenAI GPT-4o", "Gemini"])
    st.success(f"β You selected: {rating}")
|
|
|
|
|
|