hruday96 committed · Commit ade0644 · verified · 1 Parent(s): a430c13

Update app.py

Files changed (1)
  1. app.py +49 -38
app.py CHANGED
@@ -9,76 +9,87 @@ OPENAI_API_KEY = st.secrets["OPENAI_API_KEY"]
 GEMINI_API_KEY = st.secrets["GEMINI_API_KEY"]
 
 # -----------------------------------------------------
-# Initialize OpenAI Client (New Method)
+# Initialize OpenAI & Gemini
 # -----------------------------------------------------
-client = OpenAI(api_key=OPENAI_API_KEY)  # New OpenAI API usage
-
-# Initialize Gemini Model
+client = OpenAI(api_key=OPENAI_API_KEY)  # OpenAI client
 genai.configure(api_key=GEMINI_API_KEY)
-gemini_model = genai.GenerativeModel("gemini-2.0-flash")
+gemini_model = genai.GenerativeModel("gemini-pro")
 
 # -----------------------------------------------------
 # Configure Streamlit Page
 # -----------------------------------------------------
-st.set_page_config(page_title="AI Model Comparison", layout="wide")
-st.title("πŸ” Compare AI Model Responses")
-st.subheader("Enter a prompt below and see how different AI models respond.")
+st.set_page_config(page_title="AI Prompt Tester", layout="wide")
+st.title("πŸ§ͺ AI Prompt Tester")
+st.subheader("Fine-tune your prompts & compare LLM responses.")
 
 # -----------------------------------------------------
-# Create a 2-Column Layout for OpenAI and Gemini
+# User Input: Prompt
 # -----------------------------------------------------
-col1, col2 = st.columns(2)
+user_prompt = st.text_area("✍️ Enter your prompt:", "Explain quantum computing in simple terms.")
 
 # -----------------------------------------------------
-# Chatbox for User Input
+# πŸ”₯ New Feature 1: Temperature & Max Tokens Sliders
 # -----------------------------------------------------
-user_prompt = st.text_area("Enter your prompt:", "Explain quantum computing in simple terms.")
+st.sidebar.header("βš™οΈ Model Parameters")
+temperature = st.sidebar.slider("πŸŽ› Temperature (0 = predictable, 1 = creative)", 0.0, 1.0, 0.7)
+max_tokens = st.sidebar.slider("πŸ“ Max Tokens (limits response length)", 50, 1000, 500)
 
 # -----------------------------------------------------
-# Generate Responses from OpenAI and Gemini
+# πŸ”„ New Feature 2: "Reword My Prompt"
+# -----------------------------------------------------
+if st.button("♻️ Reword My Prompt"):
+    try:
+        reworded_prompt = client.chat.completions.create(
+            model="gpt-4o",
+            messages=[{"role": "user", "content": f"Reword this prompt differently: {user_prompt}"}]
+        )
+        st.write("πŸ”„ **Alternative Prompt:**", reworded_prompt.choices[0].message.content)
+    except Exception as e:
+        st.error(f"Error in rewording: {e}")
+
 # -----------------------------------------------------
-if st.button("Generate Responses"):
+# Generate Responses from OpenAI & Gemini
+# -----------------------------------------------------
+if st.button("πŸš€ Generate Responses"):
     with st.spinner("Fetching responses..."):
+
         # --- OpenAI GPT-4o ---
         try:
-            completion = client.chat.completions.create(
+            openai_response = client.chat.completions.create(
                 model="gpt-4o",
-                messages=[{"role": "user", "content": user_prompt}]
+                messages=[{"role": "user", "content": user_prompt}],
+                temperature=temperature,
+                max_tokens=max_tokens
             )
-            openai_text = completion.choices[0].message.content
+            openai_text = openai_response.choices[0].message.content
         except Exception as e:
-            openai_text = f"OpenAI error: {e}"
+            openai_text = f"❌ OpenAI error: {e}"
 
         # --- Gemini AI ---
         try:
             response = gemini_model.generate_content(
                 user_prompt,
                 generation_config=genai.types.GenerationConfig(
-                    temperature=0.0,  # Ensures deterministic output
-                    max_output_tokens=500,  # Limits the response length to 500 tokens
-                    candidate_count=1  # Generates only one candidate
+                    temperature=temperature,
+                    max_output_tokens=max_tokens,
+                    candidate_count=1
                 )
             )
-            gemini_text = response.text  # Extract text output
+            gemini_text = response.text
         except Exception as e:
-            gemini_text = f"Gemini error: {e}"
+            gemini_text = f"❌ Gemini error: {e}"
 
         # -----------------------------------------------------
-        # Display responses in respective columns
+        # πŸ†• New Feature 3: Display responses in Expanders
         # -----------------------------------------------------
-        with col1:
-            st.markdown("### OpenAI GPT-4o")
-            st.write(openai_text)
-
-        with col2:
-            st.markdown("### Gemini AI")
-            st.write(gemini_text)
-
+        with st.expander("🧠 OpenAI GPT-4o Response"):
+            st.markdown(f"**GPT-4o says:**\n{openai_text}")
+
+        with st.expander("🌍 Gemini AI Response"):
+            st.markdown(f"**Gemini says:**\n{gemini_text}")
+
 # -----------------------------------------------------
-# Placeholders for future integration of DeepSeek and Ollama
+# πŸ†• New Feature 4: User Feedback Rating
 # -----------------------------------------------------
-st.markdown("### DeepSeek AI")
-st.write("DeepSeek integration will be added later.")
-
-st.markdown("### Ollama")
-st.write("Ollama integration will be added later..")
+rating = st.radio("πŸ“Š Which model gave the best response?", ["OpenAI GPT-4o", "Gemini"])
+st.success(f"βœ… You selected: {rating}")
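
To sanity-check the temperature / max-tokens plumbing introduced in this commit outside of Streamlit, here is a minimal standalone sketch of the same call pattern. It is only an illustration under assumptions not in the commit: the openai and google-generativeai packages are installed, and the two keys are read from OPENAI_API_KEY / GEMINI_API_KEY environment variables instead of st.secrets.

import os

from openai import OpenAI
import google.generativeai as genai

# Assumption: keys come from the environment rather than st.secrets.
client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
genai.configure(api_key=os.environ["GEMINI_API_KEY"])
gemini_model = genai.GenerativeModel("gemini-pro")

prompt = "Explain quantum computing in simple terms."
temperature = 0.7  # mirrors the sidebar slider default
max_tokens = 500   # mirrors the sidebar slider default

# OpenAI GPT-4o, with the same arguments the app passes.
openai_response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": prompt}],
    temperature=temperature,
    max_tokens=max_tokens,
)
print("GPT-4o:", openai_response.choices[0].message.content)

# Gemini, with the same GenerationConfig the app builds.
gemini_response = gemini_model.generate_content(
    prompt,
    generation_config=genai.types.GenerationConfig(
        temperature=temperature,
        max_output_tokens=max_tokens,
        candidate_count=1,
    ),
)
print("Gemini:", gemini_response.text)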