XiangJinYu committed on
Commit
85cc548
·
verified ·
1 Parent(s): b9f6eef

modify app.py to adapt to user-entered API settings

Browse files
Files changed (1) hide show
  1. app.py +16 -9
app.py CHANGED
@@ -97,18 +97,22 @@ def main():
97
 
98
  # LLM Settings
99
  st.subheader("LLM Settings")
 
 
 
 
100
  opt_model = st.selectbox(
101
- "Optimization Model", ["claude-3-5-sonnet-20240620", "gpt-4o", "gpt-4o-mini", "deepseek-chat"], index=0
102
  )
103
  opt_temp = st.slider("Optimization Temperature", 0.0, 1.0, 0.7)
104
 
105
  eval_model = st.selectbox(
106
- "Evaluation Model", ["gpt-4o-mini", "claude-3-5-sonnet-20240620", "gpt-4o", "deepseek-chat"], index=0
107
  )
108
  eval_temp = st.slider("Evaluation Temperature", 0.0, 1.0, 0.3)
109
 
110
  exec_model = st.selectbox(
111
- "Execution Model", ["gpt-4o-mini", "claude-3-5-sonnet-20240620", "gpt-4o", "deepseek-chat"], index=0
112
  )
113
  exec_temp = st.slider("Execution Temperature", 0.0, 1.0, 0.0)
114
 
@@ -202,9 +206,9 @@ def main():
202
  try:
203
  # Initialize LLM
204
  SPO_LLM.initialize(
205
- optimize_kwargs={"model": opt_model, "temperature": opt_temp},
206
- evaluate_kwargs={"model": eval_model, "temperature": eval_temp},
207
- execute_kwargs={"model": exec_model, "temperature": exec_temp},
208
  )
209
 
210
  # Create optimizer instance
@@ -252,9 +256,12 @@ def main():
252
  try:
253
  with st.spinner("Generating response..."):
254
  SPO_LLM.initialize(
255
- optimize_kwargs={"model": opt_model, "temperature": opt_temp},
256
- evaluate_kwargs={"model": eval_model, "temperature": eval_temp},
257
- execute_kwargs={"model": exec_model, "temperature": exec_temp},
 
 
 
258
  )
259
 
260
  llm = SPO_LLM.get_instance()
 
97
 
98
  # LLM Settings
99
  st.subheader("LLM Settings")
100
+
101
+ base_url = st.text_input("Base URL", value="https://api.example.com")
102
+ api_key = st.text_input("API Key", type="password")
103
+
104
  opt_model = st.selectbox(
105
+ "Optimization Model", ["gpt-4o-mini", "gpt-4o", "deepseek-chat"], index=0
106
  )
107
  opt_temp = st.slider("Optimization Temperature", 0.0, 1.0, 0.7)
108
 
109
  eval_model = st.selectbox(
110
+ "Evaluation Model", ["gpt-4o-mini", "gpt-4o", "deepseek-chat"], index=0
111
  )
112
  eval_temp = st.slider("Evaluation Temperature", 0.0, 1.0, 0.3)
113
 
114
  exec_model = st.selectbox(
115
+ "Execution Model", ["gpt-4o-mini", "gpt-4o", "deepseek-chat"], index=0
116
  )
117
  exec_temp = st.slider("Execution Temperature", 0.0, 1.0, 0.0)
118
 
 
206
  try:
207
  # Initialize LLM
208
  SPO_LLM.initialize(
209
+ optimize_kwargs={"model": opt_model, "temperature": opt_temp, "base_url": base_url, "api_key": api_key},
210
+ evaluate_kwargs={"model": eval_model, "temperature": eval_temp, "base_url": base_url, "api_key": api_key},
211
+ execute_kwargs={"model": exec_model, "temperature": exec_temp, "base_url": base_url, "api_key": api_key},
212
  )
213
 
214
  # Create optimizer instance
 
256
  try:
257
  with st.spinner("Generating response..."):
258
  SPO_LLM.initialize(
259
+ optimize_kwargs={"model": opt_model, "temperature": opt_temp, "base_url": base_url,
260
+ "api_key": api_key},
261
+ evaluate_kwargs={"model": eval_model, "temperature": eval_temp, "base_url": base_url,
262
+ "api_key": api_key},
263
+ execute_kwargs={"model": exec_model, "temperature": exec_temp, "base_url": base_url,
264
+ "api_key": api_key},
265
  )
266
 
267
  llm = SPO_LLM.get_instance()