Update app.py
app.py CHANGED
@@ -6,6 +6,7 @@ import numpy as np
 from sentence_transformers import SentenceTransformer
 from sklearn.metrics.pairwise import cosine_similarity
 import torch
+import re
 
 # Set up OpenAI client
 client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
@@ -43,6 +44,15 @@ if "follow_up_mode" not in st.session_state:
 if "generated_question" not in st.session_state:
     st.session_state.generated_question = None  # Stores the generated question for persistence
 
+if "code_template" not in st.session_state:
+    st.session_state.code_template = ""  # Stores the code template
+
+if "sample_test_case" not in st.session_state:
+    st.session_state.sample_test_case = ""  # Stores the sample test case
+
+if "expected_output" not in st.session_state:
+    st.session_state.expected_output = ""  # Stores the expected output
+
 if "debug_logs" not in st.session_state:
     st.session_state.debug_logs = None  # Stores debug logs for toggling
 
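
The three new session-state keys reuse the `if key not in st.session_state` guard already applied to `generated_question` and `debug_logs`. If more fields are added later, the guards can be collapsed into a loop over a defaults dict. A minimal sketch, not part of the commit (key names taken from the diff):

```python
import streamlit as st

# Defaults for the keys introduced in this commit; one place to extend.
SESSION_DEFAULTS = {
    "code_template": "",     # code template extracted from the generated question
    "sample_test_case": "",  # sample input extracted from the generated question
    "expected_output": "",   # expected output extracted from the generated question
}

for key, default in SESSION_DEFAULTS.items():
    if key not in st.session_state:
        st.session_state[key] = default
```
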
@@ -78,6 +88,32 @@ def generate_response(messages):
 
     return response.choices[0].message.content
 
+# Function to extract code template and sample test case from the generated question
+def extract_code_and_test_case(generated_question):
+    code_template = ""
+    sample_test_case = ""
+    expected_output = ""
+
+    # Extract code template
+    code_match = re.search(r'```python(.*?)```', generated_question, re.DOTALL)
+    if code_match:
+        code_template = code_match.group(1).strip()
+    else:
+        # Default code template if none is found
+        code_template = "# Write your code here\n"
+
+    # Extract sample test case and expected output
+    test_case_match = re.search(r'Sample Input:\s*(.*?)\n', generated_question, re.DOTALL)
+    expected_output_match = re.search(r'Expected Output:\s*(.*?)\n', generated_question, re.DOTALL)
+    if test_case_match and expected_output_match:
+        sample_test_case = test_case_match.group(1).strip()
+        expected_output = expected_output_match.group(1).strip()
+    else:
+        sample_test_case = ""
+        expected_output = ""
+
+    return code_template, sample_test_case, expected_output
+
 # User input form for generating a new question
 with st.form(key="input_form"):
     company = st.text_input("Company", value="Google")  # Default value: Google
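
`extract_code_and_test_case` leans on three regexes. Note that even with `re.DOTALL`, the lazy `(.*?)` followed by a literal `\n` stops `Sample Input` and `Expected Output` at the first line break, so only single-line test cases survive extraction. A quick standalone check against an invented model response (the `mock` text below is illustrative only; the tuple-shaped sample input matches what the `Run Code` block later turns into call arguments):

```python
import re

# Invented model output; "\n" escapes keep the embedded ```python fence on
# one source line here.
mock = ("Problem Description: sum a batch of request sizes.\n\n"
        "```python\ndef solution(nums):\n    return sum(nums)\n```\n\n"
        "Sample Input: ([1, 2, 3],)\n"
        "Expected Output: 6\n")

code = re.search(r'```python(.*?)```', mock, re.DOTALL)
sample = re.search(r'Sample Input:\s*(.*?)\n', mock, re.DOTALL)
expected = re.search(r'Expected Output:\s*(.*?)\n', mock, re.DOTALL)

print(code.group(1).strip())      # two-line function from the code block
print(sample.group(1).strip())    # ([1, 2, 3],)
print(expected.group(1).strip())  # 6
```
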
@@ -97,13 +133,17 @@ if generate_button:
 
     # Prepare a detailed prompt for GPT using the top question's details
     detailed_prompt = (
-        f"Transform this LeetCode question into a real-world interview scenario
+        f"Transform this LeetCode question into a real-world interview scenario.\n\n"
         f"**Company**: {top_question['company']}\n"
         f"**Question Name**: {top_question['questionName']}\n"
         f"**Difficulty Level**: {top_question['difficulty level']}\n"
         f"**Tags**: {top_question['Tags']}\n"
         f"**Content**: {top_question['Content']}\n"
-        f"\nPlease create a real-world interview question based on this information."
+        f"\nPlease create a real-world interview question based on this information. "
+        f"Include the following sections:\n\n"
+        f"- Problem Description\n"
+        f"- Code Template (in a Python code block)\n"
+        f"- Sample Input and Expected Output (clearly separated)\n"
     )
 
     # Generate response using OpenAI API with detailed prompt and debugging logs
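
The added prompt lines ask the model for exactly the sections the new extractor looks for: a Python code block, `Sample Input:`, and `Expected Output:`. To see the literal text sent to the model, the f-string can be rendered against a stand-in row; the `top_question` values here are invented for illustration:

```python
top_question = {
    "company": "Google",
    "questionName": "Two Sum",
    "difficulty level": "Easy",
    "Tags": "array, hash table",
    "Content": "Given an array of integers nums and a target, return indices...",
}

detailed_prompt = (
    f"Transform this LeetCode question into a real-world interview scenario.\n\n"
    f"**Company**: {top_question['company']}\n"
    f"**Question Name**: {top_question['questionName']}\n"
    f"**Difficulty Level**: {top_question['difficulty level']}\n"
    f"**Tags**: {top_question['Tags']}\n"
    f"**Content**: {top_question['Content']}\n"
    f"\nPlease create a real-world interview question based on this information. "
    f"Include the following sections:\n\n"
    f"- Problem Description\n"
    f"- Code Template (in a Python code block)\n"
    f"- Sample Input and Expected Output (clearly separated)\n"
)
print(detailed_prompt)
```
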
@@ -112,6 +152,12 @@ if generate_button:
     # Store generated question in session state for persistence in sidebar and follow-up conversation state
     st.session_state.generated_question = response
 
+    # Extract code template and sample test case
+    code_template, sample_test_case, expected_output = extract_code_and_test_case(response)
+    st.session_state.code_template = code_template
+    st.session_state.sample_test_case = sample_test_case
+    st.session_state.expected_output = expected_output
+
     # Enable follow-up mode after generating the initial question
     st.session_state.follow_up_mode = True
 
@@ -164,23 +210,51 @@ if st.session_state.generated_question:
 else:
     st.sidebar.markdown("_No question generated yet._")
 
-# Right sidebar toggleable debug logs and code interpreter section
-with st.expander("Debug Logs (Toggle On/Off)", expanded=False):
-    if st.session_state.debug_logs:
-        st.write(st.session_state.debug_logs)
-
 st.sidebar.markdown("---")
 st.sidebar.markdown("## Python Code Interpreter")
-
+
+# Pre-fill code interpreter with code template after question generation
+if st.session_state.code_template:
+    code_input = st.sidebar.text_area("Write your Python code here:", value=st.session_state.code_template, height=300)
+else:
+    code_input = st.sidebar.text_area("Write your Python code here:", height=300)
+
 if st.sidebar.button("Run Code"):
     try:
+        # Prepare the code for execution
        exec_globals = {}
-
-
-
-
-
+        # Create a function wrapper to execute the user's code
+        exec(f"def user_solution():\n{code_input}", exec_globals)
+        user_solution = exec_globals.get('user_solution', None)
+
+        # Prepare sample test case execution
+        if st.session_state.sample_test_case:
+            # Assume the sample test case is in the format of arguments to the function
+            test_case = st.session_state.sample_test_case
+            # Evaluate the test case safely
+            test_args = eval(test_case)
+            if not isinstance(test_args, tuple):
+                test_args = (test_args,)
+            # Capture the output
+            returned_output = user_solution(*test_args)
+        else:
+            returned_output = user_solution()
+
+        # Display the expected output and returned output
+        st.sidebar.markdown("### Sample Test Case Result:")
+        st.sidebar.markdown(f"**Sample Input:** {st.session_state.sample_test_case}")
+        st.sidebar.markdown(f"**Expected Output:** {st.session_state.expected_output}")
+        st.sidebar.markdown(f"**Your Output:** {returned_output}")
+
+        # Compare outputs
+        if str(returned_output) == st.session_state.expected_output:
+            st.sidebar.success("Your output matches the expected output!")
         else:
-            st.sidebar.
+            st.sidebar.error("Your output does not match the expected output.")
     except Exception as e:
         st.sidebar.error(f"Error: {e}")
+
+# Right sidebar toggleable debug logs and code interpreter section
+with st.expander("Debug Logs (Toggle On/Off)", expanded=False):
+    if st.session_state.debug_logs:
+        st.write(st.session_state.debug_logs)
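
Two review notes on the `Run Code` block. First, `exec(f"def user_solution():\n{code_input}", exec_globals)` only compiles if the text-area content is already indented one level; flush-left user code raises an `IndentationError` that surfaces through the `except` branch. Second, the wrapper is defined with no parameters yet is called as `user_solution(*test_args)` whenever a sample test case exists, and despite the "safely" comment, `eval` runs arbitrary expressions. A defensive sketch of the apparent intent (an assumption, not the committed code) that indents the snippet, accepts arguments, and parses the test case with `ast.literal_eval`:

```python
import ast
import textwrap

def run_user_code(code_input: str, sample_test_case: str = ""):
    """Execute a user snippet wrapped in a function and feed it the sample
    test case. The snippet reads its inputs from `args` (an assumption;
    the commit does not pin down a calling convention)."""
    exec_globals = {}
    # Indent the snippet so flush-left code still compiles inside the wrapper.
    wrapped = "def user_solution(*args):\n" + textwrap.indent(code_input, "    ")
    exec(wrapped, exec_globals)  # still arbitrary code execution by design
    user_solution = exec_globals["user_solution"]

    if sample_test_case:
        # literal_eval accepts only Python literals, unlike eval.
        test_args = ast.literal_eval(sample_test_case)
        if not isinstance(test_args, tuple):
            test_args = (test_args,)
        return user_solution(*test_args)
    return user_solution()

# Example: a snippet that sums its first argument, with a tuple-shaped sample input.
print(run_user_code("return sum(args[0])", "([1, 2, 3],)"))  # -> 6
```

The final string comparison `str(returned_output) == st.session_state.expected_output` is reasonable for the single-line outputs the extractor captures, but will report mismatches for values whose `str()` form differs from the model's formatting (e.g. quotes around strings).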