import streamlit as st
import openai
from langchain_google_genai import ChatGoogleGenerativeAI
from datetime import datetime, timedelta
import time
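# Third-party requirements (PyPI): streamlit, openai, langchain-google-genai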
# API keys
GOOGLE_API_KEY = st.secrets["GOOGLE_API_KEY"]
OPENAI_API_KEY = st.secrets["OPENAI_API_KEY"]
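# These keys are read from .streamlit/secrets.toml; a minimal example layout
# (with placeholder values) looks like:
#   GOOGLE_API_KEY = "your-google-api-key"
#   OPENAI_API_KEY = "your-openai-api-key"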
# Initialize OpenAI
openai.api_key = OPENAI_API_KEY
# In-memory storage for progress tracking.
# Streamlit re-runs this script on every interaction, so a plain module-level
# dict would be reset each run; keep the counters in st.session_state and alias
# them so the rest of the code can keep using `progress_data`.
if "progress_data" not in st.session_state:
    st.session_state.progress_data = {
        "questions_solved": {
            "Behavioral": 0, "Technical": 0, "Situational": 0, "Case Study": 0, "Problem Solving": 0
        },
        "mock_interviews_taken": 0,
        "feedback_provided": 0,
        "tips_retrieved": 0,
    }
progress_data = st.session_state.progress_data
def get_llm(model_choice):
    # "Groq" currently falls back to the Gemini model; OpenAI is called directly
    # through the openai client, so no LangChain wrapper is returned for it.
    if model_choice in ("Gemini", "Groq"):
        return ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=GOOGLE_API_KEY)
    elif model_choice == "OpenAI":
        return None
    else:
        raise ValueError("Unsupported model choice.")
def generate_questions(model_choice, role, question_type, num_questions, difficulty):
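    """Generate a list of interview questions with the selected model.

    The role, question type, difficulty, and requested count are interpolated
    into a single prompt; the model's response is split into one question per line.
    """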
prompt = (
f"Generate {num_questions} {difficulty} {question_type.lower()} interview questions for the role of {role}. "
f"Only include {question_type.lower()} questions."
)
    if model_choice == "OpenAI":
        # The legacy Completion endpoint and text-davinci-003 are retired;
        # use the Chat Completions API instead.
        response = openai.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": prompt}],
            max_tokens=500,  # 150 tokens is too small for up to 20 questions
        )
        text = response.choices[0].message.content.strip()
        return [q for q in text.split("\n") if q.strip()]
    elif model_choice in ("Gemini", "Groq"):
        llm = get_llm(model_choice)
        response = llm.invoke(prompt)
        return [q for q in response.content.split("\n") if q.strip()]
else:
raise ValueError("Unsupported model choice.")
def provide_feedback(model_choice, answer):
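    """Ask the selected model for constructive feedback on a single interview answer."""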
prompt = f"Provide constructive feedback on the following interview answer: {answer}"
    if model_choice == "OpenAI":
        # Use the current Chat Completions API (text-davinci-003 is retired).
        response = openai.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": prompt}],
            max_tokens=150,
        )
        return response.choices[0].message.content.strip()
    elif model_choice in ("Gemini", "Groq"):
        llm = get_llm(model_choice)
        response = llm.invoke(prompt)
        return response.content
else:
raise ValueError("Unsupported model choice.")
def get_tips(model_choice, role):
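    """Ask the selected model for role-specific interview tips (body language, dress code, etiquette)."""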
prompt = f"Provide useful interview tips for the role of {role}. Include body language, dress code, etiquette, and role-specific advice."
    if model_choice == "OpenAI":
        # Use the current Chat Completions API (text-davinci-003 is retired).
        response = openai.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": prompt}],
            max_tokens=150,
        )
        return response.choices[0].message.content.strip()
    elif model_choice in ("Gemini", "Groq"):
        llm = get_llm(model_choice)
        response = llm.invoke(prompt)
        return response.content
else:
raise ValueError("Unsupported model choice.")
def start_mock_interview():
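    """Run a simulated mock-interview session with a 60-second countdown timer."""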
st.write("### Mock Interview Starting")
st.write("The mock interview is starting now. Please connect with your interviewer.")
    # Simulated video-call banner (the original styled HTML was removed here).
    st.markdown("**Call is being RECORDED**")
    st.markdown("*Interview with: John Doe*")
    countdown_end = datetime.now() + timedelta(seconds=60)  # 60-second timer
    timer_placeholder = st.empty()  # update the timer in place instead of printing new lines
    while datetime.now() < countdown_end:
        remaining_time = countdown_end - datetime.now()
        timer_placeholder.write(f"**Time Remaining:** {str(remaining_time).split('.')[0]}")
        time.sleep(1)
    st.write("Mock Interview Session Ended.")
    progress_data["mock_interviews_taken"] += 1
def schedule_mock_interview():
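    """Let the user start a mock interview immediately or schedule one for a future date and time."""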
st.subheader("Schedule a Mock Interview")
date = st.date_input("Select Date", min_value=datetime.today())
    interview_time = st.time_input("Select Time", value=datetime.now().time())  # renamed so it doesn't shadow the `time` module
    duration = st.selectbox("Duration (Minutes)", [30, 45, 60])
    now = datetime.now()
    selected_datetime = datetime.combine(date, interview_time)
col1, col2 = st.columns(2)
with col1:
if st.button("Start Interview Now"):
start_mock_interview()
with col2:
if st.button("Schedule Interview"):
if selected_datetime < now:
st.error("Selected time is in the past. Please choose a future time.")
else:
# Simulate saving interview schedule
st.success(f"Mock interview scheduled for {selected_datetime.strftime('%Y-%m-%d %H:%M:%S')} with a duration of {duration} minutes.")
progress_data["mock_interviews_taken"] += 1
def track_progress():
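    """Display the usage counters accumulated in `progress_data`."""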
st.subheader("Track Your Progress")
st.write("Here's your detailed progress data:")
    st.markdown(f"""
- **Behavioral Questions Solved:** {progress_data['questions_solved']['Behavioral']}
- **Technical Questions Solved:** {progress_data['questions_solved']['Technical']}
- **Situational Questions Solved:** {progress_data['questions_solved']['Situational']}
- **Case Study Questions Solved:** {progress_data['questions_solved']['Case Study']}
- **Problem Solving Questions Solved:** {progress_data['questions_solved']['Problem Solving']}
- **Mock Interviews Taken:** {progress_data['mock_interviews_taken']}
- **Feedback Provided:** {progress_data['feedback_provided']}
- **Tips Retrieved:** {progress_data['tips_retrieved']}
""")
def connect_resources():
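    """Show curated articles, videos, and a contact form for reaching career coaches."""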
st.subheader("Connect with Resources")
st.write("### Articles & Books")
st.write("1. [The Complete Guide to Job Interviews](https://example.com)")
st.write("2. [Cracking the Coding Interview](https://example.com)")
st.write("### Videos")
st.write("1. [Top 10 Interview Tips](https://example.com)")
st.write("2. [Behavioral Interview Questions Explained](https://example.com)")
st.write("### Connect with Career Coaches")
st.write("If you need personalized help, please fill out the form below or contact us through [Career Coaches Contact](https://example.com).")
# Form to connect with career coaches or mentors
with st.form("contact_form"):
st.write("For personalized assistance, please fill out this form:")
name = st.text_input("Name")
email = st.text_input("Email")
message = st.text_area("Message")
submit_button = st.form_submit_button("Submit")
if submit_button:
if not name or not email or not message:
st.error("Please fill out all fields.")
else:
st.success("Thank you for contacting us! We will get back to you soon.")
def style_output(text, color):
    # Wraps text in a colored, bold HTML span for use with
    # st.markdown(..., unsafe_allow_html=True).
    return f'<span style="color: {color}; font-weight: bold;">{text}</span>'
# Streamlit app layout
st.set_page_config(page_title="TechPrep", layout="wide")
# Show welcome message with an icon for 3-4 seconds
welcome_message = st.empty()
with welcome_message.container():
    st.markdown("### 👋 Welcome to TechPrep!")
time.sleep(4)  # Wait for 4 seconds
welcome_message.empty()  # Remove the welcome message
# Initialize session state for questions, answers, and current index
if 'questions' not in st.session_state:
st.session_state.questions = []
if 'answers' not in st.session_state:
st.session_state.answers = []
if 'feedback' not in st.session_state:
st.session_state.feedback = []
if 'question_index' not in st.session_state:
st.session_state.question_index = 0
if 'show_results' not in st.session_state:
st.session_state.show_results = False
# Sidebar Navigation
st.sidebar.title("TechPrep Navigation")
nav_option = st.sidebar.radio("Choose an option:",
["Generate Questions", "Mock Interview", "Track Progress", "Connect with Resources"])
# Handling page navigation
if nav_option == "Generate Questions":
st.header("📝 Generate Interview Questions")
model_choice = st.selectbox("Choose Model:", ["OpenAI", "Gemini", "Groq"])
role = st.selectbox("Role:", ["GenAI", "ML", "DevOps", "Software Engineer", "Data Scientist", "Product Manager", "Designer", "Business Analyst"])
question_type = st.selectbox("Question Type:", ["Behavioral", "Technical", "Situational", "Case Study", "Problem Solving"])
num_questions = st.number_input("Number of Questions:", min_value=1, max_value=20, value=5)
difficulty = st.selectbox("Difficulty Level:", ["Basic", "Medium", "Complex"])
if st.button("Generate Questions", key="generate_questions"):
with st.spinner("Generating questions..."):
questions = generate_questions(model_choice, role, question_type, num_questions, difficulty)
st.session_state.questions = questions
st.session_state.answers = ["" for _ in questions]
st.session_state.feedback = ["" for _ in questions]
st.session_state.question_index = 0
st.session_state.show_results = False
progress_data["questions_solved"][question_type] += num_questions
# Display questions with navigation
if st.session_state.questions:
question_list = st.session_state.questions
index = st.session_state.question_index
if index < len(question_list):
st.write(f"**Question {index + 1}:** {question_list[index]}")
# Answer input box
            answer = st.text_area("Your Answer", key=f"text_area_answer_{index}")  # per-question key so answers don't carry over between questions
            col1, col2 = st.columns(2)
            with col1:
                if index > 0:
                    if st.button("Previous"):
                        st.session_state.question_index -= 1
                        st.rerun()  # refresh immediately so the previous question is displayed
            with col2:
                if index < len(question_list) - 1:
                    if st.button("Next"):
                        st.session_state.question_index += 1
                        st.rerun()  # refresh immediately so the next question is displayed
# Submit answer and provide feedback
if st.button("Submit Answer"):
if not answer:
st.error("Please enter an answer to receive feedback.")
else:
with st.spinner("Providing feedback..."):
feedback = provide_feedback(model_choice, answer)
st.session_state.answers[index] = answer
st.session_state.feedback[index] = feedback
st.markdown(style_output("Feedback Received:", "#FF5722"), unsafe_allow_html=True)
st.write(feedback)
progress_data["feedback_provided"] += 1
# Show results and score when all questions have been answered
if index == len(question_list) - 1:
st.session_state.show_results = True
if st.session_state.show_results:
st.write("### Your Results")
total_questions = len(st.session_state.questions)
answered_questions = sum(1 for ans in st.session_state.answers if ans)
score = (answered_questions / total_questions) * 100
st.write(f"**Score:** {score:.2f}%")
# Display feedback and tips
st.write("### Feedback Summary")
for i, (q, ans, fb) in enumerate(zip(st.session_state.questions, st.session_state.answers, st.session_state.feedback)):
st.write(f"**Question {i + 1}:** {q}")
st.write(f"**Your Answer:** {ans}")
st.write(f"**Feedback:** {fb}")
tips = get_tips(model_choice, role)
st.write("### Tips to Improve")
st.write(tips)
progress_data["tips_retrieved"] += 1
elif nav_option == "Mock Interview":
st.header("🎥 Mock Interview")
schedule_mock_interview()
elif nav_option == "Track Progress":
track_progress()
elif nav_option == "Connect with Resources":
connect_resources()