import os
import re

import streamlit as st
from PyPDF2 import PdfReader
import google.generativeai as genai

# Configure the Generative AI API (read the key from the environment
# instead of hardcoding a secret in source control)
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
# Prompt template for generating questions from the resume and job description
input_prompt = """
You are an AI question generator. Generate a technical mock test with 30 questions based on the following instructions:
1. Analyze the resume to identify the candidate's skills, experience, projects, and certifications.
2. Generate questions that thoroughly test the candidate's knowledge and expertise in these areas.
3. Analyze the job description to extract key requirements and generate questions that evaluate whether the candidate meets these requirements.
4. Ensure the questions assess the candidate's capability and alignment with the job's expectations, aiding the hiring team in evaluating the validity of the candidate's claims.
Resume:
{resume_text}
Job Description:
{jd_text}
"""
# Prompt template for scoring the candidate's answers
answer_prompt_template = """
You are an AI evaluator. Below are questions and a student's answers. Score each answer on a scale of 0 to 1, where 1 is completely correct and 0 is incorrect. Provide the correct answer for each question as well.
Questions and Answers:
{questions_and_answers}
Provide your response in the following format:
1. [Score] - [Probable Correct Answer]
2. [Score] - [Probable Correct Answer]
...
"""
# Function to extract text from a PDF file
def input_pdf_text(file):
    try:
        pdf = PdfReader(file)
        # extract_text() can return None for pages without a text layer
        text = "".join(page.extract_text() or "" for page in pdf.pages)
        return text
    except Exception as e:
        return f"Error extracting text: {e}"
# Function to generate content with the Generative AI model
def get_gemini_response(prompt):
    model = genai.GenerativeModel('gemini-pro')
    response = model.generate_content(prompt)
    # Blocked or empty responses come back with no candidates/parts,
    # so guard before indexing into them
    if response.candidates and response.candidates[0].content.parts:
        return response.candidates[0].content.parts[0].text
    return None
# Initialize session state for persistence
if "questions" not in st.session_state:
    st.session_state.questions = None
if "user_answers" not in st.session_state:
    st.session_state.user_answers = {}
if "test_submitted" not in st.session_state:
    st.session_state.test_submitted = False
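# Streamlit reruns this script top to bottom on every widget interaction,
# so generated questions and typed answers must live in st.session_state
# to survive between reruns.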
# Streamlit UI
st.title("Technical Mock Test Generator")
st.write("Upload a resume and a job description to generate a technical mock test based on the skills and requirements.")
resume_file = st.file_uploader("Upload Resume (PDF)", type=["pdf"])
jd_file = st.file_uploader("Upload Job Description (PDF)", type=["pdf"])
if resume_file and jd_file:
    # Generate a fresh test when none exists yet or the previous one was submitted
    if st.session_state.questions is None or st.session_state.test_submitted:
        resume_text = input_pdf_text(resume_file)
        jd_text = input_pdf_text(jd_file)
        if resume_text.startswith("Error") or jd_text.startswith("Error"):
            st.error("Error reading one or both files. Please check the files and try again.")
        else:
            prompt = input_prompt.format(resume_text=resume_text, jd_text=jd_text)
            st.write("Generating questions...")
            try:
                # Generate questions
                questions_content = get_gemini_response(prompt)
                if not questions_content:
                    st.error("Unexpected response format. Please check the Generative AI configuration.")
                else:
                    # Extract numbered lines ("1. ...") and strip the numbering
                    questions = re.findall(r'\d+\.\s.*', questions_content)
                    questions = [re.sub(r'^\d+\.\s', '', question).strip() for question in questions]
                    # Save questions to session state
                    st.session_state.questions = questions
                    st.session_state.user_answers = {idx + 1: "" for idx in range(len(questions))}
                    st.session_state.test_submitted = False
            except Exception as e:
                st.error(f"Error generating questions: {e}")
# Display questions
if st.session_state.questions:
    st.write("### Mock Test Questions")
    for idx, question in enumerate(st.session_state.questions, 1):
        st.write(f"**Q{idx}:** {question}")
        st.session_state.user_answers[idx] = st.text_input(
            f"Your Answer for Q{idx}:",
            value=st.session_state.user_answers[idx],
            key=f"q_{idx}"
        )
    # Submit button (only shown once questions exist)
    if st.button("Submit Test"):
        # Prepare input for LLM evaluation
        qa_pairs = "\n".join(
            f"{idx}. Q: {question} A: {st.session_state.user_answers[idx]}"
            for idx, question in enumerate(st.session_state.questions, 1)
        )
        eval_prompt = answer_prompt_template.format(questions_and_answers=qa_pairs)
        try:
            # Get LLM evaluation
            eval_response = get_gemini_response(eval_prompt)
            # Debug: Display raw response
            st.write("### Raw Evaluation Response")
            st.write(eval_response)
            # Parse lines of the form "1. [0.5] - probable correct answer"
            results = re.findall(r'(\d+)\.\s\[([\d\.]+)\]\s-\s(.*)', eval_response)
            # Display score and feedback
            if results:
                total_score = sum(float(score) for _, score, _ in results)
                attempted = sum(1 for answer in st.session_state.user_answers.values() if answer.strip())
                st.subheader(f"Your Score: {total_score:.2f} / {len(results)}")
                st.write(f"Total Questions Attempted: {attempted} / {len(results)}")
                # Display probable correct answers
                st.subheader("Probable Correct Answers")
                for idx, (_, score, correct_answer) in enumerate(results, 1):
                    st.write(f"Q{idx}: {correct_answer} (Score: {score})")
                # Mark the test as submitted so a fresh one is generated next run
                st.session_state.test_submitted = True
            else:
                st.error("Unexpected evaluation response format.")
        except Exception as e:
            st.error(f"Error evaluating answers: {e}")