import streamlit as st
from PyPDF2 import PdfReader
import google.generativeai as genai
import os
import re
# Configure the Generative AI API; read the key from the environment
# instead of hardcoding a secret in source control
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
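# Alternative for Streamlit Cloud / Hugging Face Spaces, assuming a secret
# named GOOGLE_API_KEY has been configured for the app:
#   genai.configure(api_key=st.secrets["GOOGLE_API_KEY"])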
# Updated Prompt Template
input_prompt = """
You are an AI question generator. Generate a technical mock test with 30 questions based on the following instructions:
1. Analyze the resume to identify the candidate's skills, experience, projects, and certifications.
2. Generate questions that thoroughly test the candidate's knowledge and expertise in these areas.
3. Analyze the job description to extract its key requirements and generate questions that evaluate whether the candidate meets them.
4. Ensure the questions assess the candidate's capability and alignment with the job's expectations, helping the hiring team validate the candidate's claims.
5. Return the questions as a plain numbered list ("1.", "2.", ...), one question per line.
Resume:
{resume_text}
Job Description:
{jd_text}
"""
answer_prompt_template = """
You are an AI evaluator. Below are questions and a student's answers. Score each answer on a scale of 0 to 1, where 1 is completely correct and 0 is incorrect. Provide the correct answer for each question as well.
Questions and Answers:
{questions_and_answers}
Provide your response in the following format:
1. [Score] - [Probable Correct Answer]
2. [Score] - [Probable Correct Answer]
...
"""
# Function to extract text from PDF
def input_pdf_text(file):
    try:
        pdf = PdfReader(file)
        # extract_text() can return None for image-only pages; coerce to ""
        text = "".join(page.extract_text() or "" for page in pdf.pages)
        return text
    except Exception as e:
        return f"Error extracting text: {e}"
# Function to generate questions using Generative AI
def get_gemini_response(prompt):
    model = genai.GenerativeModel('gemini-pro')
    response = model.generate_content(prompt)
    # Guard against empty candidates or partless content before indexing
    if response.candidates and response.candidates[0].content.parts:
        return response.candidates[0].content.parts[0].text
    return None
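# generate_content() returns a response whose candidates each wrap a Content
# object holding a list of parts; the guard above returns None instead of
# raising when the model yields no usable candidate (e.g. a safety block).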
# Initialize session state for persistence
if "questions" not in st.session_state:
    st.session_state.questions = None
if "user_answers" not in st.session_state:
    st.session_state.user_answers = {}
if "test_submitted" not in st.session_state:
    st.session_state.test_submitted = False
# Streamlit UI
st.title("Technical Mock Test Generator")
st.write("Upload a resume and a job description to generate a technical mock test based on the skills and requirements.")
resume_file = st.file_uploader("Upload Resume (PDF)", type=["pdf"])
jd_file = st.file_uploader("Upload Job Description (PDF)", type=["pdf"])
if resume_file and jd_file:
    # Generate questions only when none exist yet or the previous test was submitted
    if st.session_state.questions is None or st.session_state.test_submitted:
        resume_text = input_pdf_text(resume_file)
        jd_text = input_pdf_text(jd_file)
        if "Error" in resume_text or "Error" in jd_text:
            st.error("Error reading one or both files. Please check the files and try again.")
        else:
            prompt = input_prompt.format(resume_text=resume_text, jd_text=jd_text)
            st.write("Generating questions...")
            try:
                # Generate questions from the combined resume + JD prompt
                questions_content = get_gemini_response(prompt)
                if not questions_content:
                    st.error("Unexpected response format. Please check the Generative AI configuration.")
                else:
                    # Extract numbered questions and strip the "N. " prefixes
                    questions = re.findall(r'\d+\.\s.*', questions_content)
                    questions = [re.sub(r'^\d+\.\s', '', question).strip() for question in questions]
                    # Save questions to session state
                    st.session_state.questions = questions
                    st.session_state.user_answers = {idx + 1: "" for idx in range(len(questions))}
                    st.session_state.test_submitted = False
            except Exception as e:
                st.error(f"Error generating questions: {e}")
# Display questions
if st.session_state.questions:
    st.write("### Mock Test Questions")
    for idx, question in enumerate(st.session_state.questions, 1):
        st.write(f"**Q{idx}:** {question}")
        st.session_state.user_answers[idx] = st.text_input(
            f"Your Answer for Q{idx}:",
            value=st.session_state.user_answers[idx],
            key=f"q_{idx}"
        )
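    # The key=f"q_{idx}" argument gives each text_input a stable widget
    # identity, so Streamlit preserves the typed answer across reruns.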
    # Submit button
    if st.button("Submit Test"):
        # Prepare input for LLM evaluation
        qa_pairs = "\n".join(
            f"{idx}. Q: {question} A: {st.session_state.user_answers[idx]}"
            for idx, question in enumerate(st.session_state.questions, 1)
        )
        eval_prompt = answer_prompt_template.format(questions_and_answers=qa_pairs)
        try:
            # Get LLM evaluation; fall back to "" so parsing fails gracefully
            eval_response = get_gemini_response(eval_prompt) or ""
            # Debug: display the raw response to help diagnose format mismatches
            st.write("### Raw Evaluation Response")
            st.write(eval_response)
            # Parse lines of the form "N. [score] - answer"
            results = re.findall(r'(\d+)\.\s\[([\d\.]+)\]\s-\s(.*)', eval_response)
            # Display score and feedback
            if results:
                total_score = sum(float(score) for _, score, _ in results)
                attempted = sum(1 for ans in st.session_state.user_answers.values() if ans.strip())
                st.subheader(f"Your Score: {total_score:.2f} / {len(results)}")
                st.write(f"Total Questions Attempted: {attempted} / {len(results)}")
                # Display probable correct answers
                st.subheader("Probable Correct Answers")
                for idx, (_, score, correct_answer) in enumerate(results, 1):
                    st.write(f"Q{idx}: {correct_answer} (Score: {score})")
            else:
                st.error("Unexpected evaluation response format.")
        except Exception as e:
            st.error(f"Error evaluating answers: {e}")