import streamlit as st from PyPDF2 import PdfReader import google.generativeai as genai import re # Configure the Generative AI API genai.configure(api_key="AIzaSyACsjzbJ5wnCpGCSVNmpp--73l5U2YhEFo") # Updated Prompt Template input_prompt = """ You are an AI question generator. Generate a technical mock test with 30 questions based on the following instructions: 1. Analyze the resume to identify the candidate's skills, experience, projects, and certifications. 2. Generate questions that thoroughly test the candidate's knowledge and expertise in these areas. 3. Analyze the job description to extract key requirements and generate questions that evaluate whether the candidate meets these requirements. 4. Ensure the questions assess the candidate's capability and alignment with the job's expectations, aiding the hiring team in evaluating the validity of the candidate's claims. Resume: {resume_text} Job Description: {jd_text} """ answer_prompt_template = """ You are an AI evaluator. Below are questions and a student's answers. Score each answer on a scale of 0 to 1, where 1 is completely correct and 0 is incorrect. Provide the correct answer for each question as well. Questions and Answers: {questions_and_answers} Provide your response in the following format: 1. [Score] - [Probable Correct Answer] 2. [Score] - [Probable Correct Answer] ... """ # Function to extract text from PDF def input_pdf_text(file): try: pdf = PdfReader(file) text = "".join(page.extract_text() for page in pdf.pages) return text except Exception as e: return f"Error extracting text: {e}" # Function to generate questions using Generative AI def get_gemini_response(prompt): model = genai.GenerativeModel('gemini-pro') response = model.generate_content(prompt) if hasattr(response.candidates[0], "content") and hasattr(response.candidates[0].content, "parts"): return response.candidates[0].content.parts[0].text else: return None # Initialize session state for persistence if "questions" not in st.session_state: st.session_state.questions = None if "user_answers" not in st.session_state: st.session_state.user_answers = {} if "test_submitted" not in st.session_state: st.session_state.test_submitted = False # Streamlit UI st.title("Technical Mock Test Generator") st.write("Upload a resume and a job description to generate a technical mock test based on the skills and requirements.") resume_file = st.file_uploader("Upload Resume (PDF)", type=["pdf"]) jd_file = st.file_uploader("Upload Job Description (PDF)", type=["pdf"]) if resume_file and jd_file: # Reset session state when new files are uploaded if st.session_state.questions is None or st.session_state.test_submitted: resume_text = input_pdf_text(resume_file) jd_text = input_pdf_text(jd_file) if "Error" in resume_text or "Error" in jd_text: st.error("Error reading one or both files. Please check the files and try again.") else: prompt = input_prompt.format(resume_text=resume_text, jd_text=jd_text) st.write("Generating questions...") try: # Generate questions questions_content = get_gemini_response(prompt) if not questions_content: st.error("Unexpected response format. 
# Prompt template for question generation
input_prompt = """
You are an AI question generator. Generate a technical mock test with 30 questions based on the following instructions:
1. Analyze the resume to identify the candidate's skills, experience, projects, and certifications.
2. Generate questions that thoroughly test the candidate's knowledge and expertise in these areas.
3. Analyze the job description to extract key requirements and generate questions that evaluate whether the candidate meets these requirements.
4. Ensure the questions assess the candidate's capability and alignment with the job's expectations, aiding the hiring team in evaluating the validity of the candidate's claims.

Resume:
{resume_text}

Job Description:
{jd_text}
"""

# Prompt template for answer evaluation
answer_prompt_template = """
You are an AI evaluator. Below are questions and a student's answers. Score each answer on a scale of 0 to 1, where 1 is completely correct and 0 is incorrect. Provide the correct answer for each question as well.

Questions and Answers:
{questions_and_answers}

Provide your response in the following format:
1. [Score] - [Probable Correct Answer]
2. [Score] - [Probable Correct Answer]
...
"""


# Function to extract text from a PDF file
def input_pdf_text(file):
    try:
        pdf = PdfReader(file)
        # extract_text() may yield nothing for image-only pages; fall back to an empty string
        text = "".join(page.extract_text() or "" for page in pdf.pages)
        return text
    except Exception as e:
        return f"Error extracting text: {e}"


# Function to generate content using Generative AI
def get_gemini_response(prompt):
    model = genai.GenerativeModel('gemini-pro')
    response = model.generate_content(prompt)
    if hasattr(response.candidates[0], "content") and hasattr(response.candidates[0].content, "parts"):
        return response.candidates[0].content.parts[0].text
    return None


# Initialize session state for persistence across reruns
if "questions" not in st.session_state:
    st.session_state.questions = None
if "user_answers" not in st.session_state:
    st.session_state.user_answers = {}
if "test_submitted" not in st.session_state:
    st.session_state.test_submitted = False

# Streamlit UI
st.title("Technical Mock Test Generator")
st.write("Upload a resume and a job description to generate a technical mock test based on the skills and requirements.")

resume_file = st.file_uploader("Upload Resume (PDF)", type=["pdf"])
jd_file = st.file_uploader("Upload Job Description (PDF)", type=["pdf"])

if resume_file and jd_file:
    # Generate a fresh test when no questions exist yet or the previous test was submitted
    if st.session_state.questions is None or st.session_state.test_submitted:
        resume_text = input_pdf_text(resume_file)
        jd_text = input_pdf_text(jd_file)

        if "Error" in resume_text or "Error" in jd_text:
            st.error("Error reading one or both files. Please check the files and try again.")
        else:
            prompt = input_prompt.format(resume_text=resume_text, jd_text=jd_text)
            st.write("Generating questions...")
            try:
                # Generate questions
                questions_content = get_gemini_response(prompt)
                if not questions_content:
                    st.error("Unexpected response format. Please check the Generative AI configuration.")
                else:
                    # Extract numbered questions and strip the leading "N. " prefix
                    questions = re.findall(r'\d+\.\s.*', questions_content)
                    questions = [re.sub(r'^\d+\.\s', '', question).strip() for question in questions]

                    # Save questions to session state
                    st.session_state.questions = questions
                    st.session_state.user_answers = {idx + 1: "" for idx in range(len(questions))}
                    st.session_state.test_submitted = False
            except Exception as e:
                st.error(f"Error generating questions: {e}")

    # Display questions
    if st.session_state.questions:
        st.write("### Mock Test Questions")
        for idx, question in enumerate(st.session_state.questions, 1):
            st.write(f"**Q{idx}:** {question}")
            st.session_state.user_answers[idx] = st.text_input(
                f"Your Answer for Q{idx}:",
                value=st.session_state.user_answers[idx],
                key=f"q_{idx}"
            )

        # Submit button
        if st.button("Submit Test"):
            # Prepare input for LLM evaluation
            qa_pairs = "\n".join(
                f"{idx}. Q: {question} A: {st.session_state.user_answers[idx]}"
                for idx, question in enumerate(st.session_state.questions, 1)
            )
            eval_prompt = answer_prompt_template.format(questions_and_answers=qa_pairs)

            try:
                # Get LLM evaluation
                eval_response = get_gemini_response(eval_prompt)

                # Debug: display the raw response
                st.write("### Raw Evaluation Response")
                st.write(eval_response)

                # Parse lines of the form "1. [0.5] - probable correct answer"
                results = re.findall(r'(\d+)\.\s\[([\d\.]+)\]\s-\s(.*)', eval_response)

                # Display score and feedback
                if results:
                    total_score = sum(float(score) for _, score, _ in results)
                    attempted = sum(1 for answer in st.session_state.user_answers.values() if answer.strip())
                    st.subheader(f"Your Score: {total_score:.2f} / {len(results)}")
                    st.write(f"Total Questions Attempted: {attempted} / {len(results)}")

                    # Display probable correct answers
                    st.subheader("Probable Correct Answers")
                    for idx, (_, score, correct_answer) in enumerate(results, 1):
                        st.write(f"Q{idx}: {correct_answer} (Score: {score})")
                else:
                    st.error("Unexpected evaluation response format.")
            except Exception as e:
                st.error(f"Error evaluating answers: {e}")
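# Illustrative parsing sketch (the sample text is hypothetical; it assumes the model follows the
# "N. [Score] - [Probable Correct Answer]" format requested in answer_prompt_template):
#
#     sample = "1. [1] - A list is an ordered, mutable collection.\n2. [0.5] - A dict maps keys to values."
#     re.findall(r'(\d+)\.\s\[([\d\.]+)\]\s-\s(.*)', sample)
#     # -> [('1', '1', 'A list is an ordered, mutable collection.'),
#     #     ('2', '0.5', 'A dict maps keys to values.')]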
# Below code is working fine but it has certain problems, such as not giving recommendations,
# displaying the numbering twice, etc.
#
# import streamlit as st
# from PyPDF2 import PdfReader
# import google.generativeai as genai
# import re
#
# # Configure the Generative AI API
# genai.configure(api_key="AIzaSyDXSXEFj4qLZNgxUY8x9F-ucXaOabWsBaQ")
#
# # Updated Prompt Template
# input_prompt = """
# You are an AI question generator. Generate a technical mock test with 30 questions based on the following instructions:
# 1. Analyze the resume to identify the candidate's skills, experience, projects, and certifications.
# 2. Generate questions that thoroughly test the candidate's knowledge and expertise in these areas.
# 3. Analyze the job description to extract key requirements and generate questions that evaluate whether the candidate meets these requirements.
# 4. Ensure the questions assess the candidate's capability and alignment with the job's expectations, aiding the hiring team in evaluating the validity of the candidate's claims.
#
# Resume:
# {resume_text}
#
# Job Description:
# {jd_text}
# """
#
# # Function to extract text from PDF
# def input_pdf_text(file):
#     try:
#         pdf = PdfReader(file)
#         text = "".join(page.extract_text() for page in pdf.pages)
#         return text
#     except Exception as e:
#         return f"Error extracting text: {e}"
#
# # Function to generate content using Generative AI
# def get_gemini_response(prompt):
#     model = genai.GenerativeModel('gemini-pro')
#     response = model.generate_content(prompt)
#     # Extract the text content from the response
#     if hasattr(response.candidates[0], "content") and hasattr(response.candidates[0].content, "parts"):
#         return response.candidates[0].content.parts[0].text
#     else:
#         return None
#
# # Streamlit UI
# st.title("Technical Mock Test Generator")
# st.write("Upload a resume and a job description to generate a technical mock test based on the skills and requirements.")
#
# resume_file = st.file_uploader("Upload Resume (PDF)", type=["pdf"])
# jd_file = st.file_uploader("Upload Job Description (PDF)", type=["pdf"])
#
# if resume_file and jd_file:
#     resume_text = input_pdf_text(resume_file)
#     jd_text = input_pdf_text(jd_file)
#
#     if "Error" in resume_text or "Error" in jd_text:
#         st.error("Error reading one or both files. Please check the files and try again.")
#     else:
#         prompt = input_prompt.format(resume_text=resume_text, jd_text=jd_text)
#         st.write("Generating questions...")
#         try:
#             # Generate questions
#             questions_content = get_gemini_response(prompt)
#             if not questions_content:
#                 st.error("Unexpected response format. Please check the Generative AI configuration.")
#             else:
#                 # Extract questions using regular expression
#                 questions = re.findall(r'\d+\..*', questions_content)
#
#                 # If no questions are extracted, display an error
#                 if not questions:
#                     st.error("No questions found in the generated content. Please verify the prompt or the input data.")
#                 else:
#                     # Display questions and collect user answers
#                     user_answers = {}
#                     for idx, question in enumerate(questions, 1):
#                         st.write(f"**{idx}) {question}**")
#                         user_answers[idx] = st.text_input(f"Your Answer for Question {idx}:", key=f"q_{idx}")
#
#                     # Submit button
#                     if st.button("Submit Answers"):
#                         # Placeholder correct answers for demonstration
#                         correct_answers = {
#                             idx: f"Correct Answer {idx}" for idx in range(1, len(questions) + 1)
#                         }
#
#                         # Calculate the score
#                         score = 0
#                         for idx, correct_answer in correct_answers.items():
#                             if user_answers.get(idx) and user_answers[idx].strip().lower() == correct_answer.strip().lower():
#                                 score += 1
#
#                         # Display the score
#                         st.subheader(f"Your Score: {score} / {len(questions)}")
#
#                         # Display correct answers
#                         st.subheader("Correct Answers")
#                         for idx, correct_answer in correct_answers.items():
#                             st.write(f"{idx}. {correct_answer}")
#         except Exception as e:
#             st.error(f"Error generating questions: {e}")
# Below code works properly but it does not give appropriate questions.
#
# import streamlit as st
# from PyPDF2 import PdfReader
# import google.generativeai as genai
#
# # Configure the Generative AI API
# genai.configure(api_key="AIzaSyDXSXEFj4qLZNgxUY8x9F-ucXaOabWsBaQ")
#
# # Prompt template
# input_prompt = """
# Analyze the following resume and job description. Generate a technical mock test with 30 questions based on the skills from the resume and the requirements from the job description.
#
# Resume:
# {resume_text}
#
# Job Description:
# {jd_text}
# """
#
# # Function to extract text from PDF
# def input_pdf_text(file):
#     try:
#         pdf = PdfReader(file)
#         text = "".join(page.extract_text() for page in pdf.pages)
#         return text
#     except Exception as e:
#         return f"Error extracting text: {e}"
#
# # Function to generate content using Generative AI
# def get_gemini_response(prompt):
#     model = genai.GenerativeModel('gemini-pro')
#     response = model.generate_content(prompt)
#     return response
#
# # Function to calculate score
# def calculate_score(correct_answers, user_answers):
#     score = 0
#     weights = {"Beginner": 1, "Competent": 2, "Intermediate": 3, "Expert": 4}
#     for idx, correct_answer in correct_answers.items():
#         level = correct_answer['level']
#         weight = weights.get(level, 0)
#         if user_answers.get(idx) == correct_answer['answer']:
#             score += weight
#     return score
#
# # Streamlit UI
# st.title("Technical Mock Test Generator")
# st.write("Upload a resume and a job description to generate a technical mock test based on the skills and requirements.")
#
# resume_file = st.file_uploader("Upload Resume (PDF)", type=["pdf"])
# jd_file = st.file_uploader("Upload Job Description (PDF)", type=["pdf"])
#
# if resume_file and jd_file:
#     resume_text = input_pdf_text(resume_file)
#     jd_text = input_pdf_text(jd_file)
#
#     if "Error" in resume_text or "Error" in jd_text:
#         st.error("Error reading one or both files. Please check the files and try again.")
#     else:
#         prompt = input_prompt.format(resume_text=resume_text, jd_text=jd_text)
#         st.write("Generating questions...")
#         try:
#             response = get_gemini_response(prompt)
#             questions_content = response.candidates[0].content
#             st.success("Mock test generated successfully!")
#
#             # Example structured response (to be parsed from AI response)
#             questions = [f"Question {i+1}: {q}" for i, q in enumerate([
#                 "What is Python?", "Define a variable.", "What is a list?",
#                 "Explain list comprehensions.", "What is a dictionary?", "Explain inheritance in OOP.",
#                 "Explain threading in Python.", "How does garbage collection work?", "What is a decorator?",
#                 "Explain metaclasses.", "What is memory management in Python?", "Discuss GIL in Python.",
#                 # Add more questions to make 30
#             ])]
#             correct_answers = {
#                 idx: {"level": "Beginner", "answer": "Correct Answer"} for idx in range(30)
#             }
#
#             # Display questions and input fields
#             user_answers = {}
#             for idx, question in enumerate(questions):
#                 st.write(question)
#                 user_answers[idx] = st.text_input(f"Your answer for Question {idx + 1}:", key=f"q_{idx}")
#
#             if st.button("Submit Answers"):
#                 score = calculate_score(correct_answers, user_answers)
#                 st.write(f"Your score: {score}")
#
#                 # Reveal correct answers
#                 st.subheader("Correct Answers")
#                 for idx, correct_answer in correct_answers.items():
#                     st.write(f"{idx + 1}. {correct_answer['answer']}")
#         except Exception as e:
#             st.error(f"Error generating questions: {e}")