NvkAnirudh committed
Commit b3670e0 · 1 Parent(s): 5ca5e6c

Finished the initial concept of the project

.DS_Store ADDED
Binary file (6.15 kB).
 
app/streamlit_app.py ADDED
@@ -0,0 +1,134 @@
+ import streamlit as st
+ import sys
+ import os
+ import tempfile
+ import uuid
+ # Add the src directory to the Python path
+ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'src')))
+ from src.resume_analyzer import optimize_resume
+ from src.pdf_handler import read_pdf
+ from reportlab.pdfgen import canvas
+ from reportlab.lib.pagesizes import letter
+ from io import BytesIO
+
+ st.set_page_config(page_title="Resume Optimizer", layout="wide")
+
+ # Create a directory to store uploaded files
+ UPLOAD_DIR = os.path.join(os.path.dirname(__file__), '..', 'data', 'sample_resumes')
+ os.makedirs(UPLOAD_DIR, exist_ok=True)
+
+ def create_pdf(text):
+     buffer = BytesIO()
+     c = canvas.Canvas(buffer, pagesize=letter)
+     width, height = letter
+     y = height - 50  # Start near the top of the page
+     for line in text.split('\n'):
+         if y < 50:  # If we're near the bottom of the page
+             c.showPage()  # Start a new page
+             y = height - 50  # Reset y to the top of the new page
+         c.drawString(50, y, line)
+         y -= 15  # Move down for the next line
+     c.save()
+     buffer.seek(0)
+     return buffer
+
+ def save_uploaded_file(uploaded_file):
+     """Save the uploaded file and return the file path."""
+     try:
+         file_path = os.path.join(UPLOAD_DIR, uploaded_file.name)
+         with open(file_path, 'wb') as f:
+             f.write(uploaded_file.getvalue())
+         # file_extension = os.path.splitext(uploaded_file.name)[1]
+         # file_name = f"{uuid.uuid4()}{file_extension}"
+         # file_path = os.path.join(UPLOAD_DIR, file_name)
+         # with open(file_path, "wb") as f:
+         #     f.write(uploaded_file.getvalue())
+         return file_path
+     except Exception as e:
+         st.error(f"An error occurred while saving the file: {str(e)}")
+         return None
+
+ def main():
+     st.title("Resume Optimizer")
+
+     # File upload and manual entry option
+     upload_option = st.radio("Choose how to input your resume:", ("Upload PDF", "Enter manually"))
+
+     resume_content = None
+     if upload_option == "Upload PDF":
+         resume_file = st.file_uploader("Upload your resume (PDF)", type="pdf")
+         if resume_file is not None:
+             file_path = save_uploaded_file(resume_file)
+             if file_path:
+                 resume_content = read_pdf(file_path)
+                 if resume_content.startswith("An error occurred"):
+                     st.error(resume_content)
+                     resume_content = None
+                 else:
+                     st.success("PDF successfully read!")
+                     st.text_area("Extracted Content", resume_content, height=200)
+                 # Clean up: remove the file after reading
+                 os.remove(file_path)
+     else:
+         resume_content = st.text_area("Enter your resume text", height=300)
+
+     job_description = st.text_area("Enter the job description")
+
+     # Always display buttons
+     col1, col2, col3 = st.columns(3)
+
+     with col1:
+         optimize_button = st.button("Optimize Resume")
+
+     with col2:
+         download_pdf_button = st.button("Download Optimized Resume (PDF)")
+
+     with col3:
+         download_txt_button = st.button("Download Optimized Resume (TXT)")
+
+     # Initialize session state for storing results
+     if 'analysis_dict' not in st.session_state:
+         st.session_state.analysis_dict = None
+     if 'updated_resume' not in st.session_state:
+         st.session_state.updated_resume = None
+
+     if optimize_button and resume_content and job_description:
+         with st.spinner("Analyzing and optimizing your resume..."):
+             st.session_state.analysis_dict, st.session_state.updated_resume = optimize_resume(resume_content, job_description)
+
+         if st.session_state.analysis_dict and st.session_state.updated_resume:
+             st.success("Analysis complete!")
+             # Display analysis results
+             for key, value in st.session_state.analysis_dict.items():
+                 st.subheader(key.replace('_', ' ').title())
+                 if isinstance(value, list):
+                     for item in value:
+                         st.write(f"- {item}")
+                 else:
+                     st.write(value)
+
+             # Display updated resume
+             st.subheader("Updated Resume")
+             st.text_area("", value=st.session_state.updated_resume, height=300)
+         else:
+             st.error("An error occurred during analysis. Please try again.")
+
+     if download_pdf_button and st.session_state.updated_resume:
+         pdf_buffer = create_pdf(st.session_state.updated_resume)
+         st.download_button(
+             label="Download Optimized Resume (PDF)",
+             data=pdf_buffer,
+             file_name="optimized_resume.pdf",
+             mime="application/pdf"
+         )
+
+     if download_txt_button and st.session_state.updated_resume:
+         st.download_button(
+             label="Download Optimized Resume (TXT)",
+             data=st.session_state.updated_resume,
+             file_name="optimized_resume.txt",
+             mime="text/plain"
+         )
+
+ if __name__ == "__main__":
+     main()
data/.DS_Store ADDED
Binary file (6.15 kB).
 
requirements.txt CHANGED
@@ -1,3 +1,5 @@
- anthropic==0.3.0
+ anthropic==0.34.2
  pypdf==5.0.0
- -e .
+ streamlit
+ reportlab
+ # -e .
sample_pdf_reader.py ADDED
@@ -0,0 +1,73 @@
+ import streamlit as st
+ import pdfplumber
+ import os
+
+ # Define the folder path to save uploaded files
+ UPLOAD_FOLDER = 'uploads'
+
+ def read_pdf(file_path):
+     with pdfplumber.open(file_path) as pdf:
+         text = ''
+         for page in pdf.pages:
+             text += page.extract_text()
+         return text
+
+ def main():
+     st.title("PDF Text Extractor")
+     st.markdown("Upload a PDF file to extract its text.")
+
+     # Create upload folder if it doesn't exist
+     if not os.path.exists(UPLOAD_FOLDER):
+         os.makedirs(UPLOAD_FOLDER)
+
+     # Create file uploader
+     uploaded_file = st.file_uploader("Choose a PDF file", type="pdf")
+
+     if uploaded_file:
+         # Save uploaded file to the upload folder
+         file_path = os.path.join(UPLOAD_FOLDER, uploaded_file.name)
+         with open(file_path, 'wb') as f:
+             f.write(uploaded_file.getvalue())
+
+         # Read PDF text
+         text = read_pdf(file_path)
+
+         # Display PDF text
+         st.write("Extracted Text:")
+         st.markdown(text)
+
+         # Optional: Delete the uploaded file after processing
+         # os.remove(file_path)
+
+ if __name__ == "__main__":
+     main()
+
+
+ # import streamlit as st
+ # import pdfplumber
+ # import io
+
+ # def read_pdf(file_buffer):
+ #     with pdfplumber.open(io.BytesIO(file_buffer)) as pdf:
+ #         text = ''
+ #         for page in pdf.pages:
+ #             text += page.extract_text()
+ #         return text
+
+ # def main():
+ #     st.title("PDF Text Extractor")
+ #     st.markdown("Upload a PDF file to extract its text.")
+
+ #     # Create file uploader
+ #     uploaded_file = st.file_uploader("Choose a PDF file", type="pdf")
+
+ #     if uploaded_file:
+ #         # Read PDF text
+ #         text = read_pdf(uploaded_file.getvalue())
+
+ #         # Display PDF text
+ #         st.write("Extracted Text:")
+ #         st.markdown(text)
+
+ # if __name__ == "__main__":
+ #     main()
src/api_client.py CHANGED
@@ -0,0 +1,17 @@
+ import anthropic
+ from config import ANTHROPIC_API_KEY
+
+ client = anthropic.Anthropic(api_key=ANTHROPIC_API_KEY)
+
+ def call_claude_api(prompt):
+     response = client.messages.create(
+         model='claude-3-5-sonnet-20240620',
+         max_tokens=3000,
+         temperature=0,
+         system="You are a resume optimization expert. Optimize the attached resume to achieve an ATS score of 85% or higher for the target job description. Provide a revised resume, predicted ATS score, and suggestions for improvement.",
+         messages=[
+             {"role": "user", "content": prompt}
+         ]
+     )
+
+     return response.content
src/config.py CHANGED
@@ -0,0 +1,3 @@
+ import os
+
+ ANTHROPIC_API_KEY = os.getenv('ANTHROPIC_API_KEY')
src/main.py CHANGED
@@ -0,0 +1,10 @@
+ from resume_analyzer import optimize_resume
+ from pdf_handler import read_pdf
+ from config import ANTHROPIC_API_KEY
+ import pathlib
+ from pathlib import Path
+
+ # Read resume
+ resume_file_path = pathlib.Path(__file__).parent.parent / 'data' / 'sample_resumes' / 'Anirudh_Nuti_DA.pdf'
+
+ print(pathlib.Path(__file__).parent.parent / 'data' / 'sample_resumes')
src/pdf_handler.py CHANGED
@@ -0,0 +1,17 @@
+ import os
+ import io
+ import pdfplumber
+ from pypdf import PdfReader
+
+ def read_pdf(file_path):
+     with pdfplumber.open(file_path) as pdf:
+         text = ''
+         for page in pdf.pages:
+             text += page.extract_text()
+         return text
+
+ # with pdfplumber.open(io.BytesIO(file_buffer)) as pdf:
+ #     text = ''
+ #     for page in pdf.pages:
+ #         text += page.extract_text()
+ #     return text
src/resume_analyzer.py CHANGED
@@ -0,0 +1,39 @@
+ from api_client import call_claude_api
+ from text_processing import text_processing
+ import json
+
+ def optimize_resume(resume_content, job_description):
+     prompt = f"""
+     I'm actively applying for jobs and want to optimize my resume for Applicant Tracking Systems (ATS) to increase my chances of getting interviews. My goal is to achieve an ATS score of 85% or higher. Please help me with the following:
+
+     1. Analyze my current resume and the target job description provided below.
+     2. Identify any areas where keywords, skills, or formatting could be improved to better align with the job requirements and ATS algorithms.
+     3. Revise my resume based on your analysis. Incorporate relevant keywords, highlight key accomplishments with strong action verbs, and ensure the formatting is ATS-friendly.
+     4. Offer additional suggestions for optimizing my resume content or layout, if applicable.
+     5. Provide a predicted ATS score for the job description and the updated resume.
+     6. Identify any projects in the resume that may not be relevant to this specific job application.
+     7. Provide an updated version of the resume. Start the updated resume section with this heading 'Here's an updated version of your resume, optimized for the given job description:'
+
+     Please present your analysis and suggestions in a structured Python dictionary format, similar to the following:
+
+     resume_analysis = {{
+         "ats_score": 0,
+         "keyword_suggestions": [],
+         "formatting_suggestions": [],
+         "content_improvements": [],
+         "irrelevant_projects": []
+     }}
+
+     Current Resume: {resume_content}
+     Job Description: {job_description}
+     """
+
+     response = call_claude_api(prompt)
+     response = response[0].text
+     # print(response)
+
+     # Extract the dictionary and updated resume
+     try:
+         return text_processing(response)
+     except:
+         return None, None
src/text_processing.py ADDED
@@ -0,0 +1,22 @@
+ import re
+ import ast
+
+ def text_processing(text):
+     # Regular expressions to extract resume_analysis and the updated resume
+     resume_analysis_pattern = re.compile(r'resume_analysis\s*=\s*({.*?})', re.DOTALL)
+     updated_resume_pattern = re.compile(r"Here's an updated version of your resume, optimized for the given job description:\n\n(.*)", re.DOTALL)
+
+     # print(resume_analysis_pattern)
+
+     # Extract resume_analysis
+     resume_analysis_match = resume_analysis_pattern.search(text)
+     resume_analysis = resume_analysis_match.group(1) if resume_analysis_match else None
+
+     # Extract updated resume
+     updated_resume_match = updated_resume_pattern.search(text)
+     updated_resume = updated_resume_match.group(1) if updated_resume_match else None
+
+     # Convert the resume_analysis string to a Python dictionary
+     resume_analysis = ast.literal_eval(resume_analysis) if resume_analysis else None
+
+     return resume_analysis, updated_resume
uploads/Anirudh_Nuti_DA.pdf ADDED
Binary file (115 kB).