Commit 0c0f923
Parent(s): edf8f4b

Resume Hosting :)
Files changed:
- .gitattributes +35 -0
- README.md +18 -0
- app.py +68 -0
- main.py +21 -0
- prompts.yaml +22 -0
- requirements.txt +5 -0
- tools/LinkedInScraperTool.py +77 -0
- tools/resumescraper.py +49 -0
.gitattributes
ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,18 @@
+---
+title: Resume Roaster
+emoji: ⚡
+colorFrom: pink
+colorTo: yellow
+sdk: gradio
+sdk_version: 5.15.0
+app_file: main.py
+pinned: false
+tags:
+- smolagents
+- agent
+- smolagent
+- tool
+- agent-course
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,68 @@
+from dotenv import load_dotenv
+import os
+from smolagents import CodeAgent, HfApiModel
+from smolagents.tools import Tool
+import yaml
+
+# Load environment variables from .env in the root
+load_dotenv()
+
+# Retrieve the Hugging Face token from the environment
+hf_token = os.getenv("HF_TOKEN")
+
+class FinalAnswerTool(Tool):
+    name = "final_answer"
+    description = "Use this tool to provide your final answer"
+    inputs = {
+        "answer": {
+            "type": "string",
+            "description": "The final answer to the problem"
+        }
+    }
+    output_type = "string"
+
+    def forward(self, answer: str) -> str:
+        return answer
+
+class LinkedInScraperTool(Tool):
+    name = "linkedin_scraper"
+    description = "Scrapes LinkedIn profiles to extract professional information"
+    inputs = {
+        "linkedin_url": {
+            "type": "string",
+            "description": "The URL of the LinkedIn profile"
+        }
+    }
+    output_type = "object"
+
+    def forward(self, linkedin_url: str):
+        # Dummy implementation; replace with actual scraping logic
+        return {
+            "experience": "10 years in industry",
+            "skills": "Python, AI",
+            "description": "Experienced professional with a robust background in technology."
+        }
+
+def create_agent():
+    final_answer = FinalAnswerTool()
+    linkedin_scraper = LinkedInScraperTool()
+
+    model = HfApiModel(
+        max_tokens=2096,
+        temperature=0.5,
+        model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
+        custom_role_conversions=None,
+    )
+
+    with open("prompts.yaml", 'r') as stream:
+        prompt_templates = yaml.safe_load(stream)
+
+    agent = CodeAgent(
+        model=model,
+        tools=[linkedin_scraper, final_answer],
+        max_steps=6,
+        verbosity_level=1,
+        prompt_templates=prompt_templates
+    )
+
+    return agent
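Note that the LinkedInScraperTool defined in app.py is a stub returning canned data, so the agent can be smoke-tested without touching LinkedIn. A minimal sketch of such a check, assuming HF_TOKEN is set in the environment and prompts.yaml is in the working directory:

from app import create_agent

# Build the agent and run one query; the stubbed scraper answers with
# fixed data, so this exercises the full agent loop without network scraping.
agent = create_agent()
print(agent.run("Roast the profile at https://www.linkedin.com/in/example-profile"))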
main.py
ADDED
@@ -0,0 +1,21 @@
+import gradio as gr
+from app import create_agent
+
+def roast_profile(linkedin_url):
+    agent = create_agent()
+    response = agent.run(
+        f"Scrape this LinkedIn profile: {linkedin_url} and create a humorous but not mean-spirited roast based on their experience, skills, and description. Keep it professional and avoid personal attacks."
+    )
+    return response
+
+demo = gr.Interface(
+    fn=roast_profile,
+    inputs=gr.Textbox(label="LinkedIn Profile URL"),
+    outputs=gr.Textbox(label="Roast Result"),
+    title="LinkedIn Profile Roaster",
+    description="Enter a LinkedIn profile URL and get a humorous professional roast!",
+    examples=[["https://www.linkedin.com/in/example-profile"]]
+)
+
+if __name__ == "__main__":
+    demo.launch(share=True)
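Since roast_profile is a plain function, it can also be exercised without the Gradio UI; a quick sketch (the URL is a placeholder):

from main import roast_profile

# Runs the agent once and prints the generated roast.
print(roast_profile("https://www.linkedin.com/in/example-profile"))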
prompts.yaml
ADDED
@@ -0,0 +1,22 @@
+system_prompt: |
+  You are a witty professional roaster who analyzes LinkedIn profiles.
+  Your job is to create humorous but not mean-spirited roasts based on people's professional experiences.
+  Focus on gentle teasing about common LinkedIn behaviors like:
+  - Overuse of buzzwords
+  - Lengthy job titles
+  - Humble brags
+  - Excessive use of emojis
+  - Connection collecting
+  Avoid personal attacks or inappropriate content.
+
+task_prompt: |
+  Using the provided LinkedIn profile information, create a humorous roast that:
+  1. References specific details from their profile
+  2. Keeps the tone light and professional
+  3. Focuses on common LinkedIn behaviors and professional quirks
+  4. Avoids mean-spirited or personal attacks
+  5. Would be appropriate to share in a professional setting
+
+final_answer:
+  pre_messages: "Here is your final roast:"
+  post_messages: ""
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+gradio
+PyPDF2
+PyYAML
+smolagents
+python-dotenv
tools/LinkedInScraperTool.py
ADDED
@@ -0,0 +1,77 @@
+from bs4 import BeautifulSoup
+import requests
+from typing import Dict
+from smolagents.tools import Tool
+
+class LinkedInScraperTool(Tool):
+    name = "linkedin_scraper"
+    description = "Scrapes LinkedIn profiles to extract professional information"
+    inputs = {"linkedin_url": {"type": "string", "description": "The URL of the LinkedIn profile"}}
+    output_type = "object"
+
+    def forward(self, linkedin_url: str) -> Dict:
+        try:
+            # Add headers to mimic a browser request
+            headers = {
+                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
+            }
+
+            response = requests.get(linkedin_url, headers=headers)
+            soup = BeautifulSoup(response.text, 'html.parser')
+
+            # Extract profile information
+            profile_data = {
+                'name': self._extract_name(soup),
+                'headline': self._extract_headline(soup),
+                'about': self._extract_about(soup),
+                'experience': self._extract_experience(soup),
+                'education': self._extract_education(soup),
+                'skills': self._extract_skills(soup)
+            }
+
+            return profile_data
+
+        except Exception as e:
+            return {"error": f"Failed to scrape profile: {str(e)}"}
+
+    def _extract_name(self, soup):
+        name_element = soup.find('h1', {'class': 'text-heading-xlarge'})
+        return name_element.text.strip() if name_element else "Name not found"
+
+    def _extract_headline(self, soup):
+        headline_element = soup.find('div', {'class': 'text-body-medium'})
+        return headline_element.text.strip() if headline_element else "Headline not found"
+
+    def _extract_about(self, soup):
+        about_element = soup.find('div', {'class': 'pv-about-section'})
+        return about_element.text.strip() if about_element else "About section not found"
+
+    def _extract_experience(self, soup):
+        experience_elements = soup.find_all('li', {'class': 'experience-item'})
+        experience = []
+        for exp in experience_elements:
+            title_element = exp.find('h3', {'class': 'experience-title'})
+            company_element = exp.find('p', {'class': 'experience-company'})
+            if title_element and company_element:
+                experience.append({
+                    'title': title_element.text.strip(),
+                    'company': company_element.text.strip()
+                })
+        return experience if experience else ["Experience not found"]
+
+    def _extract_education(self, soup):
+        education_elements = soup.find_all('li', {'class': 'education-item'})
+        education = []
+        for edu in education_elements:
+            school_element = edu.find('h3', {'class': 'education-school'})
+            degree_element = edu.find('p', {'class': 'education-degree'})
+            if school_element and degree_element:
+                education.append({
+                    'school': school_element.text.strip(),
+                    'degree': degree_element.text.strip()
+                })
+        return education if education else ["Education not found"]
+
+    def _extract_skills(self, soup):
+        skills_elements = soup.find_all('span', {'class': 'skill-name'})
+        return [skill.text.strip() for skill in skills_elements] if skills_elements else ["Skills not found"]
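A direct-call sketch for this tool. Two caveats: requests and beautifulsoup4 are imported here but not listed in requirements.txt, so they are assumed installed; and LinkedIn typically serves a login wall to anonymous requests, so the "... not found" fallbacks are the common result:

from tools.LinkedInScraperTool import LinkedInScraperTool

# The CSS classes above target LinkedIn's public-page markup, which
# changes frequently; expect fallback strings on most fetches.
scraper = LinkedInScraperTool()
profile = scraper.forward("https://www.linkedin.com/in/example-profile")
print(profile.get("name"), "-", profile.get("headline"))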
tools/resumescraper.py
ADDED
@@ -0,0 +1,49 @@
+from smolagents.tools import Tool
+
+class ResumeScraperTool(Tool):
+    name = "resume_scraper"
+    description = (
+        "Parses a resume (in plain text) to extract key sections such as Summary, "
+        "Experience, Education, and Skills. This tool expects the resume text to include "
+        "section headers like 'Summary:', 'Experience:', 'Education:', and 'Skills:'."
+    )
+    inputs = {
+        "resume_text": {
+            "type": "string",
+            "description": "The plain text of the resume"
+        }
+    }
+    output_type = "object"
+
+    def forward(self, resume_text: str) -> dict:
+        # Basic extraction using simple markers; in a real-world case, you might want to use NLP.
+        sections = {
+            "summary": "Summary not found",
+            "experience": "Experience not found",
+            "education": "Education not found",
+            "skills": "Skills not found"
+        }
+        lower_text = resume_text.lower()
+
+        if "summary:" in lower_text:
+            start = lower_text.index("summary:")
+            # Assume the section ends at the next double newline or end of text
+            end = lower_text.find("\n\n", start)
+            sections["summary"] = resume_text[start + len("summary:"): end].strip() if end != -1 else resume_text[start + len("summary:"):].strip()
+
+        if "experience:" in lower_text:
+            start = lower_text.index("experience:")
+            end = lower_text.find("\n\n", start)
+            sections["experience"] = resume_text[start + len("experience:"): end].strip() if end != -1 else resume_text[start + len("experience:"):].strip()
+
+        if "education:" in lower_text:
+            start = lower_text.index("education:")
+            end = lower_text.find("\n\n", start)
+            sections["education"] = resume_text[start + len("education:"): end].strip() if end != -1 else resume_text[start + len("education:"):].strip()
+
+        if "skills:" in lower_text:
+            start = lower_text.index("skills:")
+            end = lower_text.find("\n\n", start)
+            sections["skills"] = resume_text[start + len("skills:"): end].strip() if end != -1 else resume_text[start + len("skills:"):].strip()
+
+        return sections
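This parser keys on literal headers and blank-line separators, so input formatting matters. A minimal usage sketch with invented resume text showing the expected shape:

from tools.resumescraper import ResumeScraperTool

# Headers must appear as 'Summary:', 'Experience:', 'Education:', 'Skills:'
# (matched case-insensitively); each section is assumed to end at a blank line.
sample = (
    "Summary:\nSeasoned engineer.\n\n"
    "Experience:\n10 years at ExampleCorp.\n\n"
    "Education:\nB.S. Computer Science.\n\n"
    "Skills:\nPython, AI"
)
print(ResumeScraperTool().forward(sample))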