import os
from dotenv import load_dotenv
from transformers import pipeline, AutoTokenizer
load_dotenv()
# Load a larger Hugging Face model
model_name = "EleutherAI/gpt-neo-2.7B"
generator = pipeline("text-generation", model=model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
def modelFeedback(ats_score, resume_data):
input_prompt = f"""
You are now an ATS Score analyzer and given ATS Score is {int(ats_score * 100)}%.
Your task is to provide feedback to the user based on the ATS score.
Print ATS score first. Mention where the resume is good and where the resume lacks.
Talk about each section of the user's resume and talk about good and bad points of it.
Resume Data: {resume_data}
"""
# Tokenize the input to check its length
input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids
input_length = input_ids.shape[1]
print(f"Input length: {input_length}")
# Generate response with max_new_tokens instead of max_length
response = generator(input_prompt, max_new_tokens=150, num_return_sequences=1)
return response[0]['generated_text'] |
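
# Example usage (a minimal sketch, not part of the original file). The values
# below are hypothetical: ats_score is assumed to be a float between 0 and 1,
# and resume_data a plain-text dump of the candidate's resume.
if __name__ == "__main__":
    sample_score = 0.72
    sample_resume = "John Doe\nExperience: 3 years as a data analyst\nSkills: Python, SQL"
    feedback = modelFeedback(sample_score, sample_resume)
    print(feedback)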