from PyPDF2 import PdfReader
from markdownify import markdownify
import gradio as gr
import openai

# Persistent System Prompt
LOSSDOG_PROMPT = """
<LossDogFramework version="3.0">
    <Identity>
        <Description>
            You are Loss Dog, a cutting-edge AI career advisor, resume analyzer, and builder. Your primary role is to:
            - Read and analyze the user's resume thoroughly.
            - Use the resume as a knowledge context for all interactions.
            - Engage with the user by answering questions, identifying areas of improvement, and offering suggestions.
        </Description>
    </Identity>
    <CoreDirectives>
        <Mission>
            Your mission is to provide actionable resume advice. Always leverage the uploaded resume to give feedback,
            highlight strengths, and identify weaknesses.
        </Mission>
    </CoreDirectives>
</LossDogFramework>
"""


def extract_text_from_file(file_path: str, file_name: str) -> str:
    """Extract text from a PDF or TXT file, returning an error string on failure."""
    if file_name.endswith(".pdf"):
        try:
            pdf_reader = PdfReader(file_path)
            # extract_text() can return None for image-only pages, so fall back to "".
            text = "\n".join(page.extract_text() or "" for page in pdf_reader.pages)
            return text
        except Exception as e:
            return f"Error reading PDF file: {str(e)}"
    elif file_name.endswith(".txt"):
        try:
            with open(file_path, "r", encoding="utf-8") as f:
                return f.read()
        except Exception as e:
            return f"Error reading text file: {str(e)}"
    else:
        return "Unsupported file format. Please upload a PDF or TXT file."


def convert_to_markdown(text: str) -> str:
    """Convert extracted file text to Markdown for neat display."""
    return markdownify(text, heading_style="ATX")
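
# Rough sketch of what markdownify does here, for orientation: it parses its input
# as HTML, so plain extracted resume text passes through largely unchanged, while
# any embedded markup becomes Markdown. The toy input below is an illustration, not
# output captured from this app.
#
#     convert_to_markdown("<h1>Skills</h1><b>Python</b>")  # ~ "# Skills\n\n**Python**"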


def interact_with_lossdog(
    user_message: str,
    markdown_text: str,
    api_key: str,
    history: list
) -> list:
    """
    Generate the assistant's response, always including the resume content as context
    alongside the conversation history.
    """
    try:
        # Note: this uses the legacy openai-python interface (openai < 1.0);
        # openai.ChatCompletion was removed in the 1.x SDK.
        openai.api_key = api_key

        # Validate existing history entries
        validated_history = []
        for msg in history:
            if isinstance(msg, dict) and "role" in msg and "content" in msg:
                validated_history.append({"role": msg["role"], "content": msg["content"]})

        # Build the messages for the OpenAI chat request: system prompt, then the
        # resume as extra system context, then the conversation so far.
        messages = [
            {"role": "system", "content": LOSSDOG_PROMPT},
            {"role": "system", "content": f"Resume Content:\n{markdown_text}"}
        ] + validated_history

        # Add the new user message at the end
        messages.append({"role": "user", "content": user_message})

        # Create the chat completion
        response = openai.ChatCompletion.create(
            model="gpt-4o-mini",
            messages=messages,
            max_tokens=4000  # Adjust as needed
        )
        assistant_response = response.choices[0].message.content

        # Update local (Gradio) history
        validated_history.append({"role": "user", "content": user_message})
        validated_history.append({"role": "assistant", "content": assistant_response})
        return validated_history
    except Exception as e:
        # Append the error as an assistant message so it is visible in the chat
        history.append({"role": "assistant", "content": f"Error: {str(e)}"})
        return history
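
# Quick manual sanity check outside Gradio (illustrative; the key and resume text
# below are placeholders, and running this will call the OpenAI API):
#
#     history = interact_with_lossdog(
#         "What are the weakest parts of my resume?",
#         "# Jane Doe\nSoftware Engineer with 3 years of experience...",
#         "sk-your-key-here",
#         [],
#     )
#     print(history[-1]["content"])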


def create_demo():
    """Build the Gradio app."""
    with gr.Blocks(css="#resume-preview {height:300px; overflow-y:auto; border:1px solid #ccc; padding:10px;}") as demo:
        gr.Markdown("""
# 🐕 LOSS Dog: AI-Powered Resume Advisor

**Steps**:
1. Upload your resume (PDF/TXT). It will appear in a scrollable box on the right.
2. Ask any questions or request feedback. LOSS Dog always references the uploaded resume.
3. Enjoy a back-and-forth conversation to refine your resume!
""")

        # API Key
        api_key = gr.Textbox(
            label="OpenAI API Key",
            placeholder="Enter your OpenAI API key...",
            type="password"
        )

        # Layout
        with gr.Row():
            with gr.Column(scale=3):
                chatbot = gr.Chatbot(label="Chat with LOSS Dog", type="messages")
            with gr.Column(scale=1):
                markdown_preview = gr.Markdown(label="Resume Preview", elem_id="resume-preview")

        # User Input
        with gr.Row():
            user_input = gr.Textbox(label="Your Message", lines=1)
            send_button = gr.Button("Send 🐾")

        # File Upload
        with gr.Row():
            upload = gr.File(label="Upload Your Resume (PDF or TXT)")

        # States
        history_state = gr.State([])   # Chat history
        markdown_state = gr.State("")  # Stored resume text in Markdown

        # 1) File upload handler
        def handle_upload(file, api_key):
            """
            Extract text -> convert to Markdown -> display it in the right pane.
            The chat history output is reset so the conversation starts fresh for the new resume.
            """
            if not file:
                return "No file uploaded.", gr.update(value=[])
            text = extract_text_from_file(file.name, file.name)
            if text.startswith("Error"):
                # Show the error message in the preview pane
                return text, gr.update(value=[])
            resume_md = convert_to_markdown(text)
            # Reset the chat history alongside the new resume preview
            return resume_md, gr.update(value=[])

        # 2) Chat message handler
        def handle_message(user_message, api_key, markdown_text, history):
            """
            Called when the user sends a new message; passes the stored resume and history along.
            """
            updated_history = interact_with_lossdog(user_message, markdown_text, api_key, history)
            return updated_history, updated_history

        # Link file upload -> handle_upload
        upload.change(
            handle_upload,
            inputs=[upload, api_key],
            outputs=[markdown_preview, history_state]
        )

        # Link send button -> handle_message
        send_button.click(
            handle_message,
            inputs=[user_input, api_key, markdown_state, history_state],
            outputs=[chatbot, history_state]
        )

        # Whenever the resume preview changes (e.g. after an upload), mirror its text
        # into markdown_state so subsequent messages can see it.
        def store_resume_in_state(markdown_content):
            return markdown_content

        # Small chain that keeps markdown_preview -> markdown_state in sync
        markdown_preview.change(
            store_resume_in_state,
            inputs=[markdown_preview],
            outputs=[markdown_state]
        )

    return demo


if __name__ == "__main__":
    demo = create_demo()
    demo.launch()
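
# Deployment note (an assumption, not part of the original file): when running
# outside a managed host such as a Hugging Face Space, launch() accepts extra
# options, e.g. demo.launch(server_name="0.0.0.0", server_port=7860) to listen on
# all interfaces, or demo.launch(share=True) for a temporary public link.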