# Hugging Face Space app file.
# (Page chrome removed during extraction: "Spaces: Running" status, file size
# 3,787 bytes, git blame commit hashes, and a column-number ruler.)
import os
import gradio as gr
from huggingface_hub import InferenceClient
import logging
# Configure root logging once at import time so this module's logger emits INFO+.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Hosted model used for every text-generation request below.
model_name = "meta-llama/Llama-3.2-1B"

# The model is served via the HF Inference API, which requires an access token.
# Fail fast at startup (with a log line) rather than on the first request.
hf_token = os.environ.get("HUGGINGFACE_TOKEN")
if not hf_token:
    logger.error("HUGGINGFACE_TOKEN environment variable is not set")
    raise ValueError("HUGGINGFACE_TOKEN environment variable is not set")

# Single shared Inference API client, reused by every handler in this module.
client = InferenceClient(model=model_name, token=hf_token)
def generate_text(prompt):
    """Generate text for *prompt* via the shared Inference API client.

    Returns the model's completion as a string. On any failure (network,
    auth, rate limit, ...) returns a human-readable error string instead of
    raising, so the Gradio UI can display it directly.
    """
    try:
        # Lazy %-formatting: the truncation/format work only happens if INFO
        # logging is enabled (original used an eagerly-evaluated f-string).
        logger.info("Attempting to generate text for prompt: %.50s...", prompt)
        response = client.text_generation(
            prompt,
            max_new_tokens=500,
            temperature=0.7,
            top_k=50,
            top_p=0.95,
            do_sample=True,
        )
        logger.info("Generated text: %.100s...", response)
        return response
    except Exception as e:
        # Broad catch is deliberate: any API failure becomes a UI-visible
        # message. logger.exception additionally records the full traceback,
        # which logger.error(f"...") did not.
        logger.exception("Error in generate_text")
        return f"An error occurred: {type(e).__name__}: {str(e)}"
def generate_email(industry, recipient_role, company_details):
    """Compose a cold-outreach email for the given audience.

    Builds an instruction prompt from the three form inputs, asks the model
    to write the email, and strips any echoed prompt text from the reply.
    Returns a generic apology string if anything unexpected goes wrong.
    """
    marker = "Now, write the email following these instructions."
    try:
        prompt = f"""Task: Generate a professional cold outreach email.
Context:
- Industry: {industry}
- Recipient Role: {recipient_role}
- Company Details: {company_details}
Instructions:
1. Create a catchy subject line related to the industry and recipient role.
2. Write a personalized greeting.
3. Introduce yourself and your company briefly.
4. Explain how your company can benefit the recipient, using specific details from the company information.
5. Suggest a meeting or call to discuss further.
6. Thank the recipient and provide your contact information.
7. Use a professional closing.
{marker} Be creative and specific, don't use placeholder text:
"""
        raw_output = generate_text(prompt)
        # Keep only the text after the last echo of the instruction marker;
        # rpartition yields the whole string unchanged when the marker is absent,
        # exactly like split(marker)[-1] did.
        email_body = raw_output.rpartition(marker)[2].strip()
        logger.info(f"Generated email for {industry}, {recipient_role}")
        return email_body
    except Exception as e:
        logger.error(f"Error in generate_email: {type(e).__name__}: {str(e)}")
        return "I apologize, but an unexpected error occurred. Please try again later or contact support."
def test_model_connection():
    """Smoke-test the inference backend and return a status string for the UI."""
    probe = "Write a short paragraph about the importance of AI in modern business:"
    try:
        reply = generate_text(probe)
        logger.info(f"Test model connection successful. Response: {reply}")
        return f"Model connection test successful. Response: {reply}"
    except Exception as e:
        logger.error(f"Test model connection failed: {type(e).__name__}: {str(e)}")
        return f"Model connection test failed: {type(e).__name__}: {str(e)}"
# Gradio interface: one tab for email generation, one to smoke-test the backend.
with gr.Blocks() as demo:
    gr.Markdown("# EmailGenie: AI-Powered Cold Email Generator")
    with gr.Tab("Generate Email"):
        industry = gr.Textbox(label="Industry")
        recipient_role = gr.Textbox(label="Recipient Role")
        company_details = gr.Textbox(label="Company/Personal Details", lines=5)
        generate_button = gr.Button("Generate Email")
        output = gr.Textbox(label="Generated Email", lines=10)
        generate_button.click(
            generate_email,
            inputs=[industry, recipient_role, company_details],
            outputs=output,
        )
    with gr.Tab("Test Connection"):
        test_button = gr.Button("Test Model Connection")
        test_output = gr.Textbox(label="Connection Test Result", lines=5)
        test_button.click(test_model_connection, inputs=None, outputs=test_output)

# Launch at module level: HF Spaces executes this file as a script.
# (Fixed: the original line carried a stray " |" extraction artifact that made
# it a syntax error.)
demo.launch()