import os

import streamlit as st
from dotenv import load_dotenv

from engine import AdvancedPromptOptimizer
from llm_optimizer import optimize_with_llm, PERSONAS

load_dotenv()

cost_model = {
    "GPT-4": (0.01, 0.03),
    "Claude Opus": (0.015, 0.075),
    "Claude Sonnet": (0.003, 0.015),
    "LLaMA 2": (0.012, 0.04),
    "Custom": (None, None),
}


def format_cost(tokens, cost_per_k):
    return f"${tokens * cost_per_k / 1000:.4f}"


def main():
    st.set_page_config(
        layout="wide",
        page_title="PromptCraft - AI Prompt Optimizer",
        page_icon="🚀",
        initial_sidebar_state="expanded",
    )

    # Custom CSS for enhanced styling
    st.markdown("""
    """, unsafe_allow_html=True)

    # Header Section
    st.markdown("""
    <div>
        <h1>🚀 PromptCraft AI</h1>
        <h3>✨ Optimize Your AI Prompts, Save Money & Time ✨</h3>
        <p>Transform verbose prompts into efficient, cost-effective versions without losing meaning</p>
    </div>
    """, unsafe_allow_html=True)

    col1, col2 = st.columns([0.65, 0.35], gap="large")

    with col1:
        st.markdown("""
        <h2>⚙️ Configuration</h2>
        """, unsafe_allow_html=True)

        st.markdown("**💰 LLM Cost Settings**")
        model = st.selectbox("Select LLM Model", list(cost_model.keys()))
        if model == "Custom":
            input_cost = st.number_input("Input Cost ($/1K tokens)", 0.01, 1.0, 0.03)
            output_cost = st.number_input("Output Cost ($/1K tokens)", 0.01, 1.0, 0.06)
        else:
            input_cost, output_cost = cost_model[model]

        st.markdown("**🤖 Optimization Model**")
        # Create columns for the optimizer section
        opt_col1, opt_col2 = st.columns([1, 1])
        with opt_col1:
            optimizer_model = st.selectbox("Choose Optimizer", ["spaCy + Lemminflect", "GPT-5"])

        persona = "Default"
        api_key_input = ""
        tavily_api_key_input = ""
        # Default optimization level; the slider below overrides it for the
        # spaCy + Lemminflect optimizer.
        aggressiveness = 1.0

        if optimizer_model == "GPT-5":
            with opt_col2:
                persona = st.selectbox("Choose Persona", list(PERSONAS.keys()))
            # API keys in the same row
            api_col1, api_col2 = st.columns([1, 1])
            with api_col1:
                api_key_input = st.text_input(
                    "AIMLAPI API Key (optional)",
                    type="password",
                    help="If you don't provide a key, the one in your .env file will be used.",
                )
            with api_col2:
                tavily_api_key_input = st.text_input(
                    "Tavily API Key (optional)",
                    type="password",
                    help="If you don't provide a key, the one in your .env file will be used.",
                )
        elif optimizer_model == "spaCy + Lemminflect":
            with opt_col2:
                aggressiveness = st.slider(
                    "Optimization Level",
                    0.0,
                    1.0,
                    0.7,
                    help="Higher = more aggressive shortening",
                )

        st.markdown("**📝 Your Prompt**")
        prompt = st.text_area(
            "Original Prompt",
            height=200,
            placeholder=(
                "✨ Paste your AI prompt here and watch the magic happen...\n\n"
                "Example: 'Please analyze this data very carefully and provide a comprehensive "
                "detailed report with all the advantages and disadvantages'"
            ),
            help="Enter the prompt you want to optimize. The optimizer will reduce token count while preserving meaning.",
        )

        col_btn1, col_btn2, col_btn3 = st.columns([1, 2, 1])
        with col_btn2:
            optimize_clicked = st.button("🚀 Optimize My Prompt", type="primary", use_container_width=True)

    if optimize_clicked:
        if not prompt or not prompt.strip():
            st.warning("Please enter a valid prompt.")
            return

        if optimizer_model == "spaCy + Lemminflect":
            optimizer = AdvancedPromptOptimizer()
            optimized, orig_toks, new_toks = optimizer.optimize(prompt, aggressiveness)
        else:  # GPT-5
            api_key = api_key_input if api_key_input else os.getenv("AIMLAPI_API_KEY")
            tavily_api_key = tavily_api_key_input if tavily_api_key_input else os.getenv("TAVILY_API_KEY")
            if not api_key:
                st.error("Please set your AIMLAPI_API_KEY in the .env file or enter it above.")
                return
            optimized = optimize_with_llm(prompt, api_key, persona, tavily_api_key=tavily_api_key)
            # We don't have the exact tokenizer for GPT-5, so approximate the
            # token counts with tiktoken's cl100k_base encoding.
            import tiktoken
            tokenizer = tiktoken.get_encoding("cl100k_base")
            orig_toks = len(tokenizer.encode(prompt))
            new_toks = len(tokenizer.encode(optimized))

        if orig_toks == 0:
            st.warning("Please enter a valid prompt.")
            return

        # Calculate savings
        token_savings = orig_toks - new_toks
        percent_savings = (token_savings / orig_toks) * 100 if orig_toks > 0 else 0
        input_cost_savings = token_savings * input_cost / 1000
        output_cost_savings = token_savings * output_cost / 1000
        total_cost_savings = input_cost_savings + output_cost_savings

        with col1:
            st.markdown("""
            <h2>✨ Optimized Prompt</h2>
            """, unsafe_allow_html=True)
            st.code(optimized, language="text")

            # Enhanced download button
            col_dl1, col_dl2, col_dl3 = st.columns([1, 2, 1])
            with col_dl2:
                st.download_button(
                    "📥 Download Optimized Prompt",
                    optimized,
                    file_name="optimized_prompt.txt",
                    use_container_width=True,
                )

        with col2:
            st.markdown("""
            <h2>📊 Optimization Results</h2>
            """, unsafe_allow_html=True)

            # Token Savings Card
            st.markdown(
                f"""
                <div>
                    <h3>🎯 Token Reduction</h3>
                    <h2>{percent_savings:.1f}%</h2>
                    <p>{token_savings} tokens saved</p>
                </div>
                """,
                unsafe_allow_html=True,
            )

            # Cost Savings Card
            if orig_toks > 0 and (input_cost + output_cost) > 0:
                cost_percent_savings = (
                    total_cost_savings / (orig_toks * (input_cost + output_cost) / 1000) * 100
                )
            else:
                cost_percent_savings = 0
            st.markdown(
                f"""
                <div>
                    <h3>💸 Cost Reduction</h3>
                    <h2>{cost_percent_savings:.1f}%</h2>
                    <p>${total_cost_savings:.4f} saved per call</p>
                </div>
                """,
                unsafe_allow_html=True,
            )

            # Visual Progress Indicator
            progress_value = min(1.0, max(0.0, percent_savings / 100))
            st.markdown("**📈 Optimization Progress**")
            st.progress(progress_value)
            st.markdown(
                f"<p>Prompt reduced to {100 - percent_savings:.1f}% of original size</p>",
                unsafe_allow_html=True,
            )

            # Detailed Breakdown
            with st.expander("📊 Cost Analysis"):
                col_a, col_b = st.columns(2)
                with col_a:
                    st.markdown(
                        f"**Input Cost**\n\n"
                        f"Original: {format_cost(orig_toks, input_cost)}\n\n"
                        f"Optimized: {format_cost(new_toks, input_cost)}\n\n"
                        f"Saved: {format_cost(token_savings, input_cost)}"
                    )
                with col_b:
                    st.markdown(
                        f"**Output Cost**\n\n"
                        f"Original: {format_cost(orig_toks, output_cost)}\n\n"
                        f"Optimized: {format_cost(new_toks, output_cost)}\n\n"
                        f"Saved: {format_cost(token_savings, output_cost)}"
                    )

            # Optimization report
            with st.expander("🔍 Applied Optimizations"):
                st.markdown("### Common Transformations")
                st.json(
                    {
                        "Removed fillers": "e.g., 'very', 'carefully'",
                        "Shortened phrases": "'advantages/disadvantages' → 'pros/cons'",
                        "Structural changes": "Simplified JSON formatting",
                        "Verb optimization": "Converted to base forms",
                        "Preposition removal": "Dropped non-essential connectors",
                    }
                )

                st.markdown("### Share Your Savings")
                st.code(
                    f"Saved {token_savings} tokens (${total_cost_savings:.4f}) with #PromptOptimizer\n"
                    f"Optimization level: {aggressiveness * 100:.0f}%"
                )


if __name__ == "__main__":
    main()