# app.py
"""Gradio front-end for the Eternal Equation AI Processor.

Routes user text through one of several Hugging Face pipelines
(sentiment, summarization, translation, question answering, text
generation, NER, zero-shot classification) behind a single
Input -> Process -> Output interface.
"""

import gradio as gr

from models import (
    load_sentiment_model,
    load_summarization_model,
    load_translation_model,
    load_question_answering_model,
    load_text_generation_model,
    load_ner_model,
    load_text_classification_model,
    # Removed: load_text_to_sql_model
)

# Title and Description for your app
TITLE = "🧠 Eternal Equation AI Processor (- = +)"

DESCRIPTION = """
**The Eternal Equation: (- = +)**

A conceptual framework made real. This tool demonstrates how simple interfaces can harness complex AI.

**🎯 Purpose:**
To make powerful AI models accessible, understandable, and usable for everyone through a clean, intuitive interface based on the universal pattern of Input -> Process -> Output.

**⚡ Current Capabilities:**
• **Sentiment Analysis:** Determine the emotional tone (Positive/Negative) of any text.
• **Text Summarization:** Condense long articles, reports, or paragraphs into concise summaries.
• **Translation:** Convert English text to French.
• **Question Answering:** Get answers from context.
• **Text Generation:** Create new text from prompts.
• **Named Entity Recognition:** Identify people, organizations, locations, etc.
• **Zero-Shot Classification:** Categorize text without pre-training.

**⚠️ Important Limitations:**
• **Summarization Length:** Works best with texts between **50 and 500 words**. Longer texts are automatically truncated.
• **Processing Speed:** Hosted on free-tier CPU hardware. Processing may take 10-30 seconds.
• **Beta Stage:** This is a live demo and proof-of-concept. Outputs may occasionally be imperfect.

**🔮 The Future:**
More modes for code, images, and audio are coming soon.
"""

# Load all models at startup (The Orchestra assembles)
sentiment_pipeline = load_sentiment_model()
summarize_pipeline = load_summarization_model()
translate_pipeline = load_translation_model()
qa_pipeline = load_question_answering_model()
textgen_pipeline = load_text_generation_model()
ner_pipeline = load_ner_model()
classify_pipeline = load_text_classification_model()


def process_text(input_text, mode, context=None, candidate_labels=None):
    """The Conductor: routes input to the correct specialist model.

    Args:
        input_text: The user's primary text (or question, for QA).
        mode: One of the mode labels shown in the UI radio group.
        context: Context passage; required only for "Question Answering".
        candidate_labels: Comma-separated labels; required only for
            "Zero-Shot Classification".

    Returns:
        A markdown-formatted result string, or a warning/error message.
    """
    # Check for empty input FIRST
    if not input_text.strip():
        return "⚠️ Please enter some text to process."

    # Handle Sentiment Analysis
    if mode == "Sentiment Analysis":
        try:
            result = sentiment_pipeline(input_text)
            return (
                f"🎯 **Label:** {result[0]['label']}\n\n"
                f"🔮 **Confidence:** {result[0]['score']:.4f}"
            )
        except Exception as e:
            return f"❌ Error in sentiment analysis: {str(e)}"

    # Handle Text Summarization
    elif mode == "Text Summarization":
        # Check word count BEFORE even trying the model
        word_list = input_text.split()
        word_count = len(word_list)
        if word_count < 50:
            return (
                "📝 **Please provide a longer text for summarization "
                "(at least 50 words).** This model is designed for "
                "articles and paragraphs."
            )

        # Smart Truncation for long texts: the model has a hard input
        # limit, so keep only the first 500 words and warn the user.
        max_word_limit = 500
        if word_count > max_word_limit:
            truncated_text = " ".join(word_list[:max_word_limit])
            warning_msg = (
                f"⚠️ Note: Your text was very long ({word_count} words). "
                f"Summarized the first {max_word_limit} words.\n\n"
            )
        else:
            truncated_text = input_text
            warning_msg = ""

        try:
            result = summarize_pipeline(
                truncated_text, max_length=130, min_length=30, do_sample=False
            )
            return f"{warning_msg}📄 **Summary:**\n\n{result[0]['summary_text']}"
        except Exception as e:
            return (
                "❌ The summarization model failed. Please try a different "
                f"text.\n(Error: {str(e)})"
            )

    # Handle Translation
    elif mode == "Translation (EN to FR)":
        try:
            result = translate_pipeline(input_text)
            return f"🇫🇷 **Translation:**\n\n{result[0]['translation_text']}"
        except Exception as e:
            return f"❌ Translation error: {str(e)}"

    # Handle Question Answering
    elif mode == "Question Answering":
        if not context or not context.strip():
            return (
                "⚠️ For question answering, please provide both a question "
                "and context text."
            )
        try:
            result = qa_pipeline(question=input_text, context=context)
            return (
                f"❓ **Question:** {input_text}\n\n"
                f"📚 **Answer:** {result['answer']}\n\n"
                f"🎯 **Confidence:** {result['score']:.4f}"
            )
        except Exception as e:
            return f"❌ Question answering error: {str(e)}"

    # Handle Text Generation
    elif mode == "Text Generation":
        try:
            # Limit generation to prevent long processing times
            result = textgen_pipeline(
                input_text, max_length=100, do_sample=True, temperature=0.7
            )
            return f"🎨 **Generated Text:**\n\n{result[0]['generated_text']}"
        except Exception as e:
            return f"❌ Text generation error: {str(e)}"

    # Handle Named Entity Recognition
    elif mode == "Named Entity Recognition":
        try:
            result = ner_pipeline(input_text)
            if not result:
                return "🔍 No named entities found in the text."
            formatted_result = "🏷️ **Named Entities:**\n\n"
            for entity in result:
                formatted_result += (
                    f"- {entity['word']} ({entity['entity_group']}, "
                    f"confidence: {entity['score']:.4f})\n"
                )
            return formatted_result
        except Exception as e:
            return f"❌ NER error: {str(e)}"

    # Handle Zero-Shot Classification
    elif mode == "Zero-Shot Classification":
        if not candidate_labels or not candidate_labels.strip():
            return (
                "⚠️ For zero-shot classification, please provide candidate "
                "labels (comma-separated)."
            )
        try:
            labels = [label.strip() for label in candidate_labels.split(",")]
            result = classify_pipeline(input_text, candidate_labels=labels)
            formatted_result = "📊 **Classification Results:**\n\n"
            for label, score in zip(result["labels"], result["scores"]):
                formatted_result += f"- {label}: {score:.4f}\n"
            return formatted_result
        except Exception as e:
            return f"❌ Classification error: {str(e)}"

    # Handle any other mode that might be added in the future
    else:
        return "Selected mode is not yet implemented."


# Create the Gradio Interface (The Frontend UI)
with gr.Blocks(
    title=TITLE,
    css=".gradio-container {max-width: 800px; margin: auto;}",
) as demo:
    gr.Markdown(f"# {TITLE}")
    gr.Markdown(DESCRIPTION)

    with gr.Row():
        with gr.Column(scale=1):
            input_text = gr.Textbox(
                label="➖ Input Text",
                placeholder="Paste your text here...",
                lines=5,
            )
            mode = gr.Radio(
                choices=[
                    "Sentiment Analysis",
                    "Text Summarization",
                    "Translation (EN to FR)",
                    "Question Answering",
                    "Text Generation",
                    "Named Entity Recognition",
                    "Zero-Shot Classification",
                ],
                label="🟰 Processing Mode",
                value="Sentiment Analysis",
            )

            # Conditional inputs based on mode
            context = gr.Textbox(
                label="📚 Context (for Question Answering)",
                placeholder="Paste the context text here...",
                lines=3,
                visible=False,
            )
            candidate_labels = gr.Textbox(
                label="🏷️ Candidate Labels (comma-separated, for Zero-Shot Classification)",
                placeholder="e.g., politics, sports, technology, science",
                visible=False,
            )

            # Show/hide additional inputs based on mode selection
            def toggle_additional_inputs(selected_mode):
                """Reveal the extra inputs only for modes that need them."""
                return {
                    context: gr.update(
                        visible=selected_mode == "Question Answering"
                    ),
                    candidate_labels: gr.update(
                        visible=selected_mode == "Zero-Shot Classification"
                    ),
                }

            mode.change(
                toggle_additional_inputs,
                inputs=mode,
                outputs=[context, candidate_labels],
            )

            submit_btn = gr.Button("Process", variant="primary")

        with gr.Column(scale=1):
            output_text = gr.Textbox(label="➕ AI Output", lines=8)

    # Examples
    gr.Examples(
        examples=[
            [
                "I am absolutely thrilled with this product! It's everything I hoped for and more.",
                "Sentiment Analysis",
                None,
                None,
            ],
            [
                """The field of artificial intelligence (AI) has seen unprecedented growth in the last decade. Breakthroughs in machine learning, particularly deep learning, have driven advancements in areas from computer vision to natural language processing. Companies across all sectors are investing heavily in AI research and implementation, hoping to gain a competitive edge. This rapid expansion has also sparked important debates about ethics, bias in algorithms, and the future of work. While the potential benefits are vast, experts urge for careful consideration of the societal impacts to ensure the technology is developed and used responsibly.""",
                "Text Summarization",
                None,
                None,
            ],
            ["Hello, world! How are you today?", "Translation (EN to FR)", None, None],
            [
                "What is the capital of France?",
                "Question Answering",
                "France is a country in Europe. Paris is the capital city of France.",
                None,
            ],
            ["Once upon a time in a land far, far away", "Text Generation", None, None],
            [
                "Apple Inc. was founded by Steve Jobs in Cupertino, California.",
                "Named Entity Recognition",
                None,
                None,
            ],
            [
                "The new smartphone has a great camera and long battery life",
                "Zero-Shot Classification",
                None,
                "technology, photography, travel",
            ],
        ],
        inputs=[input_text, mode, context, candidate_labels],
        outputs=output_text,
        fn=process_text,
        cache_examples=False,
    )

    submit_btn.click(
        fn=process_text,
        inputs=[input_text, mode, context, candidate_labels],
        outputs=output_text,
    )

# Launch the app
if __name__ == "__main__":
    demo.launch()