from flask import Flask, request, jsonify, send_file
from flask_cors import CORS
import os
import io
import base64
import requests
import random
from PIL import Image, ImageDraw, ImageFont
import numpy as np
from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration
import torch
from datetime import datetime, timedelta
import json
import re
from collections import Counter
import threading
import time

app = Flask(__name__)
CORS(app)

# Initialize AI models with fallback options
print("Loading AI models...")

# Global flags for model availability
MODELS_AVAILABLE = {
    'caption': False,
    'text_gen': False,
    'sentiment': False
}

# Try to load models with fallbacks
try:
    # Set cache directory with proper permissions
    os.environ['TRANSFORMERS_CACHE'] = '/tmp/transformers_cache'
    os.environ['HF_HOME'] = '/tmp/hf_cache'

    # Create cache directories
    os.makedirs('/tmp/transformers_cache', exist_ok=True)
    os.makedirs('/tmp/hf_cache', exist_ok=True)

    print("Attempting to load image captioning model...")
    caption_processor = BlipProcessor.from_pretrained(
        "Salesforce/blip-image-captioning-base",
        cache_dir='/tmp/transformers_cache',
        use_fast=False
    )
    caption_model = BlipForConditionalGeneration.from_pretrained(
        "Salesforce/blip-image-captioning-base",
        cache_dir='/tmp/transformers_cache'
    )
    MODELS_AVAILABLE['caption'] = True
    print("✓ Image captioning model loaded")
except Exception as e:
    print(f"⚠ Image captioning model failed: {e}")
    caption_processor = None
    caption_model = None

try:
    print("Attempting to load sentiment analyzer...")
    sentiment_analyzer = pipeline(
        "sentiment-analysis",
        model="cardiffnlp/twitter-roberta-base-sentiment-latest",
        cache_dir='/tmp/transformers_cache'
    )
    MODELS_AVAILABLE['sentiment'] = True
    print("✓ Sentiment analyzer loaded")
except Exception as e:
    print(f"⚠ Sentiment analyzer failed: {e}")
    # Use a lighter fallback model
    try:
        sentiment_analyzer = pipeline("sentiment-analysis", cache_dir='/tmp/transformers_cache')
        MODELS_AVAILABLE['sentiment'] = True
        print("✓ Fallback sentiment analyzer loaded")
    except Exception:
        sentiment_analyzer = None
        print("⚠ All sentiment models failed")

try:
    print("Attempting to load text generator...")
    text_generator = pipeline(
        "text-generation",
        model="gpt2",  # Lighter model
        cache_dir='/tmp/transformers_cache'
    )
    MODELS_AVAILABLE['text_gen'] = True
    print("✓ Text generator loaded")
except Exception as e:
    print(f"⚠ Text generator failed: {e}")
    text_generator = None

print(f"Models loaded: {sum(MODELS_AVAILABLE.values())}/3")
print("Fallback systems enabled for failed models")

# Meme templates and trending data
MEME_TEMPLATES = {
    "drake": {"top": "Drake pointing away", "bottom": "Drake pointing towards"},
    "distracted_boyfriend": {"top": "Looking at something new", "bottom": "Ignoring what you had"},
    "woman_yelling_cat": {"top": "When someone disagrees", "bottom": "You trying to stay calm"},
    "this_is_fine": {"top": "Everything is falling apart", "bottom": "This is fine"},
    "expanding_brain": {"top": "Basic idea", "bottom": "Galaxy brain idea"},
    "change_my_mind": {"top": "Controversial opinion", "bottom": "Change my mind"},
    "two_buttons": {"top": "Difficult choice A", "bottom": "Difficult choice B"},
    "disaster_girl": {"top": "When you cause chaos", "bottom": "And act innocent"}
}

HUMOR_STYLES = {
    "sarcastic": ["Oh great, another", "Because that always works", "Sure, that makes perfect sense"],
    "wholesome": ["You're doing great!", "Believe in yourself", "Every day is a gift"],
    "dark": ["When life gives you lemons", "Nothing matters anyway", "We're all doomed but"],
"relatable": ["When you realize", "Me trying to", "That moment when"], "gen_z": ["No cap", "It's giving", "That's lowkey", "Main character energy"] } # Mock trending data (in production, this would come from social media APIs) trending_topics = ["AI taking over", "Work from home", "Monday motivation", "Weekend vibes", "Cryptocurrency", "Climate change", "Social media addiction", "Netflix binge"] class MemeAI: def __init__(self): self.meme_history = [] self.user_preferences = {} def analyze_image(self, image): """Analyze image and generate smart suggestions""" if MODELS_AVAILABLE['caption'] and caption_processor and caption_model: try: # Generate caption using AI model inputs = caption_processor(image, return_tensors="pt") out = caption_model.generate(**inputs, max_length=50) caption = caption_processor.decode(out[0], skip_special_tokens=True) # Extract key objects/concepts keywords = self.extract_keywords(caption) return { "caption": caption, "keywords": keywords, "suggestions": self.generate_text_suggestions(keywords), "ai_powered": True } except Exception as e: print(f"AI image analysis error: {e}") # Fallback to rule-based analysis return self.analyze_image_fallback(image) def analyze_image_fallback(self, image): """Fallback image analysis when AI models aren't available""" # Simple image property analysis width, height = image.size mode = image.mode # Basic heuristics keywords = [] caption = "Image uploaded" # Aspect ratio heuristics aspect_ratio = width / height if aspect_ratio > 1.5: keywords.extend(["landscape", "wide", "horizontal"]) caption = "Wide image uploaded" elif aspect_ratio < 0.7: keywords.extend(["portrait", "vertical", "tall"]) caption = "Tall image uploaded" else: keywords.extend(["square", "balanced"]) caption = "Square image uploaded" # Color mode heuristics if mode == "RGBA": keywords.append("transparent") elif mode == "L": keywords.extend(["grayscale", "black and white"]) # Add generic meme-friendly keywords keywords.extend(["meme", "funny", "relatable"]) return { "caption": caption, "keywords": keywords[:5], "suggestions": self.generate_text_suggestions(keywords), "ai_powered": False, "note": "Using fallback analysis - upgrade for AI-powered insights" } def extract_keywords(self, text): """Extract meaningful keywords from image caption""" # Simple keyword extraction (in production, use more sophisticated NLP) words = re.findall(r'\b\w+\b', text.lower()) # Filter out common words stop_words = {'a', 'an', 'the', 'is', 'are', 'was', 'were', 'with', 'of', 'in', 'on', 'at'} keywords = [word for word in words if word not in stop_words and len(word) > 2] return keywords[:5] # Return top 5 keywords def generate_text_suggestions(self, keywords, humor_style="relatable"): """Generate contextual meme text suggestions""" suggestions = [] # Keyword-based suggestions for keyword in keywords: if keyword in ["person", "people", "man", "woman"]: suggestions.extend([ f"When you see someone {keyword}", f"Me trying to be a normal {keyword}", f"That {keyword} energy" ]) elif keyword in ["dog", "cat", "animal"]: suggestions.extend([ f"When your {keyword} judges you", f"Me as a {keyword}", f"{keyword.title()} > humans" ]) elif keyword in ["car", "food", "house", "computer"]: suggestions.extend([ f"When you can't afford a {keyword}", f"My relationship with {keyword}", f"{keyword.title()} problems require {keyword} solutions" ]) # Add humor style variations style_templates = HUMOR_STYLES.get(humor_style, HUMOR_STYLES["relatable"]) for template in style_templates[:3]: 
suggestions.append(f"{template} {random.choice(keywords)}") return list(set(suggestions))[:10] # Return unique suggestions, max 10 def get_generic_suggestions(self): """Fallback suggestions when image analysis fails""" return [ "When you realize it's Monday", "Me trying to adult", "This is fine", "Why are you like this?", "Big mood energy", "That awkward moment when", "Me vs my responsibilities", "Plot twist: nobody asked" ] def analyze_mood(self, text): """Analyze text mood for personalized suggestions""" if MODELS_AVAILABLE['sentiment'] and sentiment_analyzer: try: result = sentiment_analyzer(text)[0] # Handle different sentiment model outputs if 'label' in result: mood = result['label'].lower() # Convert labels to standard format if 'pos' in mood or mood == 'positive': mood = 'positive' elif 'neg' in mood or mood == 'negative': mood = 'negative' else: mood = 'neutral' else: mood = 'neutral' confidence = result.get('score', 0.5) mood_suggestions = { 'positive': ["You're killing it!", "Main character energy", "That's the spirit!", "Living your best life"], 'negative': ["This is fine", "Why are we here?", "Everything is chaos", "Big oof energy"], 'neutral': ["It be like that sometimes", "Just vibing", "No thoughts, head empty", "Existing peacefully"] } suggestions = mood_suggestions.get(mood, mood_suggestions['neutral']) return suggestions + [f"When you're feeling {mood}"] except Exception as e: print(f"Sentiment analysis error: {e}") # Fallback mood analysis return self.analyze_mood_fallback(text) def analyze_mood_fallback(self, text): """Fallback mood analysis using keyword matching""" text_lower = text.lower() positive_words = ['good', 'great', 'awesome', 'happy', 'love', 'best', 'amazing', 'wonderful'] negative_words = ['bad', 'awful', 'hate', 'worst', 'terrible', 'sad', 'angry', 'frustrated'] pos_count = sum(1 for word in positive_words if word in text_lower) neg_count = sum(1 for word in negative_words if word in text_lower) if pos_count > neg_count: return ["You're killing it!", "Positive vibes only", "That energy though"] elif neg_count > pos_count: return ["This is fine", "We've all been there", "Mood honestly"] else: return ["It be like that", "Just existing", "Neutral chaos energy"] def get_trending_suggestions(self): """Generate suggestions based on trending topics""" trending_memes = [] for topic in trending_topics[:5]: trending_memes.extend([ f"When {topic} hits different", f"Me explaining {topic} to my parents", f"{topic} be like" ]) return trending_memes def predict_virality(self, text, image_features=None): """Mock virality prediction (would use ML model in production)""" score = 0 # Length check (shorter usually better) if len(text) < 50: score += 20 # Trending topic check for topic in trending_topics: if topic.lower() in text.lower(): score += 30 # Humor markers humor_words = ['when', 'me', 'that', 'mood', 'vibes', 'energy', 'literally'] score += sum(5 for word in humor_words if word in text.lower()) # Randomize for demo score += random.randint(0, 30) return min(score, 100) def learn_user_preferences(self, user_id, meme_data): """Learn from user's meme creation patterns""" if user_id not in self.user_preferences: self.user_preferences[user_id] = { 'humor_styles': [], 'topics': [], 'formats': [] } # Update preferences (simplified) self.user_preferences[user_id]['topics'].extend(meme_data.get('keywords', [])) if 'humor_style' in meme_data: self.user_preferences[user_id]['humor_styles'].append(meme_data['humor_style']) # Initialize AI engine meme_ai = MemeAI() 
@app.route('/health', methods=['GET'])
def health_check():
    return jsonify({
        "status": "healthy",
        "timestamp": datetime.now().isoformat(),
        "models_loaded": MODELS_AVAILABLE,
        "ai_features": sum(MODELS_AVAILABLE.values())
    })


@app.route('/analyze-image', methods=['POST'])
def analyze_image():
    """Analyze uploaded image and provide smart suggestions"""
    try:
        data = request.get_json()

        if 'image' not in data:
            return jsonify({"error": "No image provided"}), 400

        # Decode base64 image (handles both data URIs and raw base64 strings)
        image_data = base64.b64decode(data['image'].split(',')[-1])
        image = Image.open(io.BytesIO(image_data))

        # Analyze image
        analysis = meme_ai.analyze_image(image)

        return jsonify({
            "success": True,
            "analysis": analysis,
            "trending_suggestions": meme_ai.get_trending_suggestions()[:5]
        })
    except Exception as e:
        return jsonify({"error": str(e)}), 500


@app.route('/generate-suggestions', methods=['POST'])
def generate_suggestions():
    """Generate text suggestions based on various inputs"""
    try:
        data = request.get_json()
        suggestions = []

        # If keywords provided
        if 'keywords' in data:
            suggestions.extend(meme_ai.generate_text_suggestions(
                data['keywords'],
                data.get('humor_style', 'relatable')
            ))

        # If mood text provided
        if 'mood_text' in data:
            suggestions.extend(meme_ai.analyze_mood(data['mood_text']))

        # Add trending suggestions
        suggestions.extend(meme_ai.get_trending_suggestions()[:3])

        # Remove duplicates and limit
        unique_suggestions = list(set(suggestions))[:15]

        return jsonify({
            "success": True,
            "suggestions": unique_suggestions,
            "humor_styles": list(HUMOR_STYLES.keys())
        })
    except Exception as e:
        return jsonify({"error": str(e)}), 500


@app.route('/predict-virality', methods=['POST'])
def predict_virality():
    """Predict how viral a meme might be"""
    try:
        data = request.get_json()

        if 'text' not in data:
            return jsonify({"error": "No text provided"}), 400

        score = meme_ai.predict_virality(data['text'])

        # Generate advice
        advice = []
        if score < 30:
            advice.append("Try adding trending topics or relatable situations")
        if len(data['text']) > 100:
            advice.append("Shorter text usually performs better")
        if score > 70:
            advice.append("This has great potential to go viral!")

        return jsonify({
            "success": True,
            "virality_score": score,
            "advice": advice,
            "trending_topics": trending_topics[:5]
        })
    except Exception as e:
        return jsonify({"error": str(e)}), 500


@app.route('/meme-battle', methods=['POST'])
def meme_battle():
    """AI judges meme battle between submissions"""
    try:
        data = request.get_json()

        if 'memes' not in data or len(data['memes']) < 2:
            return jsonify({"error": "Need at least 2 memes for battle"}), 400

        results = []
        for i, meme in enumerate(data['memes']):
            score = meme_ai.predict_virality(meme.get('text', ''))
            results.append({
                "id": i,
                "text": meme.get('text', ''),
                "score": score,
                "feedback": f"Virality potential: {score}%"
            })

        # Sort by score
        results.sort(key=lambda x: x['score'], reverse=True)

        return jsonify({
            "success": True,
            "winner": results[0],
            "rankings": results,
            "battle_commentary": f"The winner with {results[0]['score']}% virality potential!"
        })
    except Exception as e:
        return jsonify({"error": str(e)}), 500


@app.route('/trending-topics', methods=['GET'])
def get_trending_topics():
    """Get current trending topics for memes"""
    return jsonify({
        "success": True,
        "trending_topics": trending_topics,
        "meme_templates": MEME_TEMPLATES,
        "humor_styles": list(HUMOR_STYLES.keys())
    })


@app.route('/personalized-suggestions', methods=['POST'])
def get_personalized_suggestions():
    """Get personalized suggestions based on user history"""
    try:
        data = request.get_json()
        user_id = data.get('user_id', 'anonymous')

        # Get user preferences
        preferences = meme_ai.user_preferences.get(user_id, {})

        # Generate personalized suggestions
        suggestions = []

        # Based on user's favorite topics
        if 'topics' in preferences:
            top_topics = Counter(preferences['topics']).most_common(3)
            for topic, _ in top_topics:
                suggestions.extend(meme_ai.generate_text_suggestions([topic]))

        # Based on humor style
        if 'humor_styles' in preferences and preferences['humor_styles']:
            favorite_style = Counter(preferences['humor_styles']).most_common(1)[0][0]
            suggestions.extend(HUMOR_STYLES.get(favorite_style, []))

        # Fallback to trending if no preferences
        if not suggestions:
            suggestions = meme_ai.get_trending_suggestions()

        return jsonify({
            "success": True,
            "personalized_suggestions": list(set(suggestions))[:10],
            "user_preferences": preferences
        })
    except Exception as e:
        return jsonify({"error": str(e)}), 500


@app.route('/save-meme-data', methods=['POST'])
def save_meme_data():
    """Save meme data for learning user preferences"""
    try:
        data = request.get_json()
        user_id = data.get('user_id', 'anonymous')

        # Learn from this meme
        meme_ai.learn_user_preferences(user_id, data)

        return jsonify({
            "success": True,
            "message": "Preferences updated"
        })
    except Exception as e:
        return jsonify({"error": str(e)}), 500


@app.route('/create-template', methods=['POST'])
def create_template():
    """AI creates new meme template suggestions"""
    try:
        data = request.get_json()

        # Mock template creation (in production, use image generation models)
        new_templates = [
            {
                "name": "AI Takeover",
                "description": "When AI does something better than humans",
                "suggested_text": {
                    "top": "Humans doing task manually",
                    "bottom": "AI doing it in 0.1 seconds"
                }
            },
            {
                "name": "Remote Work Reality",
                "description": "Work from home expectations vs reality",
                "suggested_text": {
                    "top": "What I thought WFH would be like",
                    "bottom": "What it actually is"
                }
            },
            {
                "name": "Gen Z vs Millennial",
                "description": "Generational differences",
                "suggested_text": {
                    "top": "Gen Z explaining new slang",
                    "bottom": "Millennials pretending to understand"
                }
            }
        ]

        return jsonify({
            "success": True,
            "new_templates": new_templates,
            "trending_formats": ["Before/After", "Expectation/Reality", "Me vs Everyone else"]
        })
    except Exception as e:
        return jsonify({"error": str(e)}), 500


if __name__ == '__main__':
    port = int(os.environ.get('PORT', 7860))
    app.run(host='0.0.0.0', port=port, debug=True)
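# Example requests against a locally running instance (illustrative only;
# assumes the default port 7860 and curl as the client):
#
#   curl -X POST http://localhost:7860/generate-suggestions \
#        -H "Content-Type: application/json" \
#        -d '{"keywords": ["cat", "monday"], "humor_style": "sarcastic"}'
#
#   curl -X POST http://localhost:7860/predict-virality \
#        -H "Content-Type: application/json" \
#        -d '{"text": "Me trying to adult on a Monday"}'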