from flask import Flask, render_template, request, jsonify, Response, stream_with_context
from google import genai
from google.genai import types
import os
from PIL import Image
import io
import base64
import json
import requests  # For HTTP requests to the Telegram API

app = Flask(__name__)

# API keys
GOOGLE_API_KEY = os.environ.get("GEMINI_API_KEY")
TELEGRAM_BOT_TOKEN = "8004545342:AAGcZaoDjYg8dmbbXRsR1N3TfSSbEiAGz88"
# Add this as an environment variable
TELEGRAM_CHAT_ID = "-1002497861230"  # ID of the chat the images are sent to

client = genai.Client(
    api_key=GOOGLE_API_KEY,
)


def send_to_telegram(image_data, caption="Nouvelle image uploadée"):
    """Send the image to the specified Telegram chat."""
    try:
        # Telegram Bot API endpoint for sending photos
        url = f"https://api.telegram.org/bot{TELEGRAM_BOT_TOKEN}/sendPhoto"

        # Build the multipart payload
        files = {'photo': ('image.png', image_data)}
        data = {'chat_id': TELEGRAM_CHAT_ID, 'caption': caption}

        # Send the request
        response = requests.post(url, files=files, data=data)

        # Check whether the upload succeeded
        if response.status_code == 200:
            print("Image envoyée avec succès à Telegram")
            return True
        else:
            print(f"Erreur lors de l'envoi à Telegram: {response.text}")
            return False
    except Exception as e:
        print(f"Exception lors de l'envoi à Telegram: {e}")
        return False
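
# Minimal usage sketch for the helper above (hypothetical file name; assumes the
# bot token and chat ID configured above are valid):
#
#     with open("example.png", "rb") as f:
#         send_to_telegram(f.read(), caption="Test upload")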


@app.route('/')  # route path assumed
def index():
    # return "La plateforme est en maintenance."
    return render_template('index.html')


@app.route('/maj')  # route path assumed
def indexx():
    return render_template('maj.html')


@app.route('/solve', methods=['POST'])  # route path and method assumed
def solve():
    try:
        # Read the uploaded image
        image_data = request.files['image'].read()
        img = Image.open(io.BytesIO(image_data))

        # Forward the image to Telegram
        send_to_telegram(image_data, "Nouvelle image pour résolution (modèle standard)")

        # Re-encode the image as base64 PNG for Gemini
        buffered = io.BytesIO()
        img.save(buffered, format="PNG")
        img_str = base64.b64encode(buffered.getvalue()).decode()

        def generate():
            mode = 'starting'
            try:
                response = client.models.generate_content_stream(
                    model="gemini-2.5-pro-exp-03-25",
                    contents=[
                        {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
                        """Résous ça en français with rendering latex""",
                    ],
                )
                for chunk in response:
                    for part in chunk.candidates[0].content.parts:
                        # Emit a mode-switch event whenever the stream moves between
                        # the model's thoughts and its final answer, then stream the text
                        if part.thought:
                            if mode != "thinking":
                                yield f'data: {json.dumps({"mode": "thinking"})}\n\n'
                                mode = "thinking"
                        else:
                            if mode != "answering":
                                yield f'data: {json.dumps({"mode": "answering"})}\n\n'
                                mode = "answering"
                        yield f'data: {json.dumps({"content": part.text})}\n\n'
            except Exception as e:
                print(f"Error during generation: {e}")
                yield f'data: {json.dumps({"error": "Une erreur inattendue est survenue"})}\n\n'

        return Response(
            stream_with_context(generate()),
            mimetype='text/event-stream',
            headers={
                'Cache-Control': 'no-cache',
                'X-Accel-Buffering': 'no'
            }
        )
    except Exception as e:
        return jsonify({'error': 'Une erreur inattendue est survenue'}), 500


@app.route('/solved', methods=['POST'])  # route path and method assumed
def solved():
    try:
        # Read the uploaded image
        image_data = request.files['image'].read()
        img = Image.open(io.BytesIO(image_data))

        # Forward the image to Telegram
        send_to_telegram(image_data, "Nouvelle image pour résolution (modèle premium)")

        # Re-encode the image as base64 PNG for Gemini
        buffered = io.BytesIO()
        img.save(buffered, format="PNG")
        img_str = base64.b64encode(buffered.getvalue()).decode()

        def generate():
            mode = 'starting'
            try:
                response = client.models.generate_content_stream(
                    model="gemini-2.5-flash-preview-04-17",
                    contents=[
                        {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
                        """ Résous ça en français with rendering latex""",
                    ],
                    config=types.GenerateContentConfig(
                        thinking_config=types.ThinkingConfig(
                            thinking_budget=8000
                        )
                    )
                )
                for chunk in response:
                    for part in chunk.candidates[0].content.parts:
                        if part.thought:
                            if mode != "thinking":
                                yield f'data: {json.dumps({"mode": "thinking"})}\n\n'
                                mode = "thinking"
                        else:
                            if mode != "answering":
                                yield f'data: {json.dumps({"mode": "answering"})}\n\n'
                                mode = "answering"
                        yield f'data: {json.dumps({"content": part.text})}\n\n'
            except Exception as e:
                print(f"Error during generation: {e}")
                yield f'data: {json.dumps({"error": "Une erreur inattendue est survenue"})}\n\n'

        return Response(
            stream_with_context(generate()),
            mimetype='text/event-stream',
            headers={
                'Cache-Control': 'no-cache',
                'X-Accel-Buffering': 'no'
            }
        )
    except Exception as e:
        return jsonify({'error': 'Une erreur inattendue est survenue'}), 500


if __name__ == '__main__':
    app.run(debug=True)
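
# Rough client-side sketch (hypothetical, for a separate script): POST an image to
# the '/solve' endpoint assumed above and print the server-sent events as they arrive.
#
#     import requests
#
#     with open("exercise.png", "rb") as f:
#         resp = requests.post("http://localhost:5000/solve",
#                              files={"image": f}, stream=True)
#     for line in resp.iter_lines(decode_unicode=True):
#         if line and line.startswith("data: "):
#             print(line[len("data: "):])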