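"""Flask backend for a chat + text-to-speech demo running on Hugging Face Spaces.

Exposes endpoints to chat (via InferenceManager), switch the inference mode and
model, generate speech with several TTS backends (EDGE, VITS, gTTS) through
AudioUtils, and serve the resulting temporary audio files.
"""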
from flask import Flask, send_from_directory, request, jsonify, render_template
from flask_cors import CORS
import os
from pathlib import Path
from audio_utils import AudioUtils
import logging
import google.generativeai as genai
import yaml
from inference import InferenceManager
from chat_log import ChatLogger
from dotenv import load_dotenv

# Configure logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

app = Flask(__name__)
CORS(app)

# Load environment variables
load_dotenv()
GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')

# Load configuration
with open('config.yaml', 'r') as file:
    config = yaml.safe_load(file)

# Initialize components, logging each step
try:
    logger.info("Starting system...")

    logger.info("Configuring Google API...")
    genai.configure(api_key=GOOGLE_API_KEY)

    logger.info("Initializing inference system...")
    inference = InferenceManager()

    logger.info("Initializing chat logger...")
    chat_logger = ChatLogger()

    logger.info("Initializing audio system...")
    audio_utils = AudioUtils()

    logger.info("System started successfully")
except Exception as e:
    logger.error(f"Critical error initializing the system: {e}")
    raise

# Directory for temporary audio files
TEMP_AUDIO_DIR = Path("static/temp_audio")
TEMP_AUDIO_DIR.mkdir(parents=True, exist_ok=True)

# Configure the Gemini model
model = genai.GenerativeModel('gemini-pro')

@app.after_request
def after_request(response):
    # Add headers needed for ngrok
    response.headers.add('Accept-Ranges', 'bytes')
    response.headers.add('Access-Control-Allow-Origin', '*')
    logger.debug(f"Response headers: {dict(response.headers)}")
    return response

@app.route('/temp_audio/<path:filename>')
def serve_audio(filename):
    try:
        logger.info(f"Attempting to serve audio file: {filename}")
        file_path = TEMP_AUDIO_DIR / filename

        if not file_path.exists():
            logger.error(f"File not found: {file_path}")
            return jsonify({'error': 'File not found'}), 404

        if not filename.endswith(('.wav', '.mp3')):
            logger.error(f"Invalid file format: {filename}")
            return jsonify({'error': 'Invalid file format'}), 400

        logger.info(f"Serving file from: {file_path}")
        response = send_from_directory(str(TEMP_AUDIO_DIR), filename)
        response.headers['Content-Type'] = 'audio/wav' if filename.endswith('.wav') else 'audio/mpeg'
        response.headers['Cache-Control'] = 'no-cache'
        return response
    except Exception as e:
        logger.error(f"Error serving audio file: {e}", exc_info=True)
        return jsonify({'error': str(e)}), 500

@app.route('/generate_audio', methods=['POST'])
def generate_audio():
    try:
        text = request.json.get('text')
        model = request.json.get('model', 'EDGE')  # EDGE is the default
        logger.info(f"Generating audio for text: '{text}' using model: {model}")

        if not text:
            logger.error("No text provided")
            return jsonify({'error': 'No text provided'}), 400

        try:
            temp_audio_utils = AudioUtils(model_name=model)
            audio_file = temp_audio_utils.text_to_speech(text, return_file=True)
            if not audio_file:
                logger.error(f"Failed to generate audio with {model}, falling back to EDGE")
                temp_audio_utils = AudioUtils(model_name='EDGE')
                audio_file = temp_audio_utils.text_to_speech(text, return_file=True)
        except Exception as e:
            logger.error(f"Error with model {model}, falling back to EDGE: {e}")
            temp_audio_utils = AudioUtils(model_name='EDGE')
            audio_file = temp_audio_utils.text_to_speech(text, return_file=True)

        if audio_file:
            audio_url = f'/temp_audio/{audio_file}'
            logger.info(f"Audio generated: {audio_url}")

            file_path = TEMP_AUDIO_DIR / audio_file
            if not file_path.exists():
                logger.error(f"File not found: {file_path}")
                return jsonify({
                    'error': 'Generated file not found',
                    'details': 'The audio file was not generated correctly'
                }), 500

            return jsonify({
                'audio_url': audio_url,
                'model_used': temp_audio_utils.current_model
            })

        return jsonify({
            'error': 'Failed to generate audio',
            'details': 'Audio could not be generated with any model'
        }), 500
    except Exception as e:
        logger.error(f"Critical error generating audio: {e}", exc_info=True)
        return jsonify({
            'error': str(e),
            'details': 'Internal server error while generating audio'
        }), 500

@app.route('/static/<path:path>')
def send_static(path):
    logger.debug(f"Serving static file: {path}")
    return send_from_directory('static', path)

@app.route('/')
def home():
    return render_template('chat.html')

@app.route('/chat', methods=['POST'])
def chat():
    try:
        data = request.json
        message = data.get('message', '')
        mode = data.get('mode', 'seguros')

        # Update the mode if it changed
        if mode != inference.current_mode:
            inference.change_mode(mode)

        response = inference.get_response(message)
        return jsonify({'response': response})
    except Exception as e:
        return jsonify({'error': str(e)}), 500

@app.route('/change_mode', methods=['POST'])
def change_mode():
    try:
        mode = request.json.get('mode')
        success = inference.change_mode(mode)
        return jsonify({'success': success})
    except Exception as e:
        return jsonify({'error': str(e)}), 500

@app.route('/change_model', methods=['POST'])
def change_model():
    try:
        model = request.json.get('model')
        success = inference.change_model(model)
        return jsonify({'success': success})
    except Exception as e:
        return jsonify({'error': str(e)}), 500

@app.route('/change_tts', methods=['POST'])
def change_tts():
    try:
        model = request.json.get('model')
        if model not in ['EDGE', 'VITS', 'gTTS']:
            return jsonify({'error': 'Invalid TTS model'}), 400

        # Update the TTS model on the module-level instance
        # (assigning to a local here would leave the global untouched)
        global audio_utils
        audio_utils = AudioUtils(model_name=model)
        return jsonify({'success': True})
    except Exception as e:
        logger.error(f"Error changing TTS model: {e}")
        return jsonify({'error': str(e)}), 500

if __name__ == "__main__":
    try:
        logger.info("Starting server...")
        # Bind to all interfaces so it works on Hugging Face Spaces
        app.run(host='0.0.0.0', port=7860)  # Standard Spaces port
    except Exception as e:
        logger.error(f"Error starting the application: {e}", exc_info=True)