salomonsky committed
Commit 810dee2 (verified)
1 Parent(s): 358aaa8

Upload app.py with huggingface_hub

Files changed (1)
  1. app.py +23 -44
app.py CHANGED
@@ -8,6 +8,7 @@ import google.generativeai as genai
 import yaml
 from inference import InferenceManager
 from chat_log import ChatLogger
+from dotenv import load_dotenv
 
 # Configure logging
 logging.basicConfig(level=logging.DEBUG)
@@ -16,6 +17,10 @@ logger = logging.getLogger(__name__)
 app = Flask(__name__)
 CORS(app)
 
+# Load environment variables
+load_dotenv()
+GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')
+
 # Load configuration
 with open('config.yaml', 'r') as file:
     config = yaml.safe_load(file)
@@ -25,10 +30,10 @@ try:
     logger.info("Iniciando sistema...")
 
     logger.info("Configurando API de Google...")
-    genai.configure(api_key=config.get('GOOGLE_API_KEY', ''))
+    genai.configure(api_key=GOOGLE_API_KEY)
 
     logger.info("Inicializando sistema de inferencia...")
-    inference = InferenceManager(config)
+    inference = InferenceManager()
 
     logger.info("Inicializando registro de chat...")
     chat_logger = ChatLogger()
@@ -45,6 +50,9 @@ except Exception as e:
 TEMP_AUDIO_DIR = Path("static/temp_audio")
 TEMP_AUDIO_DIR.mkdir(parents=True, exist_ok=True)
 
+# Configure the model
+model = genai.GenerativeModel('gemini-pro')
+
 @app.after_request
 def after_request(response):
     # Add headers required by ngrok
@@ -137,69 +145,40 @@ def send_static(path):
 
 @app.route('/')
 def home():
-    # Add initial messages
-    initial_messages = [{
-        'role': 'assistant',
-        'content': 'Sistema iniciado. Todos los modelos cargados correctamente.'
-    }]
-    return render_template('chat.html', messages=initial_messages)
+    return render_template('chat.html')
 
 @app.route('/chat', methods=['POST'])
 def chat():
     try:
-        message = request.json.get('message')
-        mode = request.json.get('mode', 'seguros')  # Default mode
-
-        if not message:
-            return jsonify({'error': 'No message provided'}), 400
-
-        # Log the user's message
-        chat_logger.log_message(message, is_user=True)
+        data = request.json
+        message = data.get('message', '')
+        mode = data.get('mode', 'seguros')
 
-        # Get a response from the inference system
-        response = inference.get_response(message, mode)
-
-        if response:
-            # Log the bot's response
-            chat_logger.log_message(response, is_user=False)
-
-            return jsonify({
-                'response': response
-            })
-
-        return jsonify({'error': 'No response generated'}), 500
-
+        # Update the mode if needed
+        if mode != inference.current_mode:
+            inference.change_mode(mode)
+
+        response = inference.get_response(message)
+        return jsonify({'response': response})
     except Exception as e:
-        logger.error(f"Error in chat endpoint: {e}", exc_info=True)
         return jsonify({'error': str(e)}), 500
 
 @app.route('/change_mode', methods=['POST'])
 def change_mode():
     try:
         mode = request.json.get('mode')
-        if mode not in ['seguros', 'creditos', 'cobranza']:
-            return jsonify({'error': 'Invalid mode'}), 400
-
-        # Update the mode in the inference system
-        inference.mode = mode
-
-        return jsonify({'success': True})
+        success = inference.change_mode(mode)
+        return jsonify({'success': success})
     except Exception as e:
-        logger.error(f"Error changing mode: {e}")
         return jsonify({'error': str(e)}), 500
 
 @app.route('/change_model', methods=['POST'])
 def change_model():
     try:
         model = request.json.get('model')
-        if model not in ['Gemini 8b', 'Mixtral 7b']:
-            return jsonify({'error': 'Invalid model'}), 400
-
-        inference.current_model = model
-
-        return jsonify({'success': True})
+        success = inference.change_model(model)
+        return jsonify({'success': success})
     except Exception as e:
-        logger.error(f"Error changing model: {e}")
        return jsonify({'error': str(e)}), 500
 
 @app.route('/change_tts', methods=['POST'])
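
The substance of this commit is that app.py now reads GOOGLE_API_KEY from the environment via python-dotenv instead of from config.yaml, instantiates InferenceManager without arguments, and creates a gemini-pro model at startup. A minimal sketch of the new configuration path, run outside Flask, might look like the following; the .env contents and the missing-key check are assumptions added for illustration, while the load_dotenv, os.getenv, and genai calls are taken directly from the diff.

# Sketch of the environment-based setup introduced in this commit.
# Assumes a .env file next to app.py containing a line such as:
#   GOOGLE_API_KEY=your-key-here   (hypothetical value)
import os

import google.generativeai as genai
from dotenv import load_dotenv

load_dotenv()  # populate os.environ from .env
GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')

if not GOOGLE_API_KEY:
    # The committed app.py does not guard against a missing key;
    # this check is a suggestion, not part of the commit.
    raise RuntimeError("GOOGLE_API_KEY is not set in the environment or .env")

genai.configure(api_key=GOOGLE_API_KEY)
model = genai.GenerativeModel('gemini-pro')  # same model name used in app.py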
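
Because /chat, /change_mode, and /change_model now delegate directly to InferenceManager, a quick smoke test against a locally running instance could look like this sketch; the host, port, and example payload values are assumptions, while the routes and JSON keys come from the diff above.

# Hypothetical smoke test for the refactored endpoints.
# Assumes the Flask app is running locally on Flask's default port 5000
# and that the 'requests' package is installed.
import requests

BASE = "http://127.0.0.1:5000"

# Switch the assistant mode; the handler now returns {'success': <bool>}.
r = requests.post(f"{BASE}/change_mode", json={"mode": "seguros"})
print("change_mode:", r.json())

# Send a chat message; the handler now returns {'response': <text>}.
r = requests.post(f"{BASE}/chat", json={"message": "Hola", "mode": "seguros"})
print("chat:", r.json())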