salomonsky committed on
Commit
a4f92b8
·
verified ·
1 Parent(s): ece2740

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -8
app.py CHANGED
@@ -3,24 +3,17 @@ import streamlit as st
3
  import subprocess
4
  from gtts import gTTS
5
  import cv2
6
- from huggingface_hub import InferenceClient
7
  import torch
8
 
9
  device = 'cuda' if torch.cuda.is_available() else 'cpu'
10
  print('Using {} for inference.'.format(device))
11
 
12
- client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
13
-
14
  def generate_output(prompt):
15
  if not prompt:
16
  return None, "El campo de la pregunta es obligatorio."
17
 
18
- response = client.text_generation(prompt, max_new_tokens=50, temperature=0.6)
19
- gpt3_output = response.strip()
20
- personalized_response = f"{gpt3_output}"
21
-
22
  try:
23
- tts = gTTS(personalized_response, lang='es')
24
  audio_path = "audio.mp3"
25
  tts.save(audio_path)
26
  except Exception as e:
 
3
  import subprocess
4
  from gtts import gTTS
5
  import cv2
 
6
  import torch
7
 
8
  device = 'cuda' if torch.cuda.is_available() else 'cpu'
9
  print('Using {} for inference.'.format(device))
10
 
 
 
11
  def generate_output(prompt):
12
  if not prompt:
13
  return None, "El campo de la pregunta es obligatorio."
14
 
 
 
 
 
15
  try:
16
+ tts = gTTS(prompt, lang='es')
17
  audio_path = "audio.mp3"
18
  tts.save(audio_path)
19
  except Exception as e: