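# Streamlit demo: convert a text question to Spanish speech with gTTS, then lip-sync it onto face.jpg using Wav2Lip.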
import os
import streamlit as st
import subprocess
from gtts import gTTS
import cv2
import torch

# Report whether a GPU is available; the Wav2Lip subprocess below is pinned to CPU via CUDA_VISIBLE_DEVICES=''.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('Using {} for inference.'.format(device))

# Turn the prompt into speech and then into a lip-synced video; returns (video_path, error_message).
def generate_output(prompt):
    if not prompt:
        return None, "The question field is required."

    # Synthesize Spanish speech (lang='es') from the prompt and save it as audio.mp3.
    try:
        tts = gTTS(prompt, lang='es')
        audio_path = "audio.mp3"
        tts.save(audio_path)
    except Exception as e:
        return None, f"Could not generate the audio: {str(e)}"

    video_path = "video.mp4"
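    # Run Wav2Lip inference on CPU (CUDA_VISIBLE_DEVICES=''): face.jpg provides the face, the gTTS mp3 the audio;
    # --resize_factor 4 reduces the input resolution to speed things up and --nosmooth skips temporal smoothing of face detections.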
    command = f"CUDA_VISIBLE_DEVICES='' python3 inference.py --checkpoint_path checkpoints/wav2lip_gan.pth --face face.jpg --audio {audio_path} --outfile {video_path} --nosmooth --resize_factor 4"
    process = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
    
    # Surface Wav2Lip's stderr output if inference failed.
    if process.returncode != 0:
        error_message = process.stderr
        return None, f"Could not generate the video: {error_message}"

    if os.path.isfile(video_path):
        return video_path, None
    return None, "Could not generate the video"

# Minimal Streamlit UI: a text box for the question and a button that triggers video generation.
st.title("Lipsync + Artificial Intelligence")
prompt = st.text_input("Question")

if st.button("Generate Video"):
    video_path, error_message = generate_output(prompt)
    if error_message:
        st.error(f"Error: {error_message}")
    else:
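        # Read the finished MP4 and embed it in the page.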
        with open(video_path, "rb") as video_file:
            st.video(video_file.read())