import os
import torch
import librosa
import gradio as gr
from scipy.io.wavfile import write
from transformers import WavLMModel

import utils
from models import SynthesizerTrn
from mel_processing import mel_spectrogram_torch
from speaker_encoder.voice_encoder import SpeakerEncoder

'''
def get_wavlm():
    os.system('gdown https://drive.google.com/uc?id=12-cB34qCTvByWT-QtOcZaqwwO21FLSqU')
    shutil.move('WavLM-Large.pt', 'wavlm')
'''

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

print("Loading FreeVC...")
hps = utils.get_hparams_from_file("configs/freevc.json")
freevc = SynthesizerTrn(
    hps.data.filter_length // 2 + 1,
    hps.train.segment_size // hps.data.hop_length,
    **hps.model).to(device)
_ = freevc.eval()
_ = utils.load_checkpoint("checkpoints/freevc.pth", freevc, None)
smodel = SpeakerEncoder('speaker_encoder/ckpt/pretrained_bak_5805000.pt')

print("Loading FreeVC(24k)...")
hps = utils.get_hparams_from_file("configs/freevc-24.json")
freevc_24 = SynthesizerTrn(
    hps.data.filter_length // 2 + 1,
    hps.train.segment_size // hps.data.hop_length,
    **hps.model).to(device)
_ = freevc_24.eval()
_ = utils.load_checkpoint("checkpoints/freevc-24.pth", freevc_24, None)

print("Loading FreeVC-s...")
hps = utils.get_hparams_from_file("configs/freevc-s.json")
freevc_s = SynthesizerTrn(
    hps.data.filter_length // 2 + 1,
    hps.train.segment_size // hps.data.hop_length,
    **hps.model).to(device)
_ = freevc_s.eval()
_ = utils.load_checkpoint("checkpoints/freevc-s.pth", freevc_s, None)

print("Loading WavLM for content...")
cmodel = WavLMModel.from_pretrained("microsoft/wavlm-large").to(device)
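
# Note: `hps` now holds the last-loaded config (configs/freevc-s.json); convert() below
# reads its sampling-rate and mel parameters from this global.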



import numpy as np
from elevenlabs import voices, generate, UnauthenticatedRateLimitError

def pad_buffer(audio):
    # Pad buffer to multiple of 2 bytes
    buffer_size = len(audio)
    element_size = np.dtype(np.int16).itemsize
    if buffer_size % element_size != 0:
        audio = audio + b'\0' * (element_size - (buffer_size % element_size))
    return audio 
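
# pad_buffer pads raw PCM bytes to a whole number of int16 samples; it is not called
# anywhere else in this script.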

def generate_voice(text, voice_name):
    try:
        audio = generate(
            text[:250], # Limit to 250 characters
            voice=voice_name, 
            model="eleven_multilingual_v2"
        )
        with open("output.mp3", mode='wb') as f:
            f.write(audio)
        return "output.mp3"

    except UnauthenticatedRateLimitError as e:
        raise gr.Error("Thanks for trying out ElevenLabs TTS! You've reached the free tier limit. Please provide an API key to continue.") 
    except Exception as e:
        raise gr.Error(str(e))
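
# Note: voices()/generate()/UnauthenticatedRateLimitError assume the pre-1.0 elevenlabs
# SDK, which is expected here to pick up the API key from the ELEVEN_API_KEY environment
# variable set in convert() below.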


def convert(api_key, text, tgt, voice, save_path):
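    # `model` is hardcoded to the 24 kHz variant, so the FreeVC / FreeVC-s branches
    # below are never taken.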
    model = "FreeVC (24kHz)"
    with torch.no_grad():
        # tgt
        wav_tgt, _ = librosa.load(tgt, sr=hps.data.sampling_rate)
        wav_tgt, _ = librosa.effects.trim(wav_tgt, top_db=20)
        if model == "FreeVC" or model == "FreeVC (24kHz)":
            g_tgt = smodel.embed_utterance(wav_tgt)
            g_tgt = torch.from_numpy(g_tgt).unsqueeze(0).to(device)
        else:
            wav_tgt = torch.from_numpy(wav_tgt).unsqueeze(0).to(device)
            mel_tgt = mel_spectrogram_torch(
                wav_tgt,
                hps.data.filter_length,
                hps.data.n_mel_channels,
                hps.data.sampling_rate,
                hps.data.hop_length,
                hps.data.win_length,
                hps.data.mel_fmin,
                hps.data.mel_fmax
            )
        # src

        os.environ["ELEVEN_API_KEY"] = api_key
        src = generate_voice(text, voice)
        wav_src, _ = librosa.load(src, sr=hps.data.sampling_rate)
        wav_src = torch.from_numpy(wav_src).unsqueeze(0).to(device)
        c = cmodel(wav_src).last_hidden_state.transpose(1, 2).to(device)
        # infer
        if model == "FreeVC":
            audio = freevc.infer(c, g=g_tgt)
        elif model == "FreeVC-s":
            audio = freevc_s.infer(c, mel=mel_tgt)
        else:
            audio = freevc_24.infer(c, g=g_tgt)
        audio = audio[0][0].data.cpu().float().numpy()
        if model == "FreeVC" or model == "FreeVC-s":
            write(f"output/{save_path}.wav", hps.data.sampling_rate, audio)
        else:
            write(f"output/{save_path}.wav", 24000, audio)
    return f"output/{save_path}.wav"
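
# Rough usage sketch (hypothetical values): convert(api_key, "Hello there",
# "sliced_audio_3_0.wav", "Rachel", "Hello there 3") synthesizes the text with
# ElevenLabs, re-voices it to match the reference clip, and writes
# "output/Hello there 3.wav" at 24 kHz.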


class subtitle:
    def __init__(self, index: int, start_time, end_time, text: str):
        self.index = int(index)
        self.start_time = start_time
        self.end_time = end_time
        self.text = text.strip()

    def normalize(self, ntype: str, fps=30):
        if ntype == "prcsv":
            # timecode format hours:minutes:seconds;frames
            h, m, s, fs = (self.start_time.replace(';', ':')).split(":")
            self.start_time = int(h) * 3600 + int(m) * 60 + int(s) + round(int(fs) / fps, 5)
            h, m, s, fs = (self.end_time.replace(';', ':')).split(":")
            self.end_time = int(h) * 3600 + int(m) * 60 + int(s) + round(int(fs) / fps, 5)
        elif ntype == "srt":
            h, m, s = self.start_time.split(":")
            s = s.replace(",", ".")
            self.start_time = int(h) * 3600 + int(m) * 60 + round(float(s), 5)
            h, m, s = self.end_time.split(":")
            s = s.replace(",", ".")
            self.end_time = int(h) * 3600 + int(m) * 60 + round(float(s), 5)
        else:
            raise ValueError("ntype must be 'prcsv' or 'srt'")

    def add_offset(self, offset=0):
        self.start_time += offset
        if self.start_time < 0:
            self.start_time = 0
        self.end_time += offset
        if self.end_time < 0:
            self.end_time = 0

    def __str__(self) -> str:
        return f'id:{self.index},start:{self.start_time},end:{self.end_time},text:{self.text}'

def read_srt(uploaded_file):
    offset=0
    with open(uploaded_file.name,"r",encoding="utf-8") as f:
        file=f.readlines()
    subtitle_list=[]
    indexlist=[]
    filelength=len(file)
    for i in range(0,filelength):
        if " --> " in file[i]:
            is_st=True
            for char in file[i-1].strip().replace("\ufeff",""):
                if char not in ['0','1','2','3','4','5','6','7','8','9']:
                    is_st=False
                    break
            if is_st:
                indexlist.append(i) #get line id
    listlength=len(indexlist)
    for i in range(0,listlength-1):
        st,et=file[indexlist[i]].split(" --> ")
        id=int(file[indexlist[i]-1].strip().replace("\ufeff",""))
        text=""
        for x in range(indexlist[i]+1,indexlist[i+1]-2):
            text+=file[x]
        st=subtitle(id,st,et,text)
        st.normalize(ntype="srt")
        st.add_offset(offset=offset)
        subtitle_list.append(st)
    st,et=file[indexlist[-1]].split(" --> ")
    id=int(file[indexlist[-1]-1].strip().replace("\ufeff",""))
    text=""
    for x in range(indexlist[-1]+1,filelength):
        text+=file[x]
    st=subtitle(id,st,et,text)
    st.normalize(ntype="srt")
    st.add_offset(offset=offset)
    subtitle_list.append(st)
    return subtitle_list
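
# read_srt assumes standard SRT blocks separated by blank lines, e.g.:
#
#   1
#   00:00:01,000 --> 00:00:03,500
#   First subtitle line (may span multiple lines)
#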

import webrtcvad
from pydub import AudioSegment
from pydub.utils import make_chunks

def vad(audio_name, out_path_name):
    audio = AudioSegment.from_file(audio_name, format="wav")
    # WebRTC VAD supports only 8000, 16000, 32000, or 48000 Hz, mono, 16-bit PCM
    audio = audio.set_frame_rate(48000)
    audio = audio.set_channels(1)
    audio = audio.set_sample_width(2)

    # Initialize VAD with the most aggressive mode (integer between 0 and 3)
    vad = webrtcvad.Vad()
    vad.set_mode(3)

    # Split the audio into 30 ms frames, as required by WebRTC VAD
    frame_duration = 30  # duration of a frame in ms
    frame_width = int(audio.frame_rate * frame_duration / 1000)  # width of a frame in samples
    frames = make_chunks(audio, frame_duration)

    # Keep only the frames classified as speech
    voiced_frames = []
    for frame in frames:
        if len(frame.raw_data) < frame_width * 2:  # drop the trailing partial frame
            break
        if vad.is_speech(frame.raw_data, audio.frame_rate):
            voiced_frames.append(frame)

    # Concatenate the voiced frames back into a single audio segment
    voiced_audio = sum(voiced_frames, AudioSegment.silent(duration=0))

    voiced_audio.export(f"{out_path_name}.wav", format="wav")
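
# vad() keeps only the frames classified as speech and writes them, concatenated, to
# `<out_path_name>.wav`; trim_audio() below runs it on the looped reference clips.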


def trim_audio(intervals, input_file_path, output_file_path):
    # load the audio file
    audio = AudioSegment.from_file(input_file_path)

    # iterate over the list of time intervals
    for i, (start_time, end_time) in enumerate(intervals):
        # extract the segment of the audio
        segment = audio[start_time*1000:end_time*1000]
        output_file_path_i = f"increased_{i}.wav"
        
        if len(segment) < 5000:
            # Repeat the clip until it is at least 5 seconds long, with a few extra repeats as margin
            repeat_count = (5000 // max(len(segment), 1)) + 3
            # Repeat the audio
            longer_audio = segment * repeat_count
            # Save the extended audio
            print(f"Audio was less than 5 seconds. Extended to {len(longer_audio)} milliseconds.")
            longer_audio.export(output_file_path_i, format='wav')
            vad(f"{output_file_path_i}", f"{output_file_path}_{i}")
        else:
            print("Audio is already 5 seconds or longer.")
            segment.export(f"{output_file_path}_{i}.wav", format='wav')
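
# Slices shorter than 5 seconds are looped, presumably so the speaker encoder in
# convert() gets enough reference audio to characterize the target voice; the 5-second
# threshold is a heuristic of this script.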

import re

def sort_key(file_name):
    """Extract the last number in the file name for sorting."""
    numbers = re.findall(r'\d+', file_name)
    if numbers:
        return int(numbers[-1])
    return -1  # In case there's no number, this ensures it goes to the start.
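
# The last number in each output file name is the subtitle index appended by
# convert_from_srt, e.g. sort_key("some text 12.wav") -> 12, so merging below follows
# the SRT order.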


def merge_audios(folder_path):
    output_file = "AI配音版.wav"
    # Get all WAV files in the folder
    files = [f for f in os.listdir(folder_path) if f.endswith('.wav')]
    # Sort files based on the last digit in their names
    sorted_files = sorted(files, key=sort_key)
    
    # Initialize an empty audio segment
    merged_audio = AudioSegment.empty()
    
    # Loop through each file, in order, and concatenate them
    for file in sorted_files:
        audio = AudioSegment.from_wav(os.path.join(folder_path, file))
        merged_audio += audio
        print(f"Merged: {file}")
    
    # Export the merged audio to a new file
    merged_audio.export(output_file, format="wav")
    return output_file

import shutil

def convert_from_srt(apikey, filename, audio_full, voice, multilingual):
    subtitle_list = read_srt(filename)
    
    #audio_data, sr = librosa.load(audio_full, sr=44100)
        
    #write("audio_full.wav", sr, audio_data.astype(np.int16))

    if os.path.isdir("output"):
        shutil.rmtree("output")
    if not multilingual:
        for i in subtitle_list:
            try:
                os.makedirs("output", exist_ok=True)
                trim_audio([[i.start_time, i.end_time]], audio_full, f"sliced_audio_{i.index}")
                print(f"Synthesizing voice clip {i.index}")
                print(f"Text: {i.text}")
                convert(apikey, i.text, f"sliced_audio_{i.index}_0.wav", voice, i.text + " " + str(i.index))
            except Exception as e:
                print(e)
    else:
        for i in subtitle_list:
            try:
                os.makedirs("output", exist_ok=True)
                trim_audio([[i.start_time, i.end_time]], audio_full, f"sliced_audio_{i.index}")
                print(f"Synthesizing voice clip {i.index}")
                print(f"Text: {i.text.splitlines()[1]}")
                convert(apikey, i.text.splitlines()[1], f"sliced_audio_{i.index}_0.wav", voice, i.text.splitlines()[1] + " " + str(i.index))
            except Exception as e:
                print(e)
    return merge_audios("output")
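
# convert_from_srt drives the whole pipeline: slice the reference audio per subtitle
# entry, synthesize each line with ElevenLabs, convert the voice with FreeVC, then
# merge the clips in subtitle order.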
    
restart_markdown = ("""
### If this page does not display properly, please click [this link](https://openxlab.org.cn/apps/detail/Kevin676/OpenAI-TTS) to wake the app up. Thanks 🍻
""")

all_voices = voices() 

with gr.Blocks() as app:
    gr.Markdown("# <center>🌊💕🎶 11Labs TTS - One-Click AI Dubbing from an SRT File</center>")
    gr.Markdown("### <center>🌟 Just upload the SRT file and the original audio track of an episode, and the AI dubs the whole episode automatically! Developed by Kevin Wang </center>")
    with gr.Row():
        with gr.Column():
            inp0 = gr.Textbox(type='password', label='Enter your 11Labs API Key')
            inp1 = gr.File(file_count="single", label="Upload the SRT file for one episode")
            inp2 = gr.Audio(label="Upload the original audio track of the episode", type="filepath")

            inp3 = gr.Dropdown(choices=[ voice.name for voice in all_voices ], label='Choose a speaker for the base voice', info="Voice samples: https://huggingface.co/spaces/elevenlabs/tts", value='Rachel')
            #inp4 = gr.Dropdown(label="Choose the model for separating the accompaniment", info="UVR-HP5 removes background music more cleanly but slightly degrades the vocals", choices=["UVR-HP2", "UVR-HP5"], value="UVR-HP5")
            inp4 = gr.Checkbox(label="Is the SRT file bilingual?", info="Check this box for bilingual subtitles (each entry must contain the Chinese line first and the English line second, one line each)")
            btn = gr.Button("Start AI dubbing with one click💕", variant="primary")
        with gr.Column():
            out1 = gr.Audio(label="Your complete AI-generated dub", type="filepath")
    
        btn.click(convert_from_srt, [inp0, inp1, inp2, inp3, inp4], [out1])
    gr.Markdown("### <center>Note❗: Do not generate content that could harm any individual or organization. Please respect copyrights and intellectual property. Any use of this program is the user's own responsibility and is unrelated to the developer.</center>")
    gr.HTML('''
        <div class="footer">
                    <p>🌊🏞️🎶 - The river rushes ever east, its surging voice without end. (Gu Lin, Ming dynasty)
                    </p>
        </div>
    ''')

app.launch(share=True, show_error=True)