Letsur committed on
Commit
ce79f00
·
verified ·
1 Parent(s): d46fcfc

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +133 -0
  2. requirements.txt +9 -0
app.py ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import io
3
+ from typing_extensions import Literal
4
+
5
+ import gradio as gr
6
+ import tempfile
7
+ import numpy as np
8
+
9
+ from dotenv import load_dotenv
10
+ from elevenlabs.client import ElevenLabs
11
+ from elevenlabs import play, stream, save
12
+ from elevenlabs import Voice, VoiceSettings
13
+ from pydub import AudioSegment
14
+ from pydub.playback import play
15
+ import imageio_ffmpeg as ffmpeg
16
+ import requests
17
+ from audiostretchy.stretch import AudioStretch
18
+
19
+
20
+ load_dotenv()
21
+
22
def verify_auth(username, password):
    """Gradio auth callback: return True iff the supplied credentials match
    the USER/PASSWORD values loaded from the environment.

    Args:
        username: login name entered by the user.
        password: password entered by the user.

    Returns:
        bool: True when both values match the configured credentials.
    """
    import hmac

    # If either credential is unconfigured (None), fall back to plain
    # equality — hmac.compare_digest raises TypeError on non-str input.
    if not isinstance(USER, str) or not isinstance(PASSWORD, str):
        return username == USER and password == PASSWORD
    # compare_digest gives a constant-time comparison, avoiding a timing
    # side-channel on the password check.
    return hmac.compare_digest(username, USER) and hmac.compare_digest(
        password, PASSWORD
    )
28
+
29
# Secrets are loaded from the environment (populated by load_dotenv above).
# NOTE(review): the env var name "USER" collides with the standard Unix
# $USER variable — confirm the deployment actually sets it explicitly.
ELE_API_KEY = os.getenv("ELE_API_KEY")  # ElevenLabs API key
USER = os.getenv("USER")                # login name for the Gradio auth gate
PASSWORD = os.getenv("PASSWORD")        # password for the Gradio auth gate

# ElevenLabs model id used for every synthesis request.
MODEL = "eleven_multilingual_v2"

ele_client = ElevenLabs(api_key=ELE_API_KEY)

# Display names offered in the UI voice dropdown.
VOICE = [
    "승현",
    "우승"
]

# Maps a UI display name to its ElevenLabs voice id.
KEY_MAPPING = {
    "승현": "0RBbbgk6KUJxHmWzPiHz", # voice mix: 승현 + Jessica (2:1)
    "우승": "ASwOiisDbuaP2R1jUQU6", # voice mix: 우승 + TTS_KKC (1:1)
}


# Point pydub at the ffmpeg binary bundled with imageio-ffmpeg so no
# system-wide ffmpeg install is required.
AudioSegment.converter = ffmpeg.get_ffmpeg_exe()
49
+
50
+
51
def change_pitch(audio_segment, pitch_shift):
    """Shift the pitch of *audio_segment* by *pitch_shift* octaves.

    Works by re-interpreting the raw samples at a scaled frame rate
    (2**pitch_shift times the original) and then declaring the original
    frame rate again, so playback speed is preserved while pitch moves.

    Args:
        audio_segment: a pydub AudioSegment to transform.
        pitch_shift: octaves to shift; positive raises pitch, negative lowers.

    Returns:
        A new AudioSegment at the original frame rate with shifted pitch.
    """
    scaled_rate = int(audio_segment.frame_rate * (2.0 ** pitch_shift))
    # Re-wrap the same raw bytes under the scaled rate, then restore the
    # original rate so the duration stays the same.
    resampled = audio_segment._spawn(
        audio_segment.raw_data,
        overrides={'frame_rate': scaled_rate},
    )
    return resampled.set_frame_rate(audio_segment.frame_rate)
55
+
56
+
57
def predict(
    text: str,
    voice: str,
    output_file_format: Literal["mp3"] = "mp3",  # was "": produced a bogus "." suffix and format="" export
    speed: float = 1.0,
    pitch_shift: float = 0.0,
    stability: float = 0.5,
    similarity: float = 0.7,
    style_exaggeration: float = 0.,
    speaker_boost: bool = True
):
    """Synthesize *text* with ElevenLabs, then post-process speed and pitch.

    Args:
        text: input text to speak.
        voice: UI display name; must be a key of KEY_MAPPING.
        output_file_format: audio container for the result ("mp3").
        speed: playback-speed factor; stretching uses ratio 1/speed.
        pitch_shift: octaves to shift pitch (0.0 = unchanged).
        stability / similarity / style_exaggeration / speaker_boost:
            passed straight through to ElevenLabs VoiceSettings.

    Returns:
        str: path to a temporary audio file holding the processed speech.

    Raises:
        requests.exceptions.RequestException: if speech generation fails.
    """
    try:
        voice_setup = Voice(
            voice_id=KEY_MAPPING[voice],
            settings=VoiceSettings(
                stability=stability,
                similarity_boost=similarity,
                style=style_exaggeration,
                use_speaker_boost=speaker_boost,
            ),
        )

        audio = ele_client.generate(
            text=text,
            voice=voice_setup,
            model=MODEL,
        )
        # generate() yields byte chunks; collect them into one clip.
        audio_data = b''.join(audio)

    except Exception as e:
        # Chain the original exception so the root cause stays visible.
        raise requests.exceptions.RequestException(
            f"An error occurred while generating speech. "
            f"Please check your API key and try again. {str(e)}"
        ) from e

    print(f"[Text] {text}")

    # Time-stretch without changing pitch; the stretch ratio is the
    # inverse of the requested speed (speed 2.0 -> ratio 0.5).
    audio_stretch = AudioStretch()
    audio_stretch.open_mp3(io.BytesIO(audio_data))
    audio_stretch.stretch(ratio=1/speed)

    # Export the final audio to a temporary file (delete=False so Gradio
    # can serve the file after this function returns).
    with tempfile.NamedTemporaryFile(suffix=f".{output_file_format}", delete=False) as temp_file:
        audio_stretch.save(path=temp_file.name)
        audio = AudioSegment.from_file(temp_file.name)

        # Adjust pitch only when a shift was requested.
        if pitch_shift != 0.0:
            audio = change_pitch(audio, pitch_shift)

        audio.export(temp_file.name, format=output_file_format)
        temp_file_path = temp_file.name

    return temp_file_path
104
+
105
+
106
# --- Gradio UI: wires the predict() TTS pipeline to a simple web form. ---
with gr.Blocks() as demo:
    gr.Markdown("# <center> Letsur Text-To-Speech API with Gradio </center>")
    with gr.Row(variant="panel"):
        # Voice display names come from VOICE; predict() maps them to ids.
        voice = gr.Dropdown(choices=VOICE, label="Voice Options", value="승현")
        output_file_format = gr.Dropdown(choices=["mp3"], label="Output Options", value="mp3")

    text = gr.Textbox(label="Input text",
                      value="안녕하세요.",
                      placeholder="안녕하세요.")

    # Additional parameters
    with gr.Accordion("Advanced Settings", open=False):
        speed = gr.Slider(label="speed", minimum=0.8, maximum=1.2, step=0.1, value=1.0)
        pitch_shift = gr.Slider(label="pitch_shift", minimum=-0.1, maximum=0.1, step=0.05, value=0.0)  # range: -0.1..0.1 octaves
        stability = gr.Slider(label="stability", minimum=0., maximum=1., step=0.1, value=1.0)  # range: 0..1
        similarity = gr.Slider(label="similarity", minimum=0., maximum=1., step=0.1, value=1.0)  # range: 0..1
        style_exaggeration = gr.Slider(label="style_exaggeration", minimum=0., maximum=1., step=0.1, value=0.)  # range: 0..1
        speaker_boost = gr.Checkbox(label="speaker_boost", value=True)  # True or False

    btn = gr.Button("Text-To-Speech")
    output_audio = gr.Audio(label="Speech Output")

    # Input order must match predict()'s parameter order.
    inputs = [text, voice, output_file_format] + [speed, pitch_shift, stability, similarity, style_exaggeration, speaker_boost]

    # Enter in the textbox and the button both trigger synthesis; only the
    # textbox route is exposed as a named API endpoint.
    text.submit(fn=predict, inputs=inputs, outputs=output_audio, api_name="predict")
    btn.click(fn=predict, inputs=inputs, outputs=output_audio, api_name=False)

# NOTE(review): verify_auth and USER/PASSWORD are defined but never passed
# to launch(auth=...) — the app currently launches unauthenticated; confirm
# whether launch(auth=verify_auth) was intended.
demo.queue().launch()
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ gradio==4.36.1
2
+ python-dotenv==1.0.0
3
+ typing_extensions==4.8.0
4
+ pydub==0.25.1
5
+ audiostretchy==1.3.5
6
+ elevenlabs
7
+ ffprobe
8
+ imageio[ffmpeg]
9
+ numpy==1.26.4