Update app.py
app.py CHANGED
@@ -4,11 +4,12 @@ import os
 import time
 import torch
 from scipy.io import wavfile
+import soundfile as sf
 import datasets
 
 # Bark imports
 from bark import generate_audio, SAMPLE_RATE
-from bark.generation import preload_models
+from bark.generation import preload_models, generate_text_semantic
 
 # Hugging Face Transformers
 from transformers import (
@@ -24,6 +25,9 @@ class VoiceSynthesizer:
         self.working_dir = os.path.join(self.base_dir, "working_files")
         os.makedirs(self.working_dir, exist_ok=True)
 
+        # Store reference voice
+        self.reference_voice = None
+
         # Initialize models dictionary
         self.models = {
             "bark": self._initialize_bark,
@@ -41,6 +45,38 @@ class VoiceSynthesizer:
         except Exception as e:
             print(f"Bark model loading error: {e}")
 
+    def process_reference_audio(self, reference_audio):
+        """Process and store reference audio for voice cloning"""
+        try:
+            # Ensure audio is in the right format
+            if reference_audio is None:
+                return "No audio provided"
+
+            # Convert to numpy array if needed
+            if isinstance(reference_audio, tuple):
+                reference_audio = reference_audio[0]
+
+            # Ensure the audio is mono and normalized
+            if reference_audio.ndim > 1:
+                reference_audio = reference_audio.mean(axis=1)
+
+            # Resample or trim if necessary
+            if len(reference_audio) > SAMPLE_RATE * 10:  # Limit to 10 seconds
+                reference_audio = reference_audio[:SAMPLE_RATE * 10]
+
+            # Save reference audio
+            ref_filename = os.path.join(self.working_dir, "reference_voice.wav")
+            sf.write(ref_filename, reference_audio, SAMPLE_RATE)
+
+            # Store reference voice
+            self.reference_voice = reference_audio
+
+            return "Reference voice processed successfully"
+
+        except Exception as e:
+            print(f"Reference audio processing error: {e}")
+            return f"Error processing reference audio: {str(e)}"
+
     def _initialize_bark(self):
         """Bark model initialization (already done in __init__)"""
         return None
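A note on the new process_reference_audio method: Gradio's gr.Audio(type="numpy") component hands the callback a (sample_rate, data) tuple, so indexing the tuple with [0] yields the sample rate rather than the waveform, and the clip is trimmed to ten seconds but never resampled to Bark's 24 kHz rate. A minimal preprocessing sketch (a hypothetical helper, not part of this commit, assuming numpy and scipy are available) could look like this:

import numpy as np
from scipy.signal import resample_poly

TARGET_SR = 24_000  # Bark's SAMPLE_RATE (24 kHz)

def prepare_reference(audio, target_sr=TARGET_SR, max_seconds=10):
    """Turn a Gradio (sample_rate, data) tuple into mono float audio at target_sr."""
    sample_rate, data = audio                  # Gradio numpy audio is (rate, ndarray)
    if np.issubdtype(data.dtype, np.integer):  # int recordings -> [-1, 1] floats
        data = data.astype(np.float32) / np.iinfo(data.dtype).max
    else:
        data = data.astype(np.float32)
    if data.ndim > 1:                          # stereo -> mono
        data = data.mean(axis=1)
    if sample_rate != target_sr:               # resample to Bark's rate
        data = resample_poly(data, target_sr, sample_rate)
    return data[: target_sr * max_seconds]     # cap the clip length

A helper along these lines would let process_reference_audio store audio that is already mono and at Bark's sample rate before writing it out with sf.write.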
@@ -67,12 +103,6 @@ class VoiceSynthesizer:
             print(f"SpeechT5 model loading error: {e}")
             return None
 
-    def set_model(self, model_name):
-        """Set the current model for speech synthesis"""
-        if model_name not in self.models:
-            raise ValueError(f"Model {model_name} not supported")
-        self.current_model = model_name
-
     def generate_speech(self, text, model_name=None, voice_preset=None):
         """Generate speech using selected model"""
         if not text or not text.strip():
@@ -97,21 +127,34 @@ class VoiceSynthesizer:
 
     def _generate_bark_speech(self, text, voice_preset=None):
         """Generate speech using Bark"""
-        #
+        # Default Bark voice presets
         voice_presets = [
             "v2/en_speaker_6",  # Female
             "v2/en_speaker_3",  # Male
             "v2/en_speaker_9",  # Neutral
         ]
 
-        #
-        history_prompt =
+        # Prepare history prompt
+        history_prompt = None
 
-        #
-
-
-
-
+        # Check if a reference voice is available
+        if self.reference_voice is not None:
+            # Save reference voice for Bark
+            ref_filename = os.path.join(self.working_dir, "reference_voice.wav")
+            history_prompt = ref_filename
+        elif voice_preset:
+            # Use predefined voice preset
+            history_prompt = voice_presets[0] if "v2/en_speaker" not in voice_preset else voice_preset
+
+        # Generate audio with or without history prompt
+        if history_prompt:
+            audio_array = generate_audio(
+                text,
+                history_prompt=history_prompt
+            )
+        else:
+            # Fallback to default generation
+            audio_array = generate_audio(text)
 
         # Save generated audio
         filename = f"bark_speech_{int(time.time())}.wav"
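One caveat on the history_prompt branch above: in the Bark releases I'm aware of, generate_audio expects history_prompt to be either a built-in preset name (such as "v2/en_speaker_6") or an .npz voice prompt holding semantic/coarse/fine token arrays, not a path to a raw .wav file, so the reference-voice path would likely need an extra prompt-building step before it clones anything. The preset path is the well-trodden one; a standalone sketch of it, mirroring the calls this file already uses, would be:

from bark import generate_audio, SAMPLE_RATE
from bark.generation import preload_models
from scipy.io import wavfile

preload_models()  # downloads/caches the Bark checkpoints on first use

# Preset-driven generation; cloning from an arbitrary recording would first
# require converting that recording into an .npz history prompt.
audio_array = generate_audio("Hello from Bark!", history_prompt="v2/en_speaker_6")
wavfile.write("bark_demo.wav", SAMPLE_RATE, audio_array)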
@@ -159,7 +202,13 @@ def create_interface():
 
         with gr.Row():
             with gr.Column():
-                gr.Markdown("##
+                gr.Markdown("## 1. Capture Reference Voice")
+                reference_audio = gr.Audio(sources=["microphone", "upload"], type="numpy")
+                process_ref_btn = gr.Button("Process Reference Voice")
+                process_ref_output = gr.Textbox(label="Reference Voice Processing")
+
+            with gr.Column():
+                gr.Markdown("## 2. Generate Speech")
                 text_input = gr.Textbox(label="Enter Text to Speak")
 
                 # Model Selection
@@ -196,6 +245,13 @@ def create_interface():
         audio_output = gr.Audio(label="Generated Speech")
         error_output = gr.Textbox(label="Errors", visible=True)
 
+        # Process reference audio
+        process_ref_btn.click(
+            fn=synthesizer.process_reference_audio,
+            inputs=reference_audio,
+            outputs=process_ref_output
+        )
+
         # Dynamic model and preset visibility
         def update_model_visibility(model):
             if "bark" in model.lower():
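Taken together, the commit wires a two-step flow: capture and process a reference clip, then synthesize with Bark, falling back to presets when no reference exists. A rough usage sketch outside the Gradio UI (assuming the file is importable as app and a mono numpy clip is at hand; the constructor preloads Bark, so this is slow on CPU) might be:

import numpy as np
from app import VoiceSynthesizer  # hypothetical import path for this Space's app.py

synth = VoiceSynthesizer()

# Stand-in reference clip: three seconds of silence at Bark's 24 kHz rate.
reference = np.zeros(24_000 * 3, dtype=np.float32)
print(synth.process_reference_audio(reference))   # status string from the new method

# With a reference stored, _generate_bark_speech points history_prompt at the
# saved wav; otherwise it falls back to a preset such as "v2/en_speaker_6".
result = synth.generate_speech("Hello there!", model_name="bark",
                               voice_preset="v2/en_speaker_6")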