Update app.py
app.py
CHANGED
@@ -105,7 +105,7 @@ class App:
             input_file_multi = gr.Files(label="Upload one or more audio/video files here", elem_id="multi_input", type='filepath', file_count="multiple", allow_reordering=True, file_types=["audio","video"], visible=False, interactive=True)
 
             with gr.Row():
-                with gr.Column(scale=
+                with gr.Column(scale=5):
                     with gr.Row():
                         model_list = self.whisper_inf.available_models if len(whisper_params["model_list"]) == 0 else whisper_params["model_list"]
                         dd_model = gr.Dropdown(choices=model_list, value=whisper_params["model_size"],label="Model", info="Larger models increase transcription quality, but reduce performance", interactive=True)
@@ -114,12 +114,16 @@ class App:
                         dd_translate_model = gr.Dropdown(choices=self.nllb_inf.available_models, value=nllb_params["model_size"],label="Model", info="Model used for translation", interactive=True)
                         dd_target_lang = gr.Dropdown(choices=["English","Dutch","French","German"], value=nllb_params["target_lang"],label="Language", info="Language used for output translation", interactive=True)
                 with gr.Column(scale=1):
-
-
-
+                    with gr.Row():
+                        cb_timestamp_preview = gr.Checkbox(value=whisper_params["add_timestamp_preview"],label="Add timestamp to preview", interactive=True)
+                        cb_timestamp_file = gr.Checkbox(value=whisper_params["add_timestamp_file"], label="Add timestamp to output", interactive=True)
+                    with gr.Row():
+                        cb_diarize = gr.Checkbox(value=diarization_params["is_diarize"],label="Speaker diarization", info="Token required for diarization",interactive=True)
+                with gr.Column(scale=1):
                     cb_translate = gr.Checkbox(value=whisper_params["is_translate"], label="Translate to English", info="Translate using OpenAI Whisper's built-in module",interactive=True)
                     cb_translate_output = gr.Checkbox(value=translation_params["translate_output"], label="Translate to selected language", info="Translate using Facebook's NLLB",interactive=True)
-
+
+
             with gr.Accordion("Diarization options", open=False, visible=True):
                 tb_hf_token = gr.Text(label="Token", value=diarization_params["hf_token"],info="An access token is required to use diarization & can be created [here](https://hf.co/settings/tokens). If not done yet for your account, you need to accept the terms & conditions of [diarization](https://huggingface.co/pyannote/speaker-diarization-3.1) & [segmentation](https://huggingface.co/pyannote/segmentation-3.0).")
                 dd_diarization_device = gr.Dropdown(label="Device",
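For reference, a minimal, self-contained sketch of the layout this commit produces: a wide model column (`scale=5`) next to two narrow `scale=1` columns holding the new timestamp/diarization checkboxes and the existing translation checkboxes. This is an illustration only, assuming Gradio 4.x; the hardcoded defaults and model choices stand in for the `whisper_params` / `nllb_params` / `diarization_params` lookups and the surrounding `App` class, which are not shown in this hunk.

```python
# Standalone sketch of the Row/Column nesting introduced by this change.
# Assumption: placeholder defaults replace the *_params dict lookups in app.py.
import gradio as gr

with gr.Blocks() as demo:
    input_file_multi = gr.Files(label="Upload one or more audio/video files here",
                                type="filepath", file_count="multiple",
                                file_types=["audio", "video"], interactive=True)

    with gr.Row():
        with gr.Column(scale=5):
            with gr.Row():
                dd_model = gr.Dropdown(choices=["tiny", "base", "large-v3"], value="base",
                                       label="Model", interactive=True)
                dd_target_lang = gr.Dropdown(choices=["English", "Dutch", "French", "German"],
                                             value="English", label="Language", interactive=True)
        with gr.Column(scale=1):
            # New in this commit: two stacked rows of options.
            with gr.Row():
                cb_timestamp_preview = gr.Checkbox(value=False, label="Add timestamp to preview",
                                                   interactive=True)
                cb_timestamp_file = gr.Checkbox(value=True, label="Add timestamp to output",
                                                interactive=True)
            with gr.Row():
                cb_diarize = gr.Checkbox(value=False, label="Speaker diarization",
                                         info="Token required for diarization", interactive=True)
        with gr.Column(scale=1):
            cb_translate = gr.Checkbox(value=False, label="Translate to English", interactive=True)
            cb_translate_output = gr.Checkbox(value=False, label="Translate to selected language",
                                              interactive=True)

if __name__ == "__main__":
    demo.launch()
```

The `scale` arguments only set relative column widths within the Row. In a complete app these checkbox components would typically be passed through the `inputs=` list of a button's `.click()` handler so their values reach the transcription callback; how `app.py` actually wires them is outside this diff.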