LAP-DEV committed on
Commit
7c85ef1
·
verified ·
1 Parent(s): c1a2fef

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -10
app.py CHANGED
@@ -97,19 +97,15 @@ class App:
97
 
98
  with gr.Row():
99
  with gr.Column(scale=1):
100
- with gr.Row():
101
- input_multi = gr.Radio(["Audio", "Video", "Multiple"], label="Process one or multiple files", value="Audio")
102
- dd_file_format = gr.Dropdown(choices=["CSV","SRT","TXT"], value=whisper_params["output_format"], label="Output format", multiselect=True, interactive=True, visible=True)
103
- with gr.Row():
104
- cb_timestamp_preview = gr.Checkbox(value=whisper_params["add_timestamp_preview"],label="Add timestamp to preview", interactive=True)
105
- cb_timestamp_file = gr.Checkbox(value=whisper_params["add_timestamp_file"], label="Add timestamp to output", interactive=True)
106
  with gr.Column(scale=4):
107
  input_file_audio = gr.Audio(type='filepath', elem_id="audio_input", show_download_button=True, visible=True, interactive=True)
108
  input_file_video = gr.Video(elem_id="video_input", show_download_button=True, visible=False, interactive=True)
109
  input_file_multi = gr.Files(label="Upload one or more audio/video files here", elem_id="multi_input", type='filepath', file_count="multiple", allow_reordering=True, file_types=["audio","video"], visible=False, interactive=True)
110
 
111
  with gr.Row():
112
- with gr.Column(scale=4):
113
  with gr.Row():
114
  model_list = self.whisper_inf.available_models if len(whisper_params["model_list"]) == 0 else whisper_params["model_list"]
115
  dd_model = gr.Dropdown(choices=model_list, value=whisper_params["model_size"],label="Model", info="Larger models increase transcription quality, but reduce performance", interactive=True)
@@ -119,10 +115,13 @@ class App:
119
  dd_target_lang = gr.Dropdown(choices=["English","Dutch","French","German"], value=nllb_params["target_lang"],label="Language", info="Language used for output translation", interactive=True)
120
  with gr.Column(scale=1):
121
  with gr.Row():
122
- cb_diarize = gr.Checkbox(value=diarization_params["is_diarize"],label="Speaker diarization", info="Token required for diarization",interactive=True)
 
123
  with gr.Row():
124
- cb_translate = gr.Checkbox(value=whisper_params["is_translate"], label="Translate to English", info="Translate using OpenAI Whisper's built-in module",interactive=True)
125
- cb_translate_output = gr.Checkbox(value=translation_params["translate_output"], label="Translate to selected language", info="Translate using Facebook's NLLB",interactive=True)
 
 
126
 
127
  with gr.Accordion("Diarization options", open=False, visible=True):
128
  tb_hf_token = gr.Text(label="Token", value=diarization_params["hf_token"],info="An access token is required to use diarization & can be created [here](https://hf.co/settings/tokens). If not done yet for your account, you need to accept the terms & conditions of [diarization](https://huggingface.co/pyannote/speaker-diarization-3.1) & [segmentation](https://huggingface.co/pyannote/segmentation-3.0).")
 
97
 
98
  with gr.Row():
99
  with gr.Column(scale=1):
100
+ input_multi = gr.Radio(["Audio", "Video", "Multiple"], label="Process one or multiple files", value="Audio")
101
+ dd_file_format = gr.Dropdown(choices=["CSV","SRT","TXT"], value=whisper_params["output_format"], label="Output format", multiselect=True, interactive=True, visible=True)
 
 
 
 
102
  with gr.Column(scale=4):
103
  input_file_audio = gr.Audio(type='filepath', elem_id="audio_input", show_download_button=True, visible=True, interactive=True)
104
  input_file_video = gr.Video(elem_id="video_input", show_download_button=True, visible=False, interactive=True)
105
  input_file_multi = gr.Files(label="Upload one or more audio/video files here", elem_id="multi_input", type='filepath', file_count="multiple", allow_reordering=True, file_types=["audio","video"], visible=False, interactive=True)
106
 
107
  with gr.Row():
108
+ with gr.Column(scale=3):
109
  with gr.Row():
110
  model_list = self.whisper_inf.available_models if len(whisper_params["model_list"]) == 0 else whisper_params["model_list"]
111
  dd_model = gr.Dropdown(choices=model_list, value=whisper_params["model_size"],label="Model", info="Larger models increase transcription quality, but reduce performance", interactive=True)
 
115
  dd_target_lang = gr.Dropdown(choices=["English","Dutch","French","German"], value=nllb_params["target_lang"],label="Language", info="Language used for output translation", interactive=True)
116
  with gr.Column(scale=1):
117
  with gr.Row():
118
+ cb_timestamp_preview = gr.Checkbox(value=whisper_params["add_timestamp_preview"],label="Add timestamp to preview", interactive=True)
119
+ cb_timestamp_file = gr.Checkbox(value=whisper_params["add_timestamp_file"], label="Add timestamp to output", interactive=True)
120
  with gr.Row():
121
+ cb_diarize = gr.Checkbox(value=diarization_params["is_diarize"],label="Speaker diarization", info="Token required for diarization",interactive=True)
122
+ with gr.Column(scale=1):
123
+ cb_translate = gr.Checkbox(value=whisper_params["is_translate"], label="Translate to English", info="Translate using OpenAI Whisper's built-in module",interactive=True)
124
+ cb_translate_output = gr.Checkbox(value=translation_params["translate_output"], label="Translate to selected language", info="Translate using Facebook's NLLB",interactive=True)
125
 
126
  with gr.Accordion("Diarization options", open=False, visible=True):
127
  tb_hf_token = gr.Text(label="Token", value=diarization_params["hf_token"],info="An access token is required to use diarization & can be created [here](https://hf.co/settings/tokens). If not done yet for your account, you need to accept the terms & conditions of [diarization](https://huggingface.co/pyannote/speaker-diarization-3.1) & [segmentation](https://huggingface.co/pyannote/segmentation-3.0).")