import os
import time
import json

import gradio as gr
import torch
import torchaudio
import numpy as np
from denoiser.demucs import Demucs
from pydub import AudioSegment

modelpath = './denoiser/master64.th'


def transcribe(file_upload, microphone=None):
    # Prefer the microphone recording when available, otherwise fall back to the uploaded file.
    # `microphone` defaults to None so the function also works when the interface passes a single input.
    file = microphone if microphone is not None else file_upload

    # Load the pretrained Demucs denoiser (64 hidden channels) on CPU.
    model = Demucs(hidden=64)
    state_dict = torch.load(modelpath, map_location='cpu')
    model.load_state_dict(state_dict)
    demucs = model

    # Denoise the input and normalize the output to avoid clipping.
    x, sr = torchaudio.load(file)
    with torch.no_grad():
        out = demucs(x[None])[0]
    out = out / max(out.abs().max().item(), 1)
    torchaudio.save('enhanced.wav', out, sr)

    # Only the denoised audio needs its bitrate reduced before speech recognition.
    enhanced = AudioSegment.from_wav('enhanced.wav')
    enhanced.export('enhanced.wav', format="wav", bitrate="256k")
    return "enhanced.wav"


demo = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(type="filepath", label="Original audio for voice quality inspection"),
    ],
    outputs=gr.Audio(type="filepath", label="Output"),
    title="