DeepLearning101 committed on
Commit
0075f67
·
verified ·
1 Parent(s): 00885ea

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -23
app.py CHANGED
@@ -9,43 +9,48 @@ from denoiser.demucs import Demucs
9
  from pydub import AudioSegment
10
  from transformers import AutoModelForSequenceClassification, AutoTokenizer
11
 
12
- # 設置 Hugging Face Hub 的 Access Token
13
- auth_token = os.getenv("HF_TOKEN")
14
 
15
- # 加載私有模型
16
- model_id = "DeepLearning101/Speech-Quality-Inspection_Meta-Denoiser"
17
- model = AutoModelForSequenceClassification.from_pretrained(model_id, token=auth_token)
18
- tokenizer = AutoTokenizer.from_pretrained(model_id, token=auth_token)
 
 
19
 
20
def transcribe(file_upload, microphone):
    """Denoise an audio file with Demucs, then score it with a text classifier.

    The microphone recording takes precedence when both inputs are given.
    Returns a tuple of (path to the enhanced WAV file, softmax predictions).
    """
    file = microphone if microphone is not None else file_upload
    demucs_model = Demucs(hidden=64)
    # NOTE(review): "path_to_model_checkpoint" is a placeholder, not a real
    # file — torch.load will fail at runtime until a valid checkpoint path
    # is supplied. (Original comment: make sure to provide the correct
    # model file path.)
    state_dict = torch.load("path_to_model_checkpoint", map_location='cpu')
    demucs_model.load_state_dict(state_dict)
    x, sr = torchaudio.load(file)
    out = demucs_model(x[None])[0]
    # Peak-normalise; dividing by at least 1 means quiet audio is never amplified.
    out = out / max(out.abs().max().item(), 1)
    torchaudio.save('enhanced.wav', out, sr)
    # Only the denoised output needs its bitrate lowered before speech recognition.
    enhanced = AudioSegment.from_wav('enhanced.wav')
    enhanced.export('enhanced.wav', format="wav", bitrate="256k")

    # Original comment: assumes the model is used for text classification.
    # NOTE(review): this tokenizes the literal string "enhanced.wav" — the
    # filename, not the audio content — so `predictions` is meaningless.
    # A speech model would need a feature extractor over the waveform instead.
    inputs = tokenizer("enhanced.wav", return_tensors="pt")
    outputs = model(**inputs)
    predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)

    return "enhanced.wav", predictions
38
 
39
  demo = gr.Interface(
40
  fn=transcribe,
41
  inputs=[
42
- gr.Audio(type="filepath", label="語音質檢麥克風實時錄音"),
43
- gr.Audio(type="filepath", label="語音質檢原始音檔"),
44
- ],
45
- outputs=[
46
- gr.Audio(type="filepath", label="Output"),
47
- gr.Textbox(label="Model Predictions")
48
  ],
 
 
 
 
 
49
  title="<p style='text-align: center'><a href='https://www.twman.org/AI' target='_blank'>語音質檢噪音去除 (語音增強):Meta Denoiser</a>",
50
  description="為了提升語音識別的效果,可以在識別前先進行噪音去除",
51
  allow_flagging="never",
@@ -57,4 +62,4 @@ demo = gr.Interface(
57
  ],
58
  )
59
 
60
- demo.launch(debug=True)
 
9
  from pydub import AudioSegment
10
  from transformers import AutoModelForSequenceClassification, AutoTokenizer
11
 
12
+ # # 設置 Hugging Face Hub 的 Access Token
13
+ # auth_token = os.getenv("HF_TOKEN")
14
 
15
+ # # 加載私有模型
16
+ # model_id = "DeepLearning101/Speech-Quality-Inspection_Meta-Denoiser"
17
+ # model = AutoModelForSequenceClassification.from_pretrained(model_id, token=auth_token)
18
+ # tokenizer = AutoTokenizer.from_pretrained(model_id, token=auth_token)
19
+
20
# Local Demucs checkpoint; the "64" presumably matches Demucs(hidden=64)
# used below — TODO confirm the file actually ships with the repo.
modelpath = './denoiser/master64.th'
21
 
22
def _get_demucs():
    """Lazily load the Demucs denoiser once and reuse it across requests.

    The original code rebuilt the model and re-read the checkpoint from disk
    on every call; caching it on the function object avoids that cost without
    adding any module-level import.
    """
    cached = getattr(_get_demucs, "_model", None)
    if cached is None:
        cached = Demucs(hidden=64)
        state_dict = torch.load(modelpath, map_location='cpu')
        cached.load_state_dict(state_dict)
        cached.eval()  # inference mode: disable dropout / batch-norm updates
        _get_demucs._model = cached
    return cached


def transcribe(file_upload, microphone):
    """Denoise an uploaded or recorded audio file with Meta's Demucs.

    Parameters
    ----------
    file_upload : str | None
        Path of the uploaded audio file (Gradio ``type="filepath"``).
    microphone : str | None
        Path of the microphone recording; takes precedence when both
        inputs are provided.

    Returns
    -------
    str
        Path of the enhanced WAV file written to the working directory.

    Raises
    ------
    ValueError
        If neither a recording nor an upload was provided.
    """
    file = microphone if microphone is not None else file_upload
    if file is None:
        # Fail with a clear message instead of an opaque torchaudio error.
        raise ValueError("No audio provided: record or upload a file first.")
    x, sr = torchaudio.load(file)
    with torch.no_grad():  # pure inference; skip autograd bookkeeping
        out = _get_demucs()(x[None])[0]
    # Peak-normalise; dividing by at least 1 means quiet audio is never amplified.
    out = out / max(out.abs().max().item(), 1)
    torchaudio.save('enhanced.wav', out, sr)
    # Only the denoised output needs its bitrate lowered before speech recognition.
    enhanced = AudioSegment.from_wav('enhanced.wav')
    enhanced.export('enhanced.wav', format="wav", bitrate="256k")
    return "enhanced.wav"
35
 
36
+ # # 假設模型是用於文本分類
37
+ # inputs = tokenizer("enhanced.wav", return_tensors="pt")
38
+ # outputs = model(**inputs)
39
+ # predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
40
 
41
+ # return "enhanced.wav", predictions
42
 
43
  demo = gr.Interface(
44
  fn=transcribe,
45
  inputs=[
46
+ gr.Audio(source="microphone", type="filepath", optional=True, label="語音質檢麥克風實時錄音"),
47
+ gr.Audio(source="upload", type="filepath", optional=True, label="語音質檢原始音檔"),
 
 
 
 
48
  ],
49
+ outputs=gr.Audio(type="filepath", label="Output"),
50
+ # outputs=[
51
+ # gr.Audio(type="filepath", label="Output"),
52
+ # gr.Textbox(label="Model Predictions")
53
+ # ],
54
  title="<p style='text-align: center'><a href='https://www.twman.org/AI' target='_blank'>語音質檢噪音去除 (語音增強):Meta Denoiser</a>",
55
  description="為了提升語音識別的效果,可以在識別前先進行噪音去除",
56
  allow_flagging="never",
 
62
  ],
63
  )
64
 
65
+ demo.launch(enable_queue=True, debug=True)