# Copyright 2022-2024 Xiaomi Corp. (authors: Fangjun Kuang)
#
# See LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wave
from functools import lru_cache
from typing import List, Tuple

import numpy as np
import sherpa_onnx
from huggingface_hub import hf_hub_download

sample_rate = 16000


def read_wave(wave_filename: str) -> Tuple[np.ndarray, int]:
    """
    Args:
      wave_filename:
        Path to a wave file. It should be single channel and each sample should
        be 16-bit. Its sample rate does not need to be 16kHz.
    Returns:
      Return a tuple containing:
       - A 1-D array of dtype np.float32 containing the samples, which are
         normalized to the range [-1, 1].
       - sample rate of the wave file
    """
    with wave.open(wave_filename) as f:
        assert f.getnchannels() == 1, f.getnchannels()
        assert f.getsampwidth() == 2, f.getsampwidth()  # it is in bytes
        num_samples = f.getnframes()
        samples = f.readframes(num_samples)
        samples_int16 = np.frombuffer(samples, dtype=np.int16)
        samples_float32 = samples_int16.astype(np.float32)

        samples_float32 = samples_float32 / 32768
        return samples_float32, f.getframerate()


def decode(
    tagger: sherpa_onnx.AudioTagging,
    filename: str,
    top_k: int = -1,
) -> List[sherpa_onnx.AudioEvent]:
    """Run audio tagging on a single wave file and return the detected
    audio events. A top_k of -1 means to use the top_k value from the
    tagger's configuration."""
    s = tagger.create_stream()
    samples, sample_rate = read_wave(filename)
    s.accept_waveform(sample_rate, samples)
    events = tagger.compute(s, top_k)
    return events


def _get_nn_model_filename(
    repo_id: str,
    filename: str,
    subfolder: str = ".",
) -> str:
    """Download the given file from the Hugging Face Hub and return the
    local path of the downloaded (or cached) copy."""
    nn_model_filename = hf_hub_download(
        repo_id=repo_id,
        filename=filename,
        subfolder=subfolder,
    )
    return nn_model_filename


@lru_cache(maxsize=10)
def get_pretrained_model(repo_id: str) -> sherpa_onnx.AudioTagging:
    """Create an audio tagger for the given repo_id. Results are cached via
    lru_cache so each model is downloaded and loaded only once."""
    assert repo_id in (
        "k2-fsa/sherpa-onnx-zipformer-small-audio-tagging-2024-04-15",
        "k2-fsa/sherpa-onnx-zipformer-audio-tagging-2024-04-09",
    ), repo_id

    model = _get_nn_model_filename(
        repo_id=repo_id,
        filename="model.int8.onnx",
    )

    labels = _get_nn_model_filename(
        repo_id=repo_id,
        filename="class_labels_indices.csv",
    )

    config = sherpa_onnx.AudioTaggingConfig(
        model=sherpa_onnx.AudioTaggingModelConfig(
            zipformer=sherpa_onnx.OfflineZipformerAudioTaggingModelConfig(
                model=model,
            ),
            num_threads=1,
            debug=True,
            provider="cpu",
        ),
        labels=labels,
        top_k=5,
    )

    return sherpa_onnx.AudioTagging(config)


models = {
    "k2-fsa/sherpa-onnx-zipformer-audio-tagging-2024-04-09": get_pretrained_model,
    "k2-fsa/sherpa-onnx-zipformer-small-audio-tagging-2024-04-15": get_pretrained_model,
}
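

# Minimal usage sketch: "test.wav" is a hypothetical placeholder for a
# 16-bit mono wave file, and the `name`/`prob` fields on AudioEvent are
# assumed to follow the sherpa-onnx Python API.
if __name__ == "__main__":
    repo_id = "k2-fsa/sherpa-onnx-zipformer-audio-tagging-2024-04-09"

    # Build (or reuse the cached) tagger for the selected repo.
    tagger = models[repo_id](repo_id)

    # Tag a single file and print the detected events.
    events = decode(tagger, "test.wav", top_k=5)
    for event in events:
        print(event.name, event.prob)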