import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
import os
import soundfile as sf

def main():
    # Gradio Interface
    with gr.Blocks() as app:
        gr.Markdown(
            """
            # <div align="center"> Ilaria Audio Analyzer 💖 (BETA) </div>
            Audio Analyzer Software by Ilaria. Support me on [Ko-Fi](https://ko-fi.com/ilariaowo)\n
            Special thanks to [Alex Murkoff](https://github.com/alexlnkp) for helping me code it!
    
            Need help with AI? [Join AI Hub!](https://discord.gg/aihub)
            """
        )
    
        with gr.Row():
            with gr.Column():
                audio_input = gr.Audio(type='filepath')
                create_spec_butt = gr.Button(value='Create Spectrogram And Get Info', variant='primary')
            with gr.Column():
                output_markdown = gr.Markdown(value="", visible=True)
                image_output = gr.Image(type='filepath', interactive=False)
    
        create_spec_butt.click(fn=create_spectrogram_and_get_info, inputs=[audio_input], outputs=[output_markdown, image_output])
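        # Reset both outputs whenever a new audio file is selected, so stale
        # results from a previous file aren't shown alongside the new one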
        audio_input.change(fn=lambda: ({"value": "", "__type__": "update"},
                                       {"value": "", "__type__": "update"}),
                           inputs=[], outputs=[image_output, output_markdown])
        
        app.queue(max_size=1022).launch(share=True)

def create_spectrogram_and_get_info(audio_file):
    # Clear any previous figure so earlier plots don't carry over into this one
    plt.clf()
    
    # Read the audio data from the file
    audio_data, sample_rate = sf.read(audio_file)

    # Mix multi-channel audio down to mono by averaging the channels
    if len(audio_data.shape) > 1:
        audio_data = np.mean(audio_data, axis=1)

    # Create the spectrogram
    plt.specgram(audio_data, Fs=sample_rate, NFFT=4096, sides='onesided',
                 cmap="Reds_r", scale_by_freq=True, scale='dB', mode='magnitude')

    # Save the spectrogram to a PNG file
    plt.savefig('spectrogram.png')

    # Get the audio file info
    audio_info = sf.info(audio_file)
    
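    # Map the soundfile subtype to a bit depth; only PCM_16 and FLOAT are
    # covered here, so any other subtype falls back to 0 and zeroes out the
    # bitrate figures below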
    bit_depth = {'PCM_16': 16, 'FLOAT': 32}.get(audio_info.subtype, 0)
    
    # Convert duration to minutes, seconds, and milliseconds
    minutes, seconds = divmod(audio_info.duration, 60)
    seconds, milliseconds = divmod(seconds, 1)
    milliseconds *= 1000  # convert from seconds to milliseconds
    
    # Calculate the overall bitrate in kbps (sample rate * channels * bit depth)
    speed_in_kbps = audio_info.samplerate * audio_info.channels * bit_depth / 1000
    
    # Create a table with the audio file info
    info_table = f"""
    <center>
    
    | Information | Value |
    | :---: | :---: |
    | File Name | {os.path.basename(audio_file)} |
    | Duration | {int(minutes)} minutes - {int(seconds)} seconds - {int(milliseconds)} milliseconds |
    | Bitrate | {speed_in_kbps} kbps |
    | Audio Channels | {audio_info.channels} |
    | Sample Rate | {audio_info.samplerate} Hz |
    | Bits per second | {audio_info.samplerate * audio_info.channels * bit_depth} bit/s |
    
    </center>
    """
    
    # Return the PNG file of the spectrogram and the info table
    return {"value": info_table, "__type__": "update"}, 'spectrogram.png'

# Launch the Gradio interface when the script is run directly
if __name__ == '__main__':
    main()
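
# A minimal sketch of calling the analysis step directly, bypassing the UI
# (the 'example.wav' filename is only an illustration, not a file shipped
# with this project):
#
#   info_update, image_path = create_spectrogram_and_get_info('example.wav')
#   print(info_update['value'])   # markdown info table
#   print(image_path)             # 'spectrogram.png'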