import streamlit as st
import moviepy.editor as mp
import speech_recognition as sr
from pydub import AudioSegment
import tempfile
import os
import io
from transformers import pipeline
import matplotlib.pyplot as plt

# Function to extract the audio track from a video as an MP3 file
def video_to_audio(video_file):
    # Load the video using moviepy
    video = mp.VideoFileClip(video_file)
    
    # Create a temporary MP3 path (mkstemp replaces the insecure, deprecated tempfile.mktemp)
    fd, temp_audio_path = tempfile.mkstemp(suffix=".mp3")
    os.close(fd)
    
    # Write the audio track to the file, then release the ffmpeg reader
    video.audio.write_audiofile(temp_audio_path)
    video.close()
    return temp_audio_path

# Function to convert MP3 audio to WAV
def convert_mp3_to_wav(mp3_file):
    # Load the MP3 file using pydub
    audio = AudioSegment.from_mp3(mp3_file)
    
    # Create a temporary WAV path (again via mkstemp rather than the deprecated mktemp)
    fd, temp_wav_path = tempfile.mkstemp(suffix=".wav")
    os.close(fd)
    
    # Export the audio to the temporary WAV file
    audio.export(temp_wav_path, format="wav")
    return temp_wav_path

# Function to transcribe audio to text
def transcribe_audio(audio_file):
    # Initialize recognizer
    recognizer = sr.Recognizer()
    
    # Load the audio file using speech_recognition
    audio = sr.AudioFile(audio_file)
    
    with audio as source:
        audio_data = recognizer.record(source)
    
    try:
        # Transcribe the audio data to text using Google Web Speech API
        text = recognizer.recognize_google(audio_data)
        return text
    except sr.UnknownValueError:
        return "Audio could not be understood."
    except sr.RequestError:
        return "Could not request results from Google Speech Recognition service."

# Load the emotion detection pipeline once and cache it across Streamlit reruns
@st.cache_resource(show_spinner=False)
def load_emotion_pipeline():
    # top_k=None returns scores for all labels (replaces the deprecated return_all_scores=True)
    return pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base", top_k=None)

# Function to perform emotion detection using Hugging Face transformers
def detect_emotion(text):
    emotion_pipeline = load_emotion_pipeline()
    
    # Passing a list keeps the output shape predictable (one result per input);
    # truncation guards against inputs longer than the model's maximum length
    result = emotion_pipeline([text], truncation=True)
    
    # Map every emotion label to its score
    emotions = {emotion['label']: emotion['score'] for emotion in result[0]}
    return emotions
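
# Sketch of the waveform visualization the app description below promises.
# plot_waveform is a hypothetical helper; it assumes WAV data that pydub can
# read from a file path or a file-like object.
def plot_waveform(wav_source, max_points=10000):
    audio = AudioSegment.from_file(wav_source, format="wav")
    samples = audio.get_array_of_samples()
    
    # Downsample for plotting speed; only the visual envelope matters here
    step = max(1, len(samples) // max_points)
    samples = samples[::step]
    
    # Build a time axis in seconds from the sample rate and channel count
    seconds_per_point = step / (audio.frame_rate * audio.channels)
    times = [i * seconds_per_point for i in range(len(samples))]
    
    fig, ax = plt.subplots(figsize=(10, 3))
    ax.plot(times, samples, linewidth=0.5)
    ax.set_xlabel("Time (s)")
    ax.set_ylabel("Amplitude")
    ax.set_title("Audio Waveform")
    return fig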

# Streamlit app layout
st.title("Video and Audio to Text Transcription with Emotion Detection and Visualization")
st.write("Upload a video or audio file to convert it to transcription, detect emotions, and visualize the audio waveform.")

# Let the user choose between video and audio uploads
tab = st.selectbox("Select the type of file to upload", ["Video", "Audio"])

if tab == "Video":
    # File uploader for video
    uploaded_video = st.file_uploader("Upload Video", type=["mp4", "mov", "avi"])

    if uploaded_video is not None:
        # Save the uploaded video temporarily, keeping its extension so ffmpeg
        # can detect the container format
        suffix = os.path.splitext(uploaded_video.name)[1]
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp_video:
            tmp_video.write(uploaded_video.read())
            tmp_video_path = tmp_video.name

        # Add an "Analyze Video" button
        if st.button("Analyze Video"):
            with st.spinner("Processing video... Please wait."):

                # Convert video to audio
                audio_file = video_to_audio(tmp_video_path)
                
                # Convert the extracted MP3 audio to WAV
                wav_audio_file = convert_mp3_to_wav(audio_file)
                
                # Transcribe audio to text
                transcription = transcribe_audio(wav_audio_file)

                # Show the transcription
                st.text_area("Transcription", transcription, height=300)

                # Emotion detection
                emotions = detect_emotion(transcription)
                st.write(f"Detected Emotions: {emotions}")

                # Store transcription and audio file in session state
                st.session_state.transcription = transcription
                
                # Store the audio file as a BytesIO object in memory
                with open(wav_audio_file, "rb") as f:
                    audio_data = f.read()
                    st.session_state.wav_audio_file = io.BytesIO(audio_data)

                # Clean up temporary files (the WAV data is already held in memory)
                os.remove(tmp_video_path)
                os.remove(audio_file)
                os.remove(wav_audio_file)

    # Check if transcription and audio file are stored in session state
    if 'transcription' in st.session_state and 'wav_audio_file' in st.session_state:
        # Provide the audio file to the user for download
        st.audio(st.session_state.wav_audio_file, format='audio/wav')
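
        # Visualize the audio waveform (sketch, using the plot_waveform helper above)
        st.session_state.wav_audio_file.seek(0)
        st.pyplot(plot_waveform(st.session_state.wav_audio_file))
        st.session_state.wav_audio_file.seek(0)  # rewind for the download button below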
        
        # Add download buttons for the transcription and audio
        # Downloadable transcription file
        st.download_button(
            label="Download Transcription",
            data=st.session_state.transcription,
            file_name="transcription.txt",
            mime="text/plain"
        )
        
        # Downloadable audio file
        st.download_button(
            label="Download Audio",
            data=st.session_state.wav_audio_file,
            file_name="converted_audio.wav",
            mime="audio/wav"
        )

elif tab == "Audio":
    # File uploader for audio
    uploaded_audio = st.file_uploader("Upload Audio", type=["wav", "mp3"])

    if uploaded_audio is not None:
        # Save the uploaded audio temporarily, keeping its original extension
        suffix = os.path.splitext(uploaded_audio.name)[1]
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp_audio:
            tmp_audio.write(uploaded_audio.read())
            tmp_audio_path = tmp_audio.name

        # Add an "Analyze Audio" button
        if st.button("Analyze Audio"):
            with st.spinner("Processing audio... Please wait."):

                # Convert the audio to WAV if it was uploaded as MP3 (browsers may
                # report the MIME type as audio/mpeg or audio/mp3)
                if uploaded_audio.type in ("audio/mpeg", "audio/mp3") or uploaded_audio.name.lower().endswith(".mp3"):
                    wav_audio_file = convert_mp3_to_wav(tmp_audio_path)
                else:
                    wav_audio_file = tmp_audio_path
                
                # Transcribe audio to text
                transcription = transcribe_audio(wav_audio_file)

                # Show the transcription
                st.text_area("Transcription", transcription, height=300)

                # Emotion detection
                emotions = detect_emotion(transcription)
                st.write(f"Detected Emotions: {emotions}")

                # Store transcription in session state
                st.session_state.transcription_audio = transcription
                
                # Store the audio file as a BytesIO object in memory
                with open(wav_audio_file, "rb") as f:
                    audio_data = f.read()
                    st.session_state.wav_audio_file_audio = io.BytesIO(audio_data)

                # Clean up temporary files (the WAV data is already held in memory)
                os.remove(tmp_audio_path)
                if wav_audio_file != tmp_audio_path:
                    os.remove(wav_audio_file)

        # Check if transcription and audio file are stored in session state
        if 'transcription_audio' in st.session_state and 'wav_audio_file_audio' in st.session_state:
            # Provide the audio file to the user for download
            st.audio(st.session_state.wav_audio_file_audio, format='audio/wav')
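
            # Visualize the audio waveform (sketch, using the plot_waveform helper above)
            st.session_state.wav_audio_file_audio.seek(0)
            st.pyplot(plot_waveform(st.session_state.wav_audio_file_audio))
            st.session_state.wav_audio_file_audio.seek(0)  # rewind for the download button below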
            
            # Add download buttons for the transcription and audio
            # Downloadable transcription file
            st.download_button(
                label="Download Transcription",
                data=st.session_state.transcription_audio,
                file_name="transcription_audio.txt",
                mime="text/plain"
            )
            
            # Downloadable audio file
            st.download_button(
                label="Download Audio",
                data=st.session_state.wav_audio_file_audio,
                file_name="converted_audio_audio.wav",
                mime="audio/wav"
            )