import gradio as gr
import google.generativeai as genai
import os
import time

# The API key is assumed to be provided as a GOOGLE_API_KEY environment variable / Space secret
genai.configure(api_key=os.environ.get("GOOGLE_API_KEY"))

def summarize_video(video_path):
    if video_path is None:
        return "Please upload a video file."
    try:
        # Gradio passes the uploaded video as a local file path; the video must be
        # uploaded through the Gemini File API before it can be referenced in a prompt
        video_file = genai.upload_file(path=video_path)
        while video_file.state.name == "PROCESSING":
            time.sleep(5)
            video_file = genai.get_file(video_file.name)
        # Create the prompt
        prompt = (
            "Based on what is happening in the video, generate a script that could be used "
            "for a voiceover. It's a tutorial video, so generate it according to what is "
            "happening in the video. Refer to the actual text in the code and the terminal "
            "output to generate the description."
        )
        # Set up the model
        model = genai.GenerativeModel(model_name="models/gemini-1.5-pro")
        # Make the LLM request
        print("Making LLM inference request...")
        response = model.generate_content([prompt, video_file],
                                          request_options={"timeout": 2000})
        return response.text
    except Exception as e:
        return f"An error occurred: {str(e)}"
# Create the Gradio interface
iface = gr.Interface(
    fn=summarize_video,
    inputs=gr.Video(label="Upload Video"),
    outputs=gr.Textbox(label="Summary", lines=10),
    title="Video Summarizer",
    description="Upload a video to get an AI-generated summary using Gemini 1.5 Pro.",
    examples=[],
    cache_examples=False,
)

# Launch the interface
if __name__ == "__main__":
    iface.launch(share=True)
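The handler can also be exercised without launching the Gradio UI. The sketch below assumes the Space's app file is named app.py, that GOOGLE_API_KEY is set in the environment, and uses a placeholder video path; none of these names come from the Space itself.

# Standalone sanity check, run from a separate script or Python shell.
# Assumes the code above lives in app.py; the video path is a placeholder.
from app import summarize_video

print(summarize_video("tutorial_clip.mp4"))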