Update app.py
app.py CHANGED
@@ -4,7 +4,7 @@ import gradio as gr
 import torch
 import transformers
 import librosa
-import cv2
+#import cv2
 import numpy as np

 # Load models
@@ -63,46 +63,46 @@ def audio_to_video(input):
     output = gr.outputs.Video.from_str(output)
     return output

-def image_to_text(input):
-    input = cv2.imread(input)
-    input = cv2.cvtColor(input, cv2.COLOR_BGR2RGB)
-    input = np.expand_dims(input, axis=0)
-    output = image_model(input)
-    return output[0]["label"]
-
-def image_to_audio(input):
-    input = cv2.imread(input)
-    input = cv2.cvtColor(input, cv2.COLOR_BGR2RGB)
-    input = np.expand_dims(input, axis=0)
-    output = image_model(input)
-    output = gr.outputs.Audio.from_str(output[0]["label"])
-    return output
+#def image_to_text(input):
+#    input = cv2.imread(input)
+#    input = cv2.cvtColor(input, cv2.COLOR_BGR2RGB)
+#    input = np.expand_dims(input, axis=0)
+#    output = image_model(input)
+#    return output[0]["label"]
+
+#def image_to_audio(input):
+#    input = cv2.imread(input)
+#    input = cv2.cvtColor(input, cv2.COLOR_BGR2RGB)
+#    input = np.expand_dims(input, axis=0)
+#    output = image_model(input)
+#    output = gr.outputs.Audio.from_str(output[0]["label"])
+#    return output

 def image_to_image(input):
     return input

-def image_to_video(input):
-    input = cv2.imread(input)
-    input = cv2.cvtColor(input, cv2.COLOR_BGR2RGB)
-    input = np.expand_dims(input, axis=0)
-    output = image_model(input)
-    output = gr.outputs.Video.from_str(output[0]["label"])
-    return output
-
-def video_to_text(input):
-    input = cv2.VideoCapture(input)
-    frames = []
-    while input.isOpened():
-        ret, frame = input.read()
-        if ret:
-            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-            frames.append(frame)
-        else:
-            breakpoint
-    input.release()
-    frames = np.array(frames)
-    output = video_model(frames)
-    return output[0]["label"]
+#def image_to_video(input):
+#    input = cv2.imread(input)
+#    input = cv2.cvtColor(input, cv2.COLOR_BGR2RGB)
+#    input = np.expand_dims(input, axis=0)
+#    output = image_model(input)
+#    output = gr.outputs.Video.from_str(output[0]["label"])
+#    return output
+
+#def video_to_text(input):
+#    input = cv2.VideoCapture(input)
+#    frames = []
+#    while input.isOpened():
+#        ret, frame = input.read()
+#        if ret:
+#            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+#            frames.append(frame)
+#        else:
+#            breakpoint
+#    input.release()
+#    frames = np.array(frames)
+#    output = video_model(frames)
+#    return output[0]["label"]

 #Output switches
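A note on the now-commented video_to_text: its else branch uses the bare name breakpoint, which only evaluates the built-in breakpoint function without calling it, so a failed read() never exits the loop (cap.isOpened() stays true after the last frame and the loop spins forever). If this handler is restored, break is almost certainly what was meant. A minimal corrected sketch, assuming the same OpenCV API and the video_model pipeline loaded earlier in app.py (not shown in this hunk):

import cv2
import numpy as np

def video_to_text(path):
    # Collect every frame of the clip as an RGB array.
    cap = cv2.VideoCapture(path)
    frames = []
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break  # read() failed: end of stream, so exit the loop
        frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    cap.release()
    frames = np.array(frames)
    # The call signature of video_model and the [0]["label"] access
    # mirror the original code; both are assumptions about the
    # "# Load models" section not visible in this diff.
    output = video_model(frames)
    return output[0]["label"]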
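The import hunk suggests the intent was to drop the OpenCV dependency entirely. If the image handlers later come back without cv2, Pillow plus the numpy import that remains could stand in for the imread/cvtColor pair. A hypothetical sketch (Pillow as a dependency is an assumption, and load_rgb_batch is an illustrative name, not part of this Space):

import numpy as np
from PIL import Image  # assumed available; not declared in this diff

def load_rgb_batch(path):
    # Pillow decodes to RGB directly, so the BGR->RGB conversion
    # that cv2.imread required is unnecessary here.
    img = np.array(Image.open(path).convert("RGB"))
    return np.expand_dims(img, axis=0)  # add the batch dimension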