Fixed NTIA provider to connect to the correct Space
app.py CHANGED
@@ -853,33 +853,91 @@ def generate_video(prompt, model_name, num_frames=16, num_inference_steps=20):
         # Model-type-specific configuration
         if "zeroscope" in model_name.lower():
             # Zeroscope models
-
+            result = pipeline(
                 prompt,
                 num_inference_steps=num_inference_steps,
                 num_frames=num_frames,
                 height=256,
                 width=256
-            )
+            )
         elif "animatediff" in model_name.lower():
             # AnimateDiff models
-
+            result = pipeline(
                 prompt,
                 num_inference_steps=num_inference_steps,
                 num_frames=num_frames
-            )
+            )
         else:
             # Text-to-video models (default)
-
+            result = pipeline(
                 prompt,
                 num_inference_steps=num_inference_steps,
                 num_frames=num_frames
-            )
+            )

         print("Video generado exitosamente")
-
+
+        # Handle the different response types
+        if hasattr(result, 'frames'):
+            video_frames = result.frames
+        elif hasattr(result, 'videos'):
+            video_frames = result.videos
+        else:
+            video_frames = result
+
+        # Convert to a format compatible with Gradio
+        if isinstance(video_frames, list):
+            if len(video_frames) == 1:
+                return video_frames[0]
+            else:
+                return video_frames
+        else:
+            # If it is a numpy tensor, convert it to a video format
+            if hasattr(video_frames, 'shape'):
+                import numpy as np
+                print(f"Forma del video: {video_frames.shape}")
+
+                # Convert to a video format compatible with Gradio
+                if len(video_frames.shape) == 4:  # (frames, height, width, channels)
+                    # Convert the frames to a video format
+                    frames_list = []
+                    for i in range(video_frames.shape[0]):
+                        frame = video_frames[i]
+                        # Make sure the frame is in the correct range (0-255)
+                        if frame.dtype == np.float32 or frame.dtype == np.float16:
+                            frame = (frame * 255).astype(np.uint8)
+                        frames_list.append(frame)
+
+                    # Create a video from the frames
+                    import imageio
+                    import tempfile
+                    import os
+
+                    # Create a temporary file
+                    with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as tmp_file:
+                        temp_path = tmp_file.name
+
+                    # Save the frames as a video
+                    imageio.mimsave(temp_path, frames_list, fps=8)
+
+                    print(f"Video guardado en: {temp_path}")
+                    return temp_path
+
+                elif len(video_frames.shape) == 5:  # (batch, frames, height, width, channels)
+                    # Take the first batch
+                    frames = video_frames[0]
+                    return generate_video(prompt, model_name, num_frames, num_inference_steps)
+                else:
+                    print(f"Forma no reconocida: {video_frames.shape}")
+                    return None
+            else:
+                return video_frames

     except Exception as e:
         print(f"Error generando video: {str(e)}")
+        print(f"Tipo de error: {type(e).__name__}")
+        import traceback
+        traceback.print_exc()
         return f"Error generando video: {str(e)}"

 # @spaces.GPU #[uncomment to use ZeroGPU]
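The response-handling block this hunk adds only probes attributes on the pipeline output, so it tolerates different return shapes. A minimal standalone sketch of the same pattern, assuming a diffusers-style pipeline result that may expose frames as .frames, .videos, or as the raw return value; the helper names and the FakeResult stand-in are illustrative, not part of app.py:

import numpy as np

def extract_frames(result):
    """Unwrap a pipeline result into a plain array/list of frames."""
    # Newer diffusers pipelines return an output object with .frames;
    # some community pipelines expose .videos; otherwise use the raw value.
    if hasattr(result, 'frames'):
        return result.frames
    if hasattr(result, 'videos'):
        return result.videos
    return result

def to_uint8(frame):
    """Scale a float frame (assumed to lie in [0, 1]) to the 0-255 range."""
    if frame.dtype in (np.float16, np.float32, np.float64):
        frame = (np.clip(frame, 0.0, 1.0) * 255).astype(np.uint8)
    return frame

class FakeResult:  # stand-in for a real pipeline output object
    frames = np.random.rand(16, 256, 256, 3).astype(np.float32)

frames = extract_frames(FakeResult())
print(to_uint8(frames[0]).dtype)  # uint8

The clip before scaling is a small hardening over the hunk, which multiplies by 255 directly; both assume float frames normalized to [0, 1].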
@@ -948,21 +1006,52 @@ def generate_video_with_info(prompt, model_name, optimization_level="balanced",
             else:
                 return video_frames
         else:
-            # If it is a numpy tensor, convert it to a
+            # If it is a numpy tensor, convert it to a video format
             if hasattr(video_frames, 'shape'):
-                # It is a tensor; convert it to a compatible format
                 import numpy as np
-
+                print(f"Forma del video: {video_frames.shape}")
+
+                # Convert to a video format compatible with Gradio
+                if len(video_frames.shape) == 4:  # (frames, height, width, channels)
+                    # Convert the frames to a video format
+                    frames_list = []
+                    for i in range(video_frames.shape[0]):
+                        frame = video_frames[i]
+                        # Make sure the frame is in the correct range (0-255)
+                        if frame.dtype == np.float32 or frame.dtype == np.float16:
+                            frame = (frame * 255).astype(np.uint8)
+                        frames_list.append(frame)
+
+                    # Create a video from the frames
+                    import imageio
+                    import tempfile
+                    import os
+
+                    # Create a temporary file
+                    with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as tmp_file:
+                        temp_path = tmp_file.name
+
+                    # Save the frames as a video
+                    imageio.mimsave(temp_path, frames_list, fps=8)
+
+                    print(f"Video guardado en: {temp_path}")
+                    return temp_path
+
+                elif len(video_frames.shape) == 5:  # (batch, frames, height, width, channels)
                     # Take the first batch
                     frames = video_frames[0]
-                return
+                    return generate_video_with_info(prompt, model_name, optimization_level, input_image)
             else:
-
+                print(f"Forma no reconocida: {video_frames.shape}")
+                return None
         else:
             return video_frames

     except Exception as e:
         print(f"Error generando video: {str(e)}")
+        print(f"Tipo de error: {type(e).__name__}")
+        import traceback
+        traceback.print_exc()
         return f"Error generando video: {str(e)}"

 def chat_with_model(message, history, model_name):
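Both hunks share the same frames-to-MP4 step. A self-contained sketch of that step, assuming imageio with an ffmpeg backend (the imageio-ffmpeg package) is installed; frames_to_mp4 is an illustrative name, app.py inlines the logic instead:

import tempfile

import imageio
import numpy as np

def frames_to_mp4(frames, fps=8):
    """Write a sequence of HxWx3 uint8 frames to a temporary .mp4 and return its path."""
    # delete=False keeps the file on disk after the handle closes, so the
    # path can be handed to Gradio; closing first lets ffmpeg reopen it,
    # which also matters on Windows where open temp files are locked.
    with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as tmp_file:
        temp_path = tmp_file.name
    imageio.mimsave(temp_path, list(frames), fps=fps)
    return temp_path

dummy = (np.random.rand(16, 256, 256, 3) * 255).astype(np.uint8)
print(frames_to_mp4(dummy))  # e.g. /tmp/tmpXXXXXXXX.mp4

Returning a file path rather than the raw array works because a Gradio Video output accepts the path of a playable file.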
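For the five-dimensional (batch, frames, height, width, channels) case, both hunks slice out the first batch and then re-enter the generator function. A hedged alternative sketch that collapses the batch dimension and falls through to the four-dimensional path instead; squeeze_batch is a hypothetical helper, not part of this commit:

import numpy as np

def squeeze_batch(video_frames):
    """Reduce (batch, frames, height, width, channels) to (frames, height, width, channels)."""
    if video_frames.ndim == 5:
        return video_frames[0]  # keep the first batch element, as the diff does
    return video_frames

batched = np.zeros((1, 16, 256, 256, 3), dtype=np.uint8)
print(squeeze_batch(batched).shape)  # (16, 256, 256, 3)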