update
app/services/video_service.py  +84 -75
app/services/video_service.py
CHANGED
@@ -10,6 +10,8 @@ from moviepy.editor import VideoFileClip, AudioFileClip, CompositeVideoClip
 from moviepy.audio.AudioClip import CompositeAudioClip
 from io import BytesIO
 from fastapi.responses import StreamingResponse
+import tempfile
+from fastapi import HTTPException
 
 logger = logging.getLogger(__name__)
 
@@ -42,91 +44,98 @@ class VideoService:
         answers_style = style_config.get('answers', {})
         background_style = style_config.get('background', {})
 
         # Create an in-memory buffer
         video_buffer = BytesIO()
 
-        …
-
-        # Load the background image if it exists
-        background_image = None
-        if background_style.get('image'):
-            # Decode the base64 image, keeping the original colors
-            image_data = base64.b64decode(background_style['image'].split(',')[1])
-            img = Image.open(io.BytesIO(image_data))
-
-            # Resize while keeping the aspect ratio
-            ratio = img.width / img.height
-            new_height = HEIGHT
-            new_width = int(HEIGHT * ratio)
-            img = img.resize((new_width, new_height), Image.Resampling.LANCZOS)
-
-            # Center and crop if necessary
-            if new_width > WIDTH:
-                left = (new_width - WIDTH) // 2
-                img = img.crop((left, 0, left + WIDTH, HEIGHT))
-            elif new_width < WIDTH:
-                new_img = Image.new('RGB', (WIDTH, HEIGHT), (0, 0, 0))
-                paste_x = (WIDTH - new_width) // 2
-                new_img.paste(img, (paste_x, 0))
-                img = new_img
-
-        …
-
-            if background_image is not None:
-                # Use the background image in RGB
-                frame = Image.fromarray(background_image)
-                if background_style.get('opacity', 1) < 1:
-                    overlay = Image.new('RGB', (WIDTH, HEIGHT), (0, 0, 0))
-                    frame = Image.blend(frame, overlay, 1 - background_style.get('opacity', 1))
-
-        …
-
-            for _ in range(int(FPS * DURATION_QUESTION)):
-                out.write(frame_cv)
-            current_time += DURATION_QUESTION
-
-        …
-
-                out.write(frame_cv)
-            current_time += DURATION_ANSWER
-
-        …
-
+        # Use a temporary file for the encoder output
+        with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as temp_file:
+            temp_path = temp_file.name
+
+            # Create the writer with cv2
+            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+            out = cv2.VideoWriter(temp_path, fourcc, FPS, (WIDTH, HEIGHT))
+
+            # Load the background image if it exists
+            background_image = None
+            if background_style.get('image'):
+                # Decode the base64 image, keeping the original colors
+                image_data = base64.b64decode(background_style['image'].split(',')[1])
+                img = Image.open(io.BytesIO(image_data))
+
+                # Resize while keeping the aspect ratio
+                ratio = img.width / img.height
+                new_height = HEIGHT
+                new_width = int(HEIGHT * ratio)
+                img = img.resize((new_width, new_height), Image.Resampling.LANCZOS)
+
+                # Center and crop if necessary
+                if new_width > WIDTH:
+                    left = (new_width - WIDTH) // 2
+                    img = img.crop((left, 0, left + WIDTH, HEIGHT))
+                elif new_width < WIDTH:
+                    new_img = Image.new('RGB', (WIDTH, HEIGHT), (0, 0, 0))
+                    paste_x = (WIDTH - new_width) // 2
+                    new_img.paste(img, (paste_x, 0))
+                    img = new_img
+
+                # Convert to a numpy array, preserving the colors
+                background_image = np.array(img)
+
+            # List to store the times at which to play the sound
+            correct_answer_times = []
+            current_time = 0
+
+            # Build the frames
+            for i, question in enumerate(quiz_data["questions"], 1):
+                frame = Image.new('RGB', (WIDTH, HEIGHT))
+                if background_image is not None:
+                    # Use the background image in RGB
+                    frame = Image.fromarray(background_image)
+                    if background_style.get('opacity', 1) < 1:
+                        overlay = Image.new('RGB', (WIDTH, HEIGHT), (0, 0, 0))
+                        frame = Image.blend(frame, overlay, 1 - background_style.get('opacity', 1))
+
+                # Create the frames
+                question_frame = VideoService._create_question_frame(
+                    frame, question, i, len(quiz_data["questions"]),
+                    title_style, questions_style, answers_style,
+                    WIDTH, HEIGHT, show_answer=False
+                )
+
+                # Convert to BGR for OpenCV
+                frame_cv = cv2.cvtColor(np.array(question_frame), cv2.COLOR_RGB2BGR)
+
+                for _ in range(int(FPS * DURATION_QUESTION)):
+                    out.write(frame_cv)
+                current_time += DURATION_QUESTION
+
+                # Mark the time at which to play the sound
+                correct_answer_times.append(current_time)
+
+                # Answer frame
+                answer_frame = VideoService._create_question_frame(
+                    frame.copy(), question, i, len(quiz_data["questions"]),
+                    title_style, questions_style, answers_style,
+                    WIDTH, HEIGHT, show_answer=True
+                )
+
+                frame_cv = cv2.cvtColor(np.array(answer_frame), cv2.COLOR_RGB2BGR)
+
+                for _ in range(int(FPS * DURATION_ANSWER)):
+                    out.write(frame_cv)
+                current_time += DURATION_ANSWER
+
+            out.release()
+
+            # Read the temporary file into the buffer
+            temp_file.seek(0)
+            video_buffer.write(temp_file.read())
 
         # Delete the temporary file
-        …
+        try:
+            os.unlink(temp_path)
+        except:
+            pass
 
         # Reset the cursor to the beginning of the buffer
         video_buffer.seek(0)
@@ -142,7 +151,7 @@ class VideoService:
 
         except Exception as e:
             logger.error(f"Erreur dans generate_quiz_video: {str(e)}")
-            raise
+            raise HTTPException(status_code=500, detail=str(e))
 
     @staticmethod
     def _scale_size(size, preview_height=170, video_height=720):