2nzi committed · Commit 16c970a · verified · 1 Parent(s): e7f53d7

update app

Files changed (31)
  1. Dockerfile +30 -24
  2. app/__pycache__/__init__.cpython-38.pyc +0 -0
  3. app/api/__pycache__/__init__.cpython-38.pyc +0 -0
  4. app/api/endpoints/__pycache__/__init__.cpython-38.pyc +0 -0
  5. app/api/endpoints/__pycache__/videos.cpython-38.pyc +0 -0
  6. app/api/endpoints/videos.py +56 -2
  7. app/core/__pycache__/__init__.cpython-38.pyc +0 -0
  8. app/core/__pycache__/auth.cpython-38.pyc +0 -0
  9. app/core/__pycache__/config.cpython-38.pyc +0 -0
  10. app/core/__pycache__/firebase.cpython-38.pyc +0 -0
  11. app/core/auth.py +13 -4
  12. app/services/__pycache__/__init__.cpython-38.pyc +0 -0
  13. app/services/__pycache__/clip_assignment.cpython-38.pyc +0 -0
  14. app/services/__pycache__/processor.cpython-38.pyc +0 -0
  15. app/services/__pycache__/youtube_downloader.cpython-38.pyc +0 -0
  16. app/services/clip_assignment.py +73 -0
  17. app/services/processor.py +133 -16
  18. app/services/video_processing/__pycache__/__init__.cpython-38.pyc +0 -0
  19. app/services/video_processing/__pycache__/clip_generator.cpython-38.pyc +0 -0
  20. app/services/video_processing/__pycache__/compression.cpython-38.pyc +0 -0
  21. app/services/video_processing/__pycache__/hf_upload.cpython-38.pyc +0 -0
  22. app/services/video_processing/__pycache__/scene_classifier.cpython-38.pyc +0 -0
  23. app/services/video_processing/__pycache__/scene_detection.cpython-38.pyc +0 -0
  24. app/services/video_processing/__pycache__/scrape_hf.cpython-38.pyc +0 -0
  25. app/services/video_processing/clip_generator.py +213 -0
  26. app/services/video_processing/compression.py +1 -1
  27. app/services/video_processing/hf_upload.py +57 -56
  28. app/services/video_processing/scene_classifier.py +87 -0
  29. app/services/video_processing/scene_detection.py +69 -0
  30. app/services/video_processing/scrape_hf.py +24 -0
  31. main.py +0 -50
Dockerfile CHANGED
@@ -1,25 +1,31 @@
- # Use an official Python image
- FROM python:3.9-slim
-
- # Install the required system dependencies
- RUN apt-get update && apt-get install -y \
-     ffmpeg \
-     && rm -rf /var/lib/apt/lists/*
-
- # Set the working directory
- WORKDIR /code
-
- # Copy the dependency files
- COPY requirements.txt .
-
- # Install the Python dependencies
- RUN pip install --no-cache-dir -r requirements.txt
-
- # Copy the rest of the code
- COPY . .
-
- # Expose the port
- EXPOSE 7860
-
- # Command to start the application
+ # FROM python:3.9
+ # WORKDIR /code
+ # COPY ./requirements.txt /code/requirements.txt
+ # RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+ # COPY . /code
+ # CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
+ # Use an official Python image
+ FROM python:3.9-slim
+
+ # Install the required system dependencies
+ RUN apt-get update && apt-get install -y \
+     ffmpeg \
+     && rm -rf /var/lib/apt/lists/*
+
+ # Set the working directory
+ WORKDIR /code
+
+ # Copy the dependency files
+ COPY requirements.txt .
+
+ # Install the Python dependencies
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ # Copy the rest of the code
+ COPY . .
+
+ # Expose the port
+ EXPOSE 7860
+
+ # Command to start the application
  CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
app/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (159 Bytes).

app/api/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (163 Bytes).

app/api/endpoints/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (173 Bytes).

app/api/endpoints/__pycache__/videos.cpython-38.pyc ADDED
Binary file (6.42 kB).

app/api/endpoints/videos.py CHANGED
@@ -7,7 +7,8 @@ from typing import List, Optional, Dict
  from app.services.youtube_downloader import download_youtube_video, parse_urls
  from ...services.processor import process_video
  from typing import Dict
- from ...core.firebase import db  # Modify this line
+ from ...core.firebase import db
+ from ...services.clip_assignment import ClipAssignmentService


  router = APIRouter()
@@ -106,6 +107,7 @@ async def upload_video(
  async def process_single_video(content: bytes, sport_id: str, user_info: dict, title: str):
      """Process a single video and return its ID"""
      video_uuid = str(uuid.uuid4())
+     active_assignement = True

      # Compute the MD5 hash
      md5_hash = hashlib.md5()
@@ -136,7 +138,11 @@ async def process_single_video(content: bytes, sport_id: str, user_info: dict, t
      }

      db.collection('videos').document(video_uuid).set(video_data)
-     await process_video(video_uuid, content)
+     await process_video(video_uuid, content, user_info['uid'], sport_id)
+
+     if user_info['role'] in ["admin", "user_intern"] and active_assignement:
+         clip_service = ClipAssignmentService()
+         await clip_service.assign_clips_to_user(user_info["uid"], user_info["role"])

      return {"message": "Upload initié", "video_id": video_uuid}

@@ -188,5 +194,53 @@ async def update_video_status(

          return {"message": "Statut mis à jour avec succès"}

+     except Exception as e:
+         raise HTTPException(status_code=500, detail=str(e))
+
+
+ @router.post("/clips/assign")
+ async def assign_clips(user_info=Depends(require_role(["admin", "user_intern"]))):
+     try:
+         clip_service = ClipAssignmentService()
+         await clip_service.assign_clips_to_user(user_info["uid"], user_info["role"])
+         return {"message": "Clips assignés avec succès"}
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=str(e))
+
+ @router.delete("/clips/{clip_id}/user")
+ async def remove_clip(clip_id: str, user_info=Depends(require_role(["admin"]))):
+     try:
+         clip_service = ClipAssignmentService()
+         await clip_service.remove_clip_from_user(user_info["uid"], clip_id)
+         return {"message": "Clip retiré avec succès"}
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=str(e))
+
+ @router.post("/clips/sync")
+ async def sync_user_clips(user_info=Depends(require_role(["admin", "user_intern"]))):
+     """Synchronise the clips available to the user when they log in"""
+     try:
+         clip_service = ClipAssignmentService()
+         await clip_service.assign_clips_to_user(user_info["uid"], user_info["role"])
+         return {"message": "Clips synchronisés avec succès"}
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=str(e))
+
+
+ @router.get("/clips/debug/{user_id}")
+ async def debug_user_clips(user_id: str, user_info=Depends(require_role(["admin"]))):
+     """Debug endpoint to check a user's clips"""
+     try:
+         user_ref = db.collection('users').document(user_id)
+         user_doc = user_ref.get()
+
+         if not user_doc.exists:
+             raise HTTPException(status_code=404, detail="Utilisateur non trouvé")
+
+         user_data = user_doc.to_dict()
+         return {
+             "clips_count": len(user_data.get("clips", [])),
+             "clips": user_data.get("clips", [])
+         }
      except Exception as e:
          raise HTTPException(status_code=500, detail=str(e))
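
The new /clips endpoints above are restricted to the "admin" and "user_intern" roles via require_role and delegate to ClipAssignmentService. A minimal client-side sketch of calling them, assuming the router is mounted under the /api prefix from main.py, the server listens on port 7860 as in the Dockerfile, and ID_TOKEN holds a valid Firebase ID token:

import requests

BASE_URL = "http://localhost:7860/api"            # assumed host and prefix
ID_TOKEN = "<firebase-id-token>"                  # obtained from the Firebase client SDK
headers = {"Authorization": f"Bearer {ID_TOKEN}"}

# Manually assign all available clips to the caller
resp = requests.post(f"{BASE_URL}/clips/assign", headers=headers)
print(resp.status_code, resp.json())

# Re-synchronise the caller's clip list (same service call, intended for login time)
resp = requests.post(f"{BASE_URL}/clips/sync", headers=headers)
print(resp.status_code, resp.json())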
app/core/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (164 Bytes).

app/core/__pycache__/auth.cpython-38.pyc ADDED
Binary file (1.56 kB).

app/core/__pycache__/config.cpython-38.pyc ADDED
Binary file (814 Bytes).

app/core/__pycache__/firebase.cpython-38.pyc ADDED
Binary file (1.25 kB).

app/core/auth.py CHANGED
@@ -2,15 +2,24 @@ from fastapi import Depends, HTTPException, status
  from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
  from firebase_admin import auth
  from .firebase import db
+ import time

  security = HTTPBearer()

  def get_user(credentials: HTTPAuthorizationCredentials = Depends(security)):
+     if not credentials:
+         raise HTTPException(
+             status_code=status.HTTP_401_UNAUTHORIZED,
+             detail="Bearer authentication required"
+         )
      try:
-         token = credentials.credentials
-         decoded_token = auth.verify_id_token(token)
+         # Use a valid value for clock_skew_seconds (between 0 and 60)
+         decoded_token = auth.verify_id_token(
+             credentials.credentials,
+             check_revoked=True,
+             clock_skew_seconds=60  # Maximum allowed value
+         )

-         # Retrieve the role from Firestore
          user_id = decoded_token['uid']
          user_doc = db.collection('users').document(user_id).get()

@@ -20,11 +29,11 @@ def get_user(credentials: HTTPAuthorizationCredentials = Depends(security)):
                  detail="User not found in Firestore"
              )

-         # Add the role to the token information
          user_data = user_doc.to_dict()
          decoded_token['role'] = user_data.get('role', 'user_extern')

          return decoded_token
+
      except Exception as e:
          raise HTTPException(
              status_code=status.HTTP_401_UNAUTHORIZED,
app/services/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (168 Bytes).

app/services/__pycache__/clip_assignment.cpython-38.pyc ADDED
Binary file (2.54 kB).

app/services/__pycache__/processor.cpython-38.pyc ADDED
Binary file (4.24 kB).

app/services/__pycache__/youtube_downloader.cpython-38.pyc ADDED
Binary file (3.02 kB).

app/services/clip_assignment.py ADDED
@@ -0,0 +1,73 @@
+ from firebase_admin import firestore
+ from ..core.firebase import db
+ from typing import List, Dict
+
+ class ClipAssignmentService:
+     def __init__(self):
+         self.db = db
+
+     async def assign_clips_to_user(self, user_id: str, role: str):
+         """
+         Assign clips to a user according to their role.
+         For now, admin and user_intern have access to all clips.
+         """
+         if role not in ["admin", "user_intern"]:
+             return
+
+         try:
+             # Retrieve all the available clips
+             clips_ref = self.db.collection("clips")
+             clips = clips_ref.stream()
+
+             # Retrieve the user
+             user_ref = self.db.collection("users").document(user_id)
+             user_data = user_ref.get().to_dict() or {}
+
+             # Initialise the clips list if it does not exist
+             if "clips" not in user_data:
+                 user_data["clips"] = []
+
+             # Build a set of the user's existing clip_ids
+             existing_clip_ids = {clip["clip_id"] for clip in user_data["clips"]}
+
+             # Add the new clips
+             for clip in clips:
+                 clip_data = clip.to_dict()
+                 clip_id = clip_data["clip_id"]
+
+                 if clip_id not in existing_clip_ids:
+                     user_data["clips"].append({
+                         "clip_id": clip_id,
+                         "sport_id": clip_data["sport_id"],
+                         "url": clip_data["url"],
+                         "status": "ready",
+                         "annotations": []
+                     })
+
+             # Update the user document
+             user_ref.set(user_data, merge=True)
+
+             return {"message": "Clips assignés avec succès"}
+
+         except Exception as e:
+             print(f"Erreur lors de l'assignation des clips: {str(e)}")
+             raise e
+
+     async def remove_clip_from_user(self, user_id: str, clip_id: str):
+         """Remove a specific clip from a user"""
+         try:
+             user_ref = self.db.collection("users").document(user_id)
+             user_data = user_ref.get().to_dict()
+
+             if "clips" in user_data:
+                 user_data["clips"] = [
+                     clip for clip in user_data["clips"]
+                     if clip["clip_id"] != clip_id
+                 ]
+                 user_ref.set(user_data, merge=True)
+
+             return {"message": "Clip retiré avec succès"}
+
+         except Exception as e:
+             print(f"Erreur lors du retrait du clip: {str(e)}")
+             raise e
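
assign_clips_to_user mirrors every document of the top-level clips collection into a clips array on the user document, skipping clip_ids that are already present. A sketch of the per-user entry it writes, with field names taken from the code above and illustrative values:

user_clip_entry = {
    "clip_id": "<video_uuid>_clip_1",                         # illustrative ID
    "sport_id": "surf",
    "url": "https://huggingface.co/datasets/<repo_id>/...",   # resolved clip URL
    "status": "ready",
    "annotations": []                                         # initialised empty
}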
app/services/processor.py CHANGED
@@ -6,8 +6,13 @@ from huggingface_hub import HfApi, create_repo
  import tempfile
  from .video_processing.hf_upload import HFUploader
  from .video_processing.compression import compress_video
+ from .video_processing.scene_detection import SceneDetector
+ from .video_processing.scene_classifier import SceneClassifier
+ from .video_processing.clip_generator import ClipGenerator
+ from .video_processing.scrape_hf import extract_video_urls
+ import time

- async def process_video(video_uuid: str, content: bytes):
+ async def process_video(video_uuid: str, content: bytes, user_id: str, sport_id: str):
      temp_files = []
      try:
          video_ref = db.collection('videos').document(video_uuid)
@@ -31,24 +36,136 @@ async def process_video(video_uuid: str, content: bytes):
          # Compress video
          compress_video(temp_raw_file.name, temp_compressed_file.name)
          temp_compressed_file.close()
+
+         # Detect scenes
+         scene_detector = SceneDetector()
+         scene_classifier = SceneClassifier()
+         scenes_data = scene_detector.detect_scenes(temp_compressed_file.name)

-         # Upload both versions
-         raw_url = hf_uploader.upload_video(
-             temp_raw_file.name,
-             f"{sport_id}/raw/{video_uuid}.mp4"
-         )
-
-         compressed_url = hf_uploader.upload_video(
+         # Classify each scene
+         for scene in scenes_data["scenes"]:
+             classification = scene_classifier.classify_scene(
+                 temp_compressed_file.name,
+                 scene
+             )
+             scene["recognized_sport"] = classification["recognized_sport"]
+             scene["confidence"] = classification["confidence"]
+
+         # Generate clips
+         clip_generator = ClipGenerator()
+         generated_clips = clip_generator.generate_clips(
              temp_compressed_file.name,
-             f"{sport_id}/compressed/{video_uuid}.mp4"
+             scenes_data["scenes"],
+             sport_id
          )
+
+         # Create and upload clips
+         final_clips_data = []
+         user_ref = db.collection("users").document(user_id)
+         user_data = user_ref.get().to_dict() or {"clips": []}
+
+         if "clips" not in user_data:
+             user_data["clips"] = []
+
+         # Upload both versions first
+         raw_path = f"{sport_id}/raw/{video_uuid}.mp4"
+         compressed_path = f"{sport_id}/compressed/{video_uuid}.mp4"

-         # Update Firestore
-         video_ref.update({
-             "raw_video_url": raw_url,
-             "compressed_video_url": compressed_url,
-             "status": "ready"
-         })
+         hf_uploader.upload_video(temp_raw_file.name, raw_path)
+         hf_uploader.upload_video(temp_compressed_file.name, compressed_path)
+
+         # Wait for HF to index the files
+         time.sleep(3)
+
+         # Build the base URL for scraping
+         base_viewer_url = f"https://huggingface.co/datasets/{hf_uploader.repo_id}/viewer/default/files"
+
+         # Retrieve all the URLs
+         max_retries = 3
+         raw_url = None
+         compressed_url = None
+
+         for attempt in range(max_retries):
+             try:
+                 urls = extract_video_urls(base_viewer_url)
+
+                 # Look for the matching URLs
+                 raw_url = next((url for url in urls if raw_path in url), None)
+                 compressed_url = next((url for url in urls if compressed_path in url), None)
+
+                 if raw_url and compressed_url:
+                     print(f"[SUCCESS] URLs trouvées à la tentative {attempt + 1}")
+                     break
+
+                 print(f"[RETRY] Tentative {attempt + 1}/{max_retries}")
+                 time.sleep(2)
+             except Exception as e:
+                 print(f"[ERROR] Tentative {attempt + 1} échouée: {str(e)}")
+                 if attempt == max_retries - 1:
+                     raise e
+                 time.sleep(2)
+
+         # Process clips
+         for clip_data in generated_clips:
+             clip_number = clip_data["clip_number"]
+             clip_file_path = clip_data["file_path"]
+             temp_files.append(clip_file_path)
+
+             clip_path = f"{sport_id}/clips/{video_uuid}_clip_{clip_number}.mp4"
+             hf_uploader.upload_video(clip_file_path, clip_path)
+
+             # Wait and scrape the clip URL
+             time.sleep(2)
+             clip_url = None
+
+             for attempt in range(max_retries):
+                 try:
+                     urls = extract_video_urls(base_viewer_url)
+                     clip_url = next((url for url in urls if clip_path in url), None)
+                     if clip_url:
+                         break
+                     time.sleep(2)
+                 except Exception:
+                     if attempt == max_retries - 1:
+                         raise
+                     time.sleep(2)
+
+             if clip_url:
+                 final_clips_data.append({
+                     "clip_id": f"{video_uuid}_clip_{clip_number}",
+                     "video_uuid": video_uuid,
+                     "url": clip_url,
+                     "duration": clip_data["duration"],
+                     "confidence": clip_data["confidence"],
+                     "segments": clip_data["segments"]
+                 })
+
+                 clip_ref = db.collection("clips").document(f"{video_uuid}_clip_{clip_number}")
+                 clip_ref.set({
+                     "clip_id": f"{video_uuid}_clip_{clip_number}",
+                     "sport_id": sport_id,
+                     "url": clip_url,
+                     "duration": clip_data["duration"]
+                 })
+
+         # Update user data
+         user_ref.set(user_data)
+
+         # Update video data with scraped URLs
+         update_data = {
+             "scenes": scenes_data,
+             "clips": final_clips_data,
+             "clips_count": len(final_clips_data),
+             "status": "ready",
+             "last_updated": firestore.SERVER_TIMESTAMP
+         }
+
+         if raw_url:
+             update_data["raw_video_url"] = raw_url
+         if compressed_url:
+             update_data["compressed_video_url"] = compressed_url
+
+         video_ref.update(update_data)

      except Exception as e:
          print(f"Erreur lors du traitement de la vidéo {video_uuid}: {str(e)}")
@@ -61,4 +178,4 @@ async def process_video(video_uuid: str, content: bytes):
                  if os.path.exists(temp_file):
                      os.unlink(temp_file)
              except Exception as e:
-                 print(f"Erreur lors de la suppression du fichier temporaire {temp_file}: {str(e)}")
+                 print(f"[WARNING] Erreur lors de la suppression du fichier temporaire {temp_file}: {str(e)}")
app/services/video_processing/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (185 Bytes).

app/services/video_processing/__pycache__/clip_generator.cpython-38.pyc ADDED
Binary file (6.15 kB).

app/services/video_processing/__pycache__/compression.cpython-38.pyc ADDED
Binary file (990 Bytes).

app/services/video_processing/__pycache__/hf_upload.cpython-38.pyc ADDED
Binary file (2 kB).

app/services/video_processing/__pycache__/scene_classifier.cpython-38.pyc ADDED
Binary file (3.67 kB).

app/services/video_processing/__pycache__/scene_detection.cpython-38.pyc ADDED
Binary file (2.29 kB).

app/services/video_processing/__pycache__/scrape_hf.cpython-38.pyc ADDED
Binary file (620 Bytes).

app/services/video_processing/clip_generator.py ADDED
@@ -0,0 +1,213 @@
+ import cv2
+ from moviepy.editor import VideoFileClip, concatenate_videoclips
+ import tempfile
+ import os
+
+ class ClipGenerator:
+     def __init__(self, target_duration: int = 60, min_confidence: float = 0.9):
+         self.target_duration = target_duration  # target duration in seconds
+         self.min_confidence = min_confidence
+         print(f"[DEBUG] Initialized ClipGenerator with target_duration={target_duration}s, min_confidence={min_confidence}")
+
+     def _time_to_seconds(self, time_str: str) -> float:
+         h, m, s = time_str.split(':')
+         return int(h) * 3600 + int(m) * 60 + float(s)
+
+     def _seconds_to_time(self, seconds: float) -> str:
+         h = int(seconds // 3600)
+         m = int((seconds % 3600) // 60)
+         s = seconds % 60
+         return f"{h:02d}:{m:02d}:{s:06.3f}"
+
+     def generate_clips(self, video_path: str, scenes: list, sport_id: str) -> list:
+         print(f"\n[DEBUG] Starting clip generation for {sport_id}")
+         print(f"[DEBUG] Input scenes: {len(scenes)}")
+
+         # Debug each scene before filtering
+         for scene in scenes:
+             print(f"[DEBUG] Pre-filter scene:")
+             print(f" - Start: {scene['start']}")
+             print(f" - End: {scene['end']}")
+             print(f" - Sport: {scene['recognized_sport']}")
+             print(f" - Confidence: {scene['confidence']}")
+             print(f" - Would pass sport filter: {scene['recognized_sport'] == sport_id}")
+             print(f" - Would pass confidence filter: {scene['confidence'] >= self.min_confidence}")
+
+         filtered_scenes = [
+             scene for scene in scenes
+             if scene["recognized_sport"].lower() == sport_id.lower()
+             and scene["confidence"] >= self.min_confidence
+         ]
+
+         print(f"[DEBUG] Filtered scenes: {len(filtered_scenes)}")
+         for scene in filtered_scenes:
+             print(f"[DEBUG] Scene: {scene['start']} -> {scene['end']} (conf: {scene['confidence']:.2%}) sport_id: {sport_id}")
+
+         if not filtered_scenes:
+             print("[DEBUG] No valid scenes found after filtering")
+             return []
+
+         clips_data = []
+         current_segments = []
+         current_duration = 0
+         clip_number = 1
+
+         for scene in filtered_scenes:
+             print(f"\n[DEBUG] Processing scene {scene['start']} -> {scene['end']}")
+             scene_start = self._time_to_seconds(scene["start"])
+             scene_end = self._time_to_seconds(scene["end"])
+             scene_duration = scene_end - scene_start
+             print(f"[DEBUG] Scene duration: {scene_duration:.2f}s")
+             print(f"[DEBUG] Current accumulated duration: {current_duration:.2f}s")
+
+             if current_duration + scene_duration > self.target_duration:
+                 remaining_duration = self.target_duration - current_duration
+                 print(f"[DEBUG] Scene would exceed target duration. Remaining space: {remaining_duration:.2f}s")
+
+                 if remaining_duration > 0:
+                     segment_end = self._seconds_to_time(scene_start + remaining_duration)
+                     current_segments.append({
+                         "start": scene["start"],
+                         "end": segment_end,
+                         "confidence": scene["confidence"]
+                     })
+                     print(f"[DEBUG] Added partial segment: {scene['start']} -> {segment_end}")
+
+                 clip_data = self._create_clip(video_path, current_segments, clip_number)
+                 clips_data.append(clip_data)
+                 print(f"[DEBUG] Created clip {clip_number} with {len(current_segments)} segments")
+
+                 clip_number += 1
+                 current_segments = []
+                 current_duration = 0
+
+                 remaining_scene_duration = scene_duration - remaining_duration
+                 print(f"[DEBUG] Remaining scene duration to process: {remaining_scene_duration:.2f}s")
+
+                 while remaining_scene_duration > 0:
+                     if remaining_scene_duration >= self.target_duration:
+                         segment_start = self._seconds_to_time(scene_start + scene_duration - remaining_scene_duration)
+                         segment_end = self._seconds_to_time(scene_start + scene_duration - remaining_scene_duration + self.target_duration)
+                         segment = {
+                             "start": segment_start,
+                             "end": segment_end,
+                             "confidence": scene["confidence"]
+                         }
+                         print(f"[DEBUG] Creating full clip from remaining: {segment_start} -> {segment_end}")
+                         clip_data = self._create_clip(video_path, [segment], clip_number)
+                         clips_data.append(clip_data)
+                         clip_number += 1
+                         remaining_scene_duration -= self.target_duration
+                     else:
+                         segment_start = self._seconds_to_time(scene_end - remaining_scene_duration)
+                         current_segments = [{
+                             "start": segment_start,
+                             "end": scene["end"],
+                             "confidence": scene["confidence"]
+                         }]
+                         print(f"[DEBUG] Keeping remainder for next clip: {segment_start} -> {scene['end']}")
+                         current_duration = remaining_scene_duration
+                         break
+             else:
+                 current_segments.append({
+                     "start": scene["start"],
+                     "end": scene["end"],
+                     "confidence": scene["confidence"]
+                 })
+                 current_duration += scene_duration
+                 print(f"[DEBUG] Added full scene to current clip. New duration: {current_duration:.2f}s")
+
+         if current_segments:
+             print(f"\n[DEBUG] Processing remaining segments")
+             clip_data = self._create_clip(video_path, current_segments, clip_number)
+             clips_data.append(clip_data)
+             print(f"[DEBUG] Created final clip {clip_number} with {len(current_segments)} segments")
+
+         print(f"\n[DEBUG] Generated {len(clips_data)} clips in total")
+         return clips_data
+
+     def _create_clip(self, video_path: str, segments: list, clip_number: int) -> dict:
+         """Create a clip from the given segments."""
+         total_duration = sum(
+             self._time_to_seconds(segment["end"]) - self._time_to_seconds(segment["start"])
+             for segment in segments
+         )
+
+         print(f"[DEBUG] Creating clip {clip_number}")
+         print(f"[DEBUG] Total duration: {total_duration:.2f}s")
+
+         # Create the subclips for each segment
+         subclips = []
+         video = VideoFileClip(video_path)
+
+         for segment in segments:
+             start_time = self._time_to_seconds(segment["start"])
+             end_time = self._time_to_seconds(segment["end"])
+             print(f"[DEBUG] Extracting segment: {segment['start']} -> {segment['end']}")
+             subclip = video.subclip(start_time, end_time)
+             subclips.append(subclip)
+
+         # Concatenate all the subclips
+         final_clip = concatenate_videoclips(subclips)
+
+         # Create a temporary file for the clip
+         temp_clip_file = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False)
+         final_clip.write_videofile(
+             temp_clip_file.name,
+             codec='libx264',
+             audio_codec='aac',
+             temp_audiofile='temp-audio.m4a',
+             remove_temp=True
+         )
+
+         # Close the clips to free memory
+         final_clip.close()
+         for subclip in subclips:
+             subclip.close()
+         video.close()
+
+         total_confidence = sum(segment["confidence"] for segment in segments)
+         avg_confidence = total_confidence / len(segments)
+
+         return {
+             "segments": segments,
+             "clip_number": clip_number,
+             "confidence": avg_confidence,
+             "duration": total_duration,
+             "file_path": temp_clip_file.name  # Add the file path
+         }
+
+ if __name__ == "__main__":
+     # Local test
+     test_video_path = "test_video.mp4"  # Replace with a real path
+     test_scenes = [
+         {
+             "start": "00:00:00.000",
+             "end": "00:00:30.000",
+             "recognized_sport": "surf",
+             "confidence": 0.95
+         },
+         {
+             "start": "00:00:40.000",
+             "end": "00:01:20.000",
+             "recognized_sport": "surf",
+             "confidence": 0.92
+         },
+         {
+             "start": "00:01:30.000",
+             "end": "00:02:00.000",
+             "recognized_sport": "not_surf",
+             "confidence": 0.88
+         }
+     ]
+
+     clip_generator = ClipGenerator(target_duration=60, min_confidence=0.9)
+     clips = clip_generator.generate_clips(test_video_path, test_scenes, "surf")
+
+     print("\nClips générés :")
+     for i, clip in enumerate(clips, 1):
+         print(f"\nClip {i}:")
+         print(f"Confidence: {clip['confidence']:.2%}")
+         print("Segments:")
+         for segment in clip['segments']:
+             print(f" {segment['start']} -> {segment['end']} (conf: {segment['confidence']:.2%})")
app/services/video_processing/compression.py CHANGED
@@ -1,7 +1,7 @@
  import subprocess
  from fastapi import HTTPException

- def compress_video(input_path: str, output_path: str, resolution: str = "192x144"):
+ def compress_video(input_path: str, output_path: str, resolution: str = "256x144"):
      """Compress video using FFmpeg for maximum reduction, with audio removed."""
      command = [
          "ffmpeg",
app/services/video_processing/hf_upload.py CHANGED
@@ -1,57 +1,58 @@
- from huggingface_hub import HfApi, create_repo
- import os
-
- class HFUploader:
-     def __init__(self):
-         self.hf_api = HfApi()
-         self.repo_id = os.getenv("HUGGINGFACE_REPO_ID")
-         self._ensure_repo_exists()
-
-
-     def _ensure_repo_exists(self):
-         try:
-             # Check whether the repo exists
-             self.hf_api.repo_info(repo_id=self.repo_id, repo_type="dataset")
-             print(f"Repository {self.repo_id} exists")
-         except Exception as e:
-             if "404" in str(e):  # Repo does not exist
-                 try:
-                     create_repo(
-                         self.repo_id,
-                         private=False,
-                         repo_type="dataset",
-                         token=os.getenv("HUGGINGFACE_TOKEN")
-                     )
-                     print(f"Created repository {self.repo_id}")
-                 except Exception as create_error:
-                     if "You already created this dataset repo" not in str(create_error):
-                         raise create_error
-             else:
-                 raise e
-
-     def ensure_folder_structure(self, sport_id: str):
-         paths = [
-             f"{sport_id}/raw",
-             f"{sport_id}/compressed"
-         ]
-         for path in paths:
-             try:
-                 self.hf_api.upload_file(
-                     path_or_fileobj="",
-                     path_in_repo=f"{path}/.gitkeep",
-                     repo_id=self.repo_id,
-                     repo_type="dataset",
-                     token=os.getenv("HUGGINGFACE_TOKEN")
-                 )
-             except Exception:
-                 pass
-
-     def upload_video(self, file_path: str, destination_path: str):
-         self.hf_api.upload_file(
-             path_or_fileobj=file_path,
-             path_in_repo=destination_path,
-             repo_id=self.repo_id,
-             repo_type="dataset",
-             token=os.getenv("HUGGINGFACE_TOKEN")
-         )
+ from huggingface_hub import HfApi, create_repo
+ import os
+
+ class HFUploader:
+     def __init__(self):
+         self.hf_api = HfApi()
+         self.repo_id = os.getenv("HUGGINGFACE_REPO_ID")
+         self._ensure_repo_exists()
+
+
+     def _ensure_repo_exists(self):
+         try:
+             # Check whether the repo exists
+             self.hf_api.repo_info(repo_id=self.repo_id, repo_type="dataset")
+             print(f"Repository {self.repo_id} exists")
+         except Exception as e:
+             if "404" in str(e):  # Repo does not exist
+                 try:
+                     create_repo(
+                         self.repo_id,
+                         private=False,
+                         repo_type="dataset",
+                         token=os.getenv("HUGGINGFACE_TOKEN")
+                     )
+                     print(f"Created repository {self.repo_id}")
+                 except Exception as create_error:
+                     if "You already created this dataset repo" not in str(create_error):
+                         raise create_error
+             else:
+                 raise e
+
+     def ensure_folder_structure(self, sport_id: str):
+         paths = [
+             f"{sport_id}/raw",
+             f"{sport_id}/compressed",
+             f"{sport_id}/clips"
+         ]
+         for path in paths:
+             try:
+                 self.hf_api.upload_file(
+                     path_or_fileobj="",
+                     path_in_repo=f"{path}/.gitkeep",
+                     repo_id=self.repo_id,
+                     repo_type="dataset",
+                     token=os.getenv("HUGGINGFACE_TOKEN")
+                 )
+             except Exception:
+                 pass
+
+     def upload_video(self, file_path: str, destination_path: str):
+         self.hf_api.upload_file(
+             path_or_fileobj=file_path,
+             path_in_repo=destination_path,
+             repo_id=self.repo_id,
+             repo_type="dataset",
+             token=os.getenv("HUGGINGFACE_TOKEN")
+         )
          return f"https://huggingface.co/datasets/{self.repo_id}/raw/main/{destination_path}"
app/services/video_processing/scene_classifier.py ADDED
@@ -0,0 +1,87 @@
+ import cv2
+ import torch
+ from transformers import AutoImageProcessor, AutoModelForImageClassification
+ from collections import Counter
+ from PIL import Image
+ import os
+
+ class SceneClassifier:
+     def __init__(self, model_path: str = "2nzi/Image_Surf_NotSurf"):
+         # print(f"[DEBUG] Initializing SceneClassifier with model: {model_path}")
+         try:
+             # Initialize the processor and the model
+             self.processor = AutoImageProcessor.from_pretrained(
+                 "google/vit-base-patch16-224",
+                 use_fast=True
+             )
+             self.model = AutoModelForImageClassification.from_pretrained(
+                 model_path,
+                 trust_remote_code=True
+             )
+             self.id_to_label = self.model.config.id2label
+             # print("[DEBUG] Model loaded successfully")
+         except Exception as e:
+             # print(f"[ERROR] Failed to load model: {str(e)}")
+             raise
+
+     def _time_to_seconds(self, time_str: str) -> float:
+         h, m, s = time_str.split(':')
+         return int(h) * 3600 + int(m) * 60 + float(s)
+
+     def _extract_frames(self, video_path: str, start_time: str, end_time: str, num_frames: int = 5) -> list:
+         cap = cv2.VideoCapture(video_path)
+         start_sec = self._time_to_seconds(start_time)
+         end_sec = self._time_to_seconds(end_time)
+         scene_duration = end_sec - start_sec
+         frame_interval = scene_duration / (num_frames + 1)
+
+         frames = []
+         for i in range(num_frames):
+             timestamp = start_sec + frame_interval * (i + 1)
+             cap.set(cv2.CAP_PROP_POS_MSEC, timestamp * 1000)
+             success, frame = cap.read()
+             if success:
+                 image_pil = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
+                 frames.append(image_pil)
+             else:
+                 print(f"[WARNING] Failed to extract frame at {timestamp} seconds")
+
+         cap.release()
+         return frames
+
+     def _classify_frame(self, frame: Image) -> dict:
+         inputs = self.processor(images=frame, return_tensors="pt")
+         with torch.no_grad():
+             outputs = self.model(**inputs)
+             probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
+             confidence, predicted_class = torch.max(probs, dim=-1)
+
+         return {
+             "label": self.id_to_label[predicted_class.item()],
+             "confidence": float(confidence.item())
+         }
+
+     def classify_scene(self, video_path: str, scene: dict) -> dict:
+         print(f"[DEBUG] Classifying scene: {scene['start']} -> {scene['end']}")
+         frames = self._extract_frames(video_path, scene["start"], scene["end"])
+         if not frames:
+             print("[WARNING] No frames extracted for classification")
+             return {"recognized_sport": "Unknown", "confidence": 0.0}
+
+         classifications = [self._classify_frame(frame) for frame in frames]
+         labels = [c["label"] for c in classifications]
+
+         label_counts = Counter(labels)
+         predominant_label, count = label_counts.most_common(1)[0]
+
+         confidence_avg = sum(
+             c["confidence"] for c in classifications
+             if c["label"] == predominant_label
+         ) / count
+
+         result = {
+             "recognized_sport": predominant_label,
+             "confidence": confidence_avg
+         }
+         print(f"[DEBUG] Classification result: {result}")
+         return result
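
classify_scene samples five frames per scene, takes a majority vote over the predicted labels, and averages the confidence of the winning label only. A small worked sketch of that aggregation with made-up frame results:

from collections import Counter

classifications = [
    {"label": "surf", "confidence": 0.97},
    {"label": "surf", "confidence": 0.91},
    {"label": "not_surf", "confidence": 0.60},
    {"label": "surf", "confidence": 0.95},
    {"label": "surf", "confidence": 0.89},
]
labels = [c["label"] for c in classifications]
predominant_label, count = Counter(labels).most_common(1)[0]
confidence_avg = sum(c["confidence"] for c in classifications
                     if c["label"] == predominant_label) / count
print(predominant_label, round(confidence_avg, 3))  # surf 0.93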
app/services/video_processing/scene_detection.py ADDED
@@ -0,0 +1,69 @@
+ import os
+ import tempfile
+ from fastapi import HTTPException
+ from scenedetect import detect, ContentDetector, SceneManager, open_video, scene_manager
+
+ class SceneDetector:
+     def __init__(self, min_scene_duration: float = 1.5, threshold: int = 30):
+         self.min_scene_duration = min_scene_duration
+         self.threshold = threshold
+
+     def detect_scenes(self, video_path: str) -> dict:
+         """Full scene detection and filtering pipeline."""
+         print(f"\n[DEBUG] Starting scene detection on video: {video_path}")
+         print(f"[DEBUG] Video file exists: {os.path.exists(video_path)}")
+
+         try:
+             # Open the video
+             video = open_video(video_path)
+
+             # Configure the scene detector
+             scene_manager = SceneManager()
+             scene_manager.add_detector(
+                 ContentDetector(threshold=self.threshold)
+             )
+
+             # Detect the scenes
+             scene_manager.detect_scenes(video)
+             scenes = scene_manager.get_scene_list()
+
+             # Format the scenes
+             formatted_scenes = []
+             for scene in scenes:
+                 start_time = scene[0].get_timecode()
+                 end_time = scene[1].get_timecode()
+                 duration = scene[1].get_seconds() - scene[0].get_seconds()
+
+                 if duration >= self.min_scene_duration:
+                     formatted_scenes.append({
+                         "start": str(start_time),
+                         "end": str(end_time),
+                         "recognized_sport": "",  # Filled in by the classifier
+                         "confidence": None  # Filled in by the classifier
+                     })
+
+             result = {
+                 "total_scenes": len(formatted_scenes),
+                 "scenes": formatted_scenes
+             }
+
+             # Debug logs
+             print("\n[DEBUG] Scene Detection Results:")
+             print(f"Total scenes detected: {result['total_scenes']}")
+             if result['scenes']:
+                 print("\nFirst 3 scenes details:")
+                 for i, scene in enumerate(result['scenes'][:3]):
+                     print(f"\nScene {i+1}:")
+                     print(f"  Start: {scene['start']}")
+                     print(f"  End: {scene['end']}")
+                     print(f"  Sport: {scene['recognized_sport']}")
+                     print(f"  Confidence: {scene['confidence']}")
+             else:
+                 print("No scenes detected!")
+             print("\n")
+
+             return result
+
+         except Exception as e:
+             print(f"[ERROR] Scene detection failed: {str(e)}")
+             raise HTTPException(status_code=500, detail=f"Scene detection failed: {str(e)}")
app/services/video_processing/scrape_hf.py ADDED
@@ -0,0 +1,24 @@
+ from bs4 import BeautifulSoup
+ import requests
+
+ def extract_video_urls(url):
+     # Fetch the page content
+     response = requests.get(url)
+     html_content = response.text
+
+     # Create a BeautifulSoup object
+     soup = BeautifulSoup(html_content, 'html.parser')
+
+     # Find all the source tags
+     video_sources = soup.find_all('source')
+
+     # Extract the URLs
+     video_urls = []
+     for source in video_sources:
+         url = source.get('src')
+         if url:
+             # Clean the URL by removing the #t=0.001 fragment
+             clean_url = url.split('#')[0]
+             video_urls.append(clean_url)
+
+     return video_urls
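
extract_video_urls simply scrapes the <source> tags out of an HTML page and strips the #t=0.001 fragment; processor.py points it at the dataset viewer page. A minimal usage sketch, with the repository id as a placeholder:

from app.services.video_processing.scrape_hf import extract_video_urls

base_viewer_url = "https://huggingface.co/datasets/<user>/<dataset>/viewer/default/files"
for video_url in extract_video_urls(base_viewer_url):
    print(video_url)  # e.g. .../surf/clips/<video_uuid>_clip_1.mp4, without the #t= fragment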
main.py CHANGED
@@ -20,56 +20,6 @@ app.add_middleware(
      allow_headers=["*"]
  )

- # CORS configuration
- app.add_middleware(
-     CORSMiddleware,
-     allow_origins=["*"],
-     allow_credentials=True,
-     allow_methods=["POST", "GET"],
-     allow_headers=["*"]
- )
-
- def get_user(res: Response,
-              cred: HTTPAuthorizationCredentials = Depends(HTTPBearer(auto_error=False))):
-     if cred is None:
-         raise HTTPException(
-             status_code=status.HTTP_401_UNAUTHORIZED,
-             detail="Bearer authentication required",
-             headers={'WWW-Authenticate': 'Bearer realm="auth_required"'},
-         )
-     try:
-         decoded_token = auth.verify_id_token(
-             cred.credentials,
-             check_revoked=True,
-             clock_skew_seconds=1800
-         )
-         user_id = decoded_token['uid']
-
-         user_doc = db.collection('users').document(user_id).get()
-         if not user_doc.exists:
-             raise HTTPException(status_code=401, detail="Utilisateur non trouvé dans Firestore")
-
-         user_data = user_doc.to_dict()
-         user_role = user_data.get('role', 'user_extern')
-         decoded_token['role'] = user_role
-         res.headers['WWW-Authenticate'] = 'Bearer realm="auth_required"'
-
-         return decoded_token
-     except Exception as err:
-         raise HTTPException(
-             status_code=status.HTTP_401_UNAUTHORIZED,
-             detail=f"Invalid authentication credentials. {err}",
-             headers={'WWW-Authenticate': 'Bearer error="invalid_token"'},
-         )
-
- def require_role(allowed_roles):
-     def role_checker(user_info=Depends(get_user)):
-         if user_info['role'] not in allowed_roles:
-             raise HTTPException(status_code=403, detail="Accès non autorisé")
-         return user_info
-     return role_checker
-
- # Include the videos router
  app.include_router(videos_router, prefix="/api")

  @app.get("/")