"""Background video-processing pipeline: compress an uploaded video, detect
and classify its scenes, generate per-scene clips, upload everything to a
Hugging Face dataset, and record the resulting URLs in Firestore."""

import os
import tempfile
import time

from firebase_admin import firestore

from ..core.firebase import db
from .video_processing.clip_generator import ClipGenerator
from .video_processing.compression import compress_video
from .video_processing.hf_upload import HFUploader
from .video_processing.scene_classifier import SceneClassifier
from .video_processing.scene_detection import SceneDetector
from .video_processing.scrape_hf import extract_video_urls


async def process_video(video_uuid: str, content: bytes, user_id: str, sport_id: str):
    # Note: this coroutine performs blocking work (ffmpeg, synchronous
    # Firestore calls, time.sleep); run it off the request path, e.g. as a
    # background task.
    temp_files = []
    video_ref = db.collection('videos').document(video_uuid)
    try:
        video_data = video_ref.get().to_dict()
        if not video_data:
            raise ValueError(f"Video document {video_uuid} not found in Firestore")

        hf_uploader = HFUploader()
        # The sport_id stored on the video document takes precedence over the
        # argument passed in
        sport_id = video_data['sport_id']
        
        # Ensure folder structure exists
        hf_uploader.ensure_folder_structure(sport_id)
            
        # Create temp files
        temp_raw_file = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False)
        temp_compressed_file = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False)
        temp_files.extend([temp_raw_file.name, temp_compressed_file.name])
        
        # Write raw video and close file
        temp_raw_file.write(content)
        temp_raw_file.close()
        
        # Close the compressed-file handle before compressing so the external
        # encoder can write to the path (an open handle can block it on some
        # platforms)
        temp_compressed_file.close()
        compress_video(temp_raw_file.name, temp_compressed_file.name)

        # Detect scenes
        scene_detector = SceneDetector()
        scene_classifier = SceneClassifier()
        scenes_data = scene_detector.detect_scenes(temp_compressed_file.name)
        
        # Classify each scene
        for scene in scenes_data["scenes"]:
            classification = scene_classifier.classify_scene(
                temp_compressed_file.name, 
                scene
            )
            scene["recognized_sport"] = classification["recognized_sport"]
            scene["confidence"] = classification["confidence"]

        # Generate clips
        clip_generator = ClipGenerator()
        generated_clips = clip_generator.generate_clips(
            temp_compressed_file.name,
            scenes_data["scenes"],
            sport_id
        )

        # Create and upload clips
        final_clips_data = []
        user_ref = db.collection("users").document(user_id)
        user_data = user_ref.get().to_dict() or {}
        user_data.setdefault("clips", [])

        # Upload both versions first
        raw_path = f"{sport_id}/raw/{video_uuid}.mp4"
        compressed_path = f"{sport_id}/compressed/{video_uuid}.mp4"
        
        hf_uploader.upload_video(temp_raw_file.name, raw_path)
        hf_uploader.upload_video(temp_compressed_file.name, compressed_path)

        # Give Hugging Face a moment to index the uploaded files
        time.sleep(3)

        # Base URL of the dataset viewer page that gets scraped for file URLs
        base_viewer_url = f"https://huggingface.co/datasets/{hf_uploader.repo_id}/viewer/default/files"

        # Fetch the resolved URLs, retrying while indexing catches up
        max_retries = 3
        raw_url = None
        compressed_url = None
        
        for attempt in range(max_retries):
            try:
                urls = extract_video_urls(base_viewer_url)
                
                # Look for the viewer URLs that match the uploaded paths
                raw_url = next((url for url in urls if raw_path in url), None)
                compressed_url = next((url for url in urls if compressed_path in url), None)

                if raw_url and compressed_url:
                    print(f"[SUCCESS] URLs found on attempt {attempt + 1}")
                    break

                print(f"[RETRY] Attempt {attempt + 1}/{max_retries}")
                time.sleep(2)
            except Exception as e:
                print(f"[ERROR] Attempt {attempt + 1} failed: {str(e)}")
                if attempt == max_retries - 1:
                    raise
                time.sleep(2)
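
        # Aside (an assumption, not part of the original flow): for a public
        # dataset repo, uploaded files are normally also reachable at a stable
        # "resolve" URL, which would avoid scraping the viewer page entirely:
        #
        #     raw_url = (f"https://huggingface.co/datasets/"
        #                f"{hf_uploader.repo_id}/resolve/main/{raw_path}")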

        # Process clips
        for clip_data in generated_clips:
            clip_number = clip_data["clip_number"]
            clip_file_path = clip_data["file_path"]
            temp_files.append(clip_file_path)
            
            clip_path = f"{sport_id}/clips/{video_uuid}_clip_{clip_number}.mp4"
            hf_uploader.upload_video(clip_file_path, clip_path)
            
            # Wait, then scrape the clip URL from the viewer
            time.sleep(2)
            clip_url = None
            
            for attempt in range(max_retries):
                try:
                    urls = extract_video_urls(base_viewer_url)
                    clip_url = next((url for url in urls if clip_path in url), None)
                    if clip_url:
                        break
                    time.sleep(2)
                except Exception:
                    if attempt == max_retries - 1:
                        raise
                    time.sleep(2)
            
            if clip_url:
                final_clips_data.append({
                    "clip_id": f"{video_uuid}_clip_{clip_number}",
                    "video_uuid": video_uuid,
                    "url": clip_url,
                    "duration": clip_data["duration"],
                    "confidence": clip_data["confidence"],
                    "segments": clip_data["segments"]
                })

                # Track the clip on the user document; without this the
                # user_ref.set() below writes back unchanged data
                user_data["clips"].append(f"{video_uuid}_clip_{clip_number}")

                clip_ref = db.collection("clips").document(f"{video_uuid}_clip_{clip_number}")
                clip_ref.set({
                    "clip_id": f"{video_uuid}_clip_{clip_number}",
                    "sport_id": sport_id,
                    "url": clip_url,
                    "duration": clip_data["duration"]
                })

        # Update user data
        user_ref.set(user_data)

        # Update video data with scraped URLs
        update_data = {
            "scenes": scenes_data,
            "clips": final_clips_data,
            "clips_count": len(final_clips_data),
            "status": "ready",
            "last_updated": firestore.SERVER_TIMESTAMP
        }

        if raw_url:
            update_data["raw_video_url"] = raw_url
        if compressed_url:
            update_data["compressed_video_url"] = compressed_url

        video_ref.update(update_data)
                
    except Exception as e:
        print(f"Error while processing video {video_uuid}: {str(e)}")
        video_ref.update({"status": "error", "error": str(e)})
        
    finally:
        # Clean up temp files
        for temp_file in temp_files:
            try:
                if os.path.exists(temp_file):
                    os.unlink(temp_file)
            except Exception as e:
                print(f"[WARNING] Failed to delete temporary file {temp_file}: {str(e)}")