arieltoledo commited on
Commit
b2ab75b
·
verified ·
1 Parent(s): 7e5dd0a

Upload folder using huggingface_hub

Browse files
Files changed (4) hide show
  1. README.md +1 -1
  2. app.py +84 -0
  3. requirements.txt +86 -0
  4. yolov8n.pt +3 -0
README.md CHANGED
@@ -1,5 +1,5 @@
1
  ---
2
- title: Aegis Air
3
  emoji: 🐠
4
  colorFrom: pink
5
  colorTo: purple
 
1
  ---
2
+ title: aegis_air
3
  emoji: 🐠
4
  colorFrom: pink
5
  colorTo: purple
app.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import supervision as sv
3
+ from ultralytics import YOLO
4
+ import gradio as gr
5
+ import torch
6
+ import spaces
7
+ import os
8
+
9
# --- Shared module-level state ----------------------------------------------
# Supervision tracking / annotation helpers reused across every frame.
tracker = sv.ByteTrack()
box_annotator = sv.BoxAnnotator()
label_annotator = sv.LabelAnnotator()
trace_annotator = sv.TraceAnnotator()

# Run inference on GPU when available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = YOLO("yolov8n.pt").to(device)

# Inference-resolution presets.  Only HIGH_RES_SIZE is currently consumed by
# process_video; FRAME_INTERVAL, LOW_RES_SIZE and MID_RES_SIZE are kept for
# experimentation / backward compatibility.
FRAME_INTERVAL = 35
LOW_RES_SIZE = (640, 640)
MID_RES_SIZE = (1280, 1280)
HIGH_RES_SIZE = (1088, 1920)

# Running maximum of persons detected in the video currently being processed.
MAX_PERSONS = 0

# Basic-auth credentials for the Gradio app, taken from the environment.
# Fall back to None (auth disabled) when either variable is missing: the
# original [(os.getenv(...), os.getenv(...))] form yields [(None, None)]
# when unset, which breaks Gradio's auth handling instead of disabling it.
_auth_user = os.getenv('USERNAME')
_auth_password = os.getenv('PASSWORD')
auth_users = [(_auth_user, _auth_password)] if _auth_user and _auth_password else None
24
+
25
@spaces.GPU
def process_video(frame: np.ndarray, _: int) -> np.ndarray:
    """Detect, track and annotate persons in a single video frame.

    Runs YOLO on the frame, keeps only detections of the "person" class
    (COCO class id 0), updates the module-level running maximum person
    count, feeds the detections through ByteTrack, and returns a copy of
    the frame annotated with boxes, tracker-id labels and motion traces.

    The second parameter is the frame index supplied by
    ``sv.process_video``; it is unused.
    """
    global MAX_PERSONS

    # Single-image inference at the high-resolution preset; [0] grabs the
    # one Results object produced for this frame.
    result = model(frame, imgsz=HIGH_RES_SIZE)[0]
    persons = sv.Detections.from_ultralytics(result)
    persons = persons[persons.class_id == 0]  # COCO id 0 == "person"

    # Maintain the per-video maximum simultaneous person count.
    current_person_count = len(persons)
    MAX_PERSONS = max(MAX_PERSONS, current_person_count)

    print(f'Personas detectadas: {current_person_count}, Máximo: {MAX_PERSONS}')

    tracked = tracker.update_with_detections(persons)

    # "#<tracker id> <class name>" label per tracked detection.
    labels = [
        f"#{tid} {result.names[cid]}"
        for cid, tid in zip(tracked.class_id, tracked.tracker_id)
    ]

    annotated = box_annotator.annotate(frame.copy(), detections=tracked)
    annotated = label_annotator.annotate(annotated, detections=tracked, labels=labels)
    return trace_annotator.annotate(annotated, detections=tracked)
51
+
52
def upload_video(video_path):
    """Process an uploaded video and return ``(output_path, max_persons)``.

    Resets the per-video state, runs :func:`process_video` over every frame
    via ``sv.process_video`` and writes the annotated result to a fixed
    output file.

    :param video_path: path of the source video to annotate.
    :returns: tuple of the annotated video path and the maximum number of
        persons detected in any single frame.
    """
    global MAX_PERSONS
    MAX_PERSONS = 0  # restart the counter for each video

    # Bug fix: also reset the ByteTrack state; otherwise track IDs and
    # internal tracker history leak from one processed video into the next.
    tracker.reset()

    output_video_path = "output_video.mp4"

    # Remove a stale result from a previous run, if any.
    if os.path.exists(output_video_path):
        os.remove(output_video_path)

    sv.process_video(
        source_path=video_path,
        target_path=output_video_path,
        callback=process_video,
    )

    return output_video_path, MAX_PERSONS
64
+
65
def _process_video_gradio(video):
    """Gradio click handler: validate input, run the pipeline, format output."""
    if video is None:
        return None, "No se ha subido ningún video"

    processed_video, total_persons = upload_video(video)
    return processed_video, f"Máximo número de personas detectadas: {total_persons}"


# UI: input video on a row, annotated output, a read-only counter box and a
# submit button wired to the handler above.
with gr.Blocks() as demo:
    gr.Markdown("# Aegis Air Demo")

    with gr.Row():
        video_input = gr.Video(label="Sube tu video aquí")

    video_output = gr.Video(label="Video Anotado")
    persons_output = gr.Textbox(label="Total de Personas Detectadas", interactive=False)

    submit_button = gr.Button("Procesar")
    submit_button.click(
        _process_video_gradio,
        inputs=video_input,
        outputs=[video_output, persons_output],
    )

# Launch with the module-level auth configuration.
demo.launch(auth=auth_users)
requirements.txt ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ aiofiles==23.2.1
2
+ annotated-types==0.7.0
3
+ anyio==4.4.0
4
+ certifi==2024.8.30
5
+ charset-normalizer==3.3.2
6
+ click==8.1.7
7
+ contourpy==1.3.0
8
+ cycler==0.12.1
9
+ defusedxml==0.7.1
10
+ fastapi==0.114.1
11
+ ffmpy==0.4.0
12
+ filelock==3.16.0
13
+ fonttools==4.53.1
14
+ fsspec==2024.9.0
15
+ gradio==4.44.0
16
+ gradio_client==1.3.0
17
+ h11==0.14.0
18
+ httpcore==1.0.5
19
+ httpx==0.27.2
20
+ huggingface-hub==0.24.7
21
+ idna==3.8
22
+ importlib_resources==6.4.5
23
+ Jinja2==3.1.4
24
+ kiwisolver==1.4.7
25
+ markdown-it-py==3.0.0
26
+ MarkupSafe==2.1.5
27
+ matplotlib==3.9.2
28
+ mdurl==0.1.2
29
+ mpmath==1.3.0
30
+ networkx==3.3
31
+ numpy==1.26.4
32
+ nvidia-cublas-cu12==12.1.3.1
33
+ nvidia-cuda-cupti-cu12==12.1.105
34
+ nvidia-cuda-nvrtc-cu12==12.1.105
35
+ nvidia-cuda-runtime-cu12==12.1.105
36
+ nvidia-cudnn-cu12==9.1.0.70
37
+ nvidia-cufft-cu12==11.0.2.54
38
+ nvidia-curand-cu12==10.3.2.106
39
+ nvidia-cusolver-cu12==11.4.5.107
40
+ nvidia-cusparse-cu12==12.1.0.106
41
+ nvidia-nccl-cu12==2.20.5
42
+ nvidia-nvjitlink-cu12==12.6.68
43
+ nvidia-nvtx-cu12==12.1.105
44
+ opencv-python==4.10.0.84
45
+ opencv-python-headless==4.10.0.84
46
+ orjson==3.10.7
47
+ packaging==24.1
48
+ pandas==2.2.2
49
+ pillow==10.4.0
50
+ psutil==6.0.0
51
+ py-cpuinfo==9.0.0
52
+ pydantic==2.9.1
53
+ pydantic_core==2.23.3
54
+ pydub==0.25.1
55
+ Pygments==2.18.0
56
+ pyparsing==3.1.4
57
+ python-dateutil==2.9.0.post0
58
+ python-multipart==0.0.9
59
+ pytz==2024.1
60
+ PyYAML==6.0.2
61
+ requests==2.32.3
62
+ rich==13.8.1
63
+ ruff==0.6.4
64
+ scipy==1.14.1
65
+ seaborn==0.13.2
66
+ semantic-version==2.10.0
67
+ setuptools==74.1.2
68
+ shellingham==1.5.4
69
+ six==1.16.0
70
+ sniffio==1.3.1
71
+ starlette==0.38.5
72
+ supervision==0.23.0
73
+ sympy==1.13.2
74
+ tomlkit==0.12.0
75
+ torch
76
+ torchvision
77
+ tqdm==4.66.5
78
+ triton==3.0.0
79
+ typer==0.12.5
80
+ typing_extensions==4.12.2
81
+ tzdata==2024.1
82
+ ultralytics==8.2.90
83
+ ultralytics-thop==2.0.6
84
+ urllib3==2.2.2
85
+ uvicorn==0.30.6
86
+ websockets==12.0
yolov8n.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f59b3d833e2ff32e194b5bb8e08d211dc7c5bdf144b90d2c8412c47ccfc83b36
3
+ size 6549796