Upload 11 files
- .gitattributes +6 -35
- .gitignore +207 -0
- CITATION.cff +29 -0
- CONTRIBUTING.md +123 -0
- DEPLOYMENT.md +269 -0
- LICENSE +21 -0
- Vehicle_Speed_Estimation_main.ipynb +0 -0
- app.py +329 -0
- config.py +205 -0
- main.py +417 -0
- requirements.txt +10 -0
.gitattributes
CHANGED
@@ -1,35 +1,6 @@
-*.
-*.
-*.
-*.
-*.
-*.
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.mp4 filter=lfs diff=lfs merge=lfs -text
+*.avi filter=lfs diff=lfs merge=lfs -text
+*.mov filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,207 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[codz]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py.cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# UV
+# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+#uv.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+#poetry.toml
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
+# https://pdm-project.org/en/latest/usage/project/#working-with-version-control
+#pdm.lock
+#pdm.toml
+.pdm-python
+.pdm-build/
+
+# pixi
+# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
+#pixi.lock
+# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
+# in the .venv directory. It is recommended not to include this directory in version control.
+.pixi
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.envrc
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+# Abstra
+# Abstra is an AI-powered process automation framework.
+# Ignore directories containing user credentials, local state, and settings.
+# Learn more at https://abstra.io/docs
+.abstra/
+
+# Visual Studio Code
+# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
+# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
+# and can be added to the global gitignore or merged into this file. However, if you prefer,
+# you could uncomment the following to ignore the entire vscode folder
+# .vscode/
+
+# Ruff stuff:
+.ruff_cache/
+
+# PyPI configuration file
+.pypirc
+
+# Cursor
+# Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
+# exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
+# refer to https://docs.cursor.com/context/ignore-files
+.cursorignore
+.cursorindexingignore
+
+# Marimo
+marimo/_static/
+marimo/_lsp/
+__marimo__/
CITATION.cff
ADDED
@@ -0,0 +1,29 @@
+cff-version: 1.2.0
+message: "If you use this software, please cite it as below."
+type: software
+title: "Vehicle Speed Estimation and Counting System"
+version: 1.0.0
+date-released: 2024-12-06
+authors:
+  - family-names: "Gupta"
+    given-names: "Abhay"
+    affiliation: "Student ID: 0205CC221005"
+  - family-names: "Lakhera"
+    given-names: "Aditi"
+    affiliation: "Student ID: 0205CC221011"
+  - family-names: "Patel"
+    given-names: "Balraj"
+    affiliation: "Student ID: 0205CC221049"
+  - family-names: "Patel"
+    given-names: "Bhumika"
+    affiliation: "Student ID: 0205CC221050"
+repository-code: "https://huggingface.co/spaces/YOUR_USERNAME/vehicle-speed-estimation"
+keywords:
+  - computer-vision
+  - object-detection
+  - vehicle-tracking
+  - speed-estimation
+  - yolo
+  - opencv
+  - traffic-analysis
+license: MIT
CONTRIBUTING.md
ADDED
@@ -0,0 +1,123 @@
+# Contributing to Vehicle Speed Estimation System
+
+Thank you for your interest in contributing to this project!
+
+## Development Team
+
+This project was developed by:
+- Abhay Gupta (0205CC221005)
+- Aditi Lakhera (0205CC221011)
+- Balraj Patel (0205CC221049)
+- Bhumika Patel (0205CC221050)
+
+## How to Contribute
+
+### Reporting Issues
+
+If you find a bug or have a suggestion:
+
+1. Check if the issue already exists
+2. Create a new issue with:
+   - Clear description
+   - Steps to reproduce (for bugs)
+   - Expected vs actual behavior
+   - System information (OS, Python version, etc.)
+
+### Code Contributions
+
+1. **Fork the repository**
+2. **Create a feature branch**
+   ```bash
+   git checkout -b feature/your-feature-name
+   ```
+
+3. **Make your changes**
+   - Follow the existing code style
+   - Add docstrings to new functions/classes
+   - Include type hints where appropriate
+   - Add error handling
+
+4. **Test your changes**
+   - Ensure existing functionality still works
+   - Test edge cases
+   - Verify error handling
+
+5. **Commit your changes**
+   ```bash
+   git commit -m "Add: brief description of changes"
+   ```
+
+6. **Push and create a Pull Request**
+   ```bash
+   git push origin feature/your-feature-name
+   ```
+
+## Code Style Guidelines
+
+### Python Style
+
+- Follow PEP 8 guidelines
+- Use meaningful variable names
+- Maximum line length: 100 characters
+- Use type hints for function parameters and returns
+
+### Documentation
+
+- Add docstrings to all functions and classes
+- Use Google-style docstrings
+- Include examples where helpful
+- Update README.md for new features
+
+### Example Docstring
+
+```python
+def calculate_speed(distance: float, time: float) -> float:
+    """
+    Calculate speed from distance and time.
+
+    Args:
+        distance: Distance traveled in meters
+        time: Time elapsed in seconds
+
+    Returns:
+        Speed in meters per second
+
+    Raises:
+        ValueError: If time is zero or negative
+
+    Example:
+        >>> calculate_speed(100, 10)
+        10.0
+    """
+    if time <= 0:
+        raise ValueError("Time must be positive")
+    return distance / time
+```
+
+## Project Structure
+
+```
+src/
+├── __init__.py          # Package initialization
+├── annotator.py         # Frame annotation
+├── speed_estimator.py   # Speed calculation
+├── view_transformer.py  # Perspective transformation
+└── exceptions.py        # Custom exceptions
+```
+
+## Testing
+
+Before submitting:
+
+1. Test with different video inputs
+2. Verify error handling works
+3. Check that configuration changes work
+4. Ensure Gradio interface functions properly
+
+## Questions?
+
+Feel free to open an issue for any questions or clarifications.
+
+## License
+
+By contributing, you agree that your contributions will be licensed under the MIT License.
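The project structure above lists `src/exceptions.py` for custom exceptions, and `main.py` later in this upload imports `VideoProcessingError`, `ModelLoadError`, and `ConfigurationError` from it, but the `src/` package itself is not among the files shown here. As a reading aid, here is a minimal sketch of what that module presumably contains, based only on the names imported in `main.py`; the shared base class is an assumption, not something confirmed by the upload:

```python
"""Hypothetical sketch of src/exceptions.py (not included in this upload)."""


class VehicleDetectionError(Exception):
    """Assumed common base class for pipeline errors."""


class VideoProcessingError(VehicleDetectionError):
    """Raised when a video cannot be opened, read, or written."""


class ModelLoadError(VehicleDetectionError):
    """Raised when the YOLO model cannot be loaded."""


class ConfigurationError(VehicleDetectionError):
    """Raised when configuration parameters are invalid."""
```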
DEPLOYMENT.md
ADDED
@@ -0,0 +1,269 @@
+# Hugging Face Deployment Guide
+
+## 📋 Pre-Deployment Checklist
+
+Before uploading to Hugging Face Spaces, ensure:
+
+- [x] All code files are plagiarism-free with original implementations
+- [x] Student attribution is present in all files
+- [x] README.md has HF metadata header
+- [x] requirements.txt includes all dependencies
+- [x] .gitattributes is configured for Git LFS
+- [x] LICENSE file is present
+- [x] Error handling is comprehensive
+- [ ] Sample video is available (optional)
+
+## 🚀 Deployment Steps
+
+### 1. Create a New Space on Hugging Face
+
+1. Go to [Hugging Face Spaces](https://huggingface.co/new-space)
+2. Click "Create new Space"
+3. Fill in the details:
+   - **Space name**: `vehicle-speed-estimation` (or your choice)
+   - **License**: MIT
+   - **SDK**: Gradio
+   - **Visibility**: Public or Private
+
+### 2. Upload Files
+
+You can upload files via:
+
+#### Option A: Web Interface
+
+1. Navigate to your Space
+2. Click "Files and versions"
+3. Upload all files from the project directory:
+   ```
+   app.py
+   main.py
+   config.py
+   requirements.txt
+   README.md
+   LICENSE
+   CITATION.cff
+   CONTRIBUTING.md
+   .gitattributes
+   src/
+   ├── __init__.py
+   ├── annotator.py
+   ├── speed_estimator.py
+   ├── view_transformer.py
+   └── exceptions.py
+   ```
+
+#### Option B: Git Command Line
+
+```bash
+# Clone your Space repository
+git clone https://huggingface.co/spaces/YOUR_USERNAME/vehicle-speed-estimation
+cd vehicle-speed-estimation
+
+# Copy all files from the project
+cp -r /path/to/Vehicle-Speed-Estimation-and-Counting-YOLO-Supervision/* .
+
+# Add and commit
+git add .
+git commit -m "Initial commit: Vehicle Speed Estimation System"
+
+# Push to Hugging Face
+git push
+```
+
+### 3. Configure Git LFS for Large Files
+
+If you have model files or sample videos:
+
+```bash
+# Install Git LFS
+git lfs install
+
+# Track large files
+git lfs track "*.pt"
+git lfs track "*.mp4"
+
+# Add .gitattributes
+git add .gitattributes
+git commit -m "Configure Git LFS"
+git push
+```
+
+### 4. Wait for Build
+
+- Hugging Face will automatically build your Space
+- Check the "Logs" tab for build progress
+- Build typically takes 2-5 minutes
+
+### 5. Test Your Space
+
+Once built:
+1. Upload a test video
+2. Verify processing works
+3. Check output video quality
+4. Test error handling with invalid inputs
+
+## ⚙️ Configuration Notes
+
+### Model Files
+
+The YOLO model will be downloaded automatically on first run. If you want to include a pre-downloaded model:
+
+1. Download the model (e.g., `yolov8n.pt`)
+2. Create a `models/` directory
+3. Upload via Git LFS:
+   ```bash
+   git lfs track "models/*.pt"
+   git add models/yolov8n.pt
+   git commit -m "Add YOLO model"
+   git push
+   ```
+
+### Sample Videos
+
+To include example videos:
+
+1. Create a `data/` directory
+2. Add sample videos (keep under 100MB each)
+3. Upload via Git LFS:
+   ```bash
+   git lfs track "data/*.mp4"
+   git add data/vehicles.mp4
+   git commit -m "Add sample video"
+   git push
+   ```
+
+### Environment Variables
+
+If you need to set environment variables:
+
+1. Go to Space Settings
+2. Click "Variables and secrets"
+3. Add any required variables
+
+## 🐛 Troubleshooting
+
+### Build Fails
+
+**Issue**: Dependencies fail to install
+
+**Solution**:
+- Check `requirements.txt` for version conflicts
+- Ensure all packages are available on PyPI
+- Try pinning specific versions
+
+### Out of Memory
+
+**Issue**: Space runs out of memory during processing
+
+**Solution**:
+- Upgrade to a better hardware tier (Settings → Hardware)
+- Reduce video resolution in preprocessing
+- Use smaller YOLO model (yolov8n instead of yolov8l)
+
+### Slow Processing
+
+**Issue**: Video processing is very slow
+
+**Solution**:
+- Upgrade to GPU hardware (Settings → Hardware)
+- Use smaller model for faster inference
+- Reduce input video resolution
+
+### Model Download Fails
+
+**Issue**: YOLO model fails to download
+
+**Solution**:
+- Pre-download model and include in repository
+- Check internet connectivity in Space
+- Use alternative model source
+
+## 📊 Hardware Recommendations
+
+| Video Resolution | Model Size | Recommended Hardware |
+|------------------|------------|----------------------|
+| 720p             | YOLOv8n    | CPU Basic (Free)     |
+| 1080p            | YOLOv8n/s  | CPU Upgrade          |
+| 1080p            | YOLOv8m/l  | T4 Small (GPU)       |
+| 4K               | YOLOv8l    | A10G Small (GPU)     |
+
+## 🔒 Security Notes
+
+- Never commit API keys or secrets to the repository
+- Use Hugging Face Secrets for sensitive data
+- Validate all user inputs
+- Limit file upload sizes
+
+## 📈 Monitoring
+
+After deployment:
+
+1. Monitor Space usage in Settings
+2. Check error logs regularly
+3. Review user feedback
+4. Update dependencies periodically
+
+## 🔄 Updating Your Space
+
+To update after deployment:
+
+```bash
+# Make changes locally
+# Test thoroughly
+
+# Commit and push
+git add .
+git commit -m "Description of changes"
+git push
+
+# Space will automatically rebuild
+```
+
+## 📝 Post-Deployment
+
+1. Test all functionality
+2. Share Space link with team
+3. Add Space to your profile
+4. Consider writing a blog post about the project
+5. Share on social media
+
+## 🎓 Academic Use
+
+For academic submissions:
+
+1. Ensure all student names are visible in README
+2. Include enrollment numbers
+3. Add CITATION.cff for proper attribution
+4. Document all external libraries used
+5. Include technical documentation
+
+## ✅ Final Verification
+
+Before marking as complete:
+
+- [ ] Space builds successfully
+- [ ] Video upload works
+- [ ] Processing completes without errors
+- [ ] Output video is downloadable
+- [ ] Statistics are displayed correctly
+- [ ] Error messages are user-friendly
+- [ ] All student names are visible
+- [ ] README renders correctly
+- [ ] License is visible
+
+## 🎉 Success!
+
+Your Vehicle Speed Estimation System is now live on Hugging Face Spaces!
+
+Share your Space URL:
+```
+https://huggingface.co/spaces/YOUR_USERNAME/vehicle-speed-estimation
+```
+
+---
+
+**Developed by:**
+- Abhay Gupta (0205CC221005)
+- Aditi Lakhera (0205CC221011)
+- Balraj Patel (0205CC221049)
+- Bhumika Patel (0205CC221050)
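The troubleshooting advice in DEPLOYMENT.md suggests reducing video resolution when a Space runs out of memory or processes too slowly, but does not show how. A minimal OpenCV sketch of one way to do that as a preprocessing step; the `downscale_video` helper and the 720-pixel target are illustrative choices, not part of the project:

```python
import cv2


def downscale_video(src_path: str, dst_path: str, max_height: int = 720) -> str:
    """Re-encode a video so its height is at most max_height, preserving aspect ratio."""
    cap = cv2.VideoCapture(src_path)
    if not cap.isOpened():
        raise ValueError(f"Cannot open video: {src_path}")

    fps = cap.get(cv2.CAP_PROP_FPS)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    scale = min(1.0, max_height / height)
    new_size = (int(width * scale), int(height * scale))

    writer = cv2.VideoWriter(dst_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, new_size)
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        # Resize each frame before writing it to the smaller output video
        writer.write(cv2.resize(frame, new_size))

    cap.release()
    writer.release()
    return dst_path
```

Note that the counting-line Y coordinate and the perspective `source_points` in `config.py` are expressed in pixels, so if the input is downscaled they would need to be rescaled by the same factor.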
LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 Abhay Gupta, Aditi Lakhera, Balraj Patel, Bhumika Patel
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
Vehicle_Speed_Estimation_main.ipynb
ADDED
The diff for this file is too large to render.
app.py
ADDED
@@ -0,0 +1,329 @@
+"""
+Vehicle Speed Estimation and Counting - Gradio Interface
+=========================================================
+
+A real-time vehicle detection, tracking, counting, and speed estimation system
+using YOLO object detection and perspective transformation techniques.
+
+Authors:
+- Abhay Gupta (0205CC221005)
+- Aditi Lakhera (0205CC221011)
+- Balraj Patel (0205CC221049)
+- Bhumika Patel (0205CC221050)
+
+This application provides an interactive web interface for analyzing traffic videos
+and estimating vehicle speeds using computer vision techniques.
+"""
+
+import os
+import sys
+import tempfile
+import logging
+from pathlib import Path
+from typing import Optional, Tuple
+
+import gradio as gr
+import cv2
+import numpy as np
+
+# Configure logging
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+)
+logger = logging.getLogger(__name__)
+
+# Import application modules
+try:
+    from main import process_video
+    from config import VehicleDetectionConfig
+except ImportError as e:
+    logger.error(f"Failed to import required modules: {e}")
+    raise
+
+
+def validate_video_file(video_path: str) -> Tuple[bool, str]:
+    """
+    Validate uploaded video file.
+
+    Args:
+        video_path: Path to video file
+
+    Returns:
+        Tuple of (is_valid, error_message)
+    """
+    if not video_path:
+        return False, "No video file provided"
+
+    if not os.path.exists(video_path):
+        return False, f"Video file not found: {video_path}"
+
+    # Check file size (limit to 100MB for HF Spaces)
+    file_size_mb = os.path.getsize(video_path) / (1024 * 1024)
+    if file_size_mb > 100:
+        return False, f"Video file too large ({file_size_mb:.1f}MB). Maximum size is 100MB"
+
+    # Validate video can be opened
+    try:
+        cap = cv2.VideoCapture(video_path)
+        if not cap.isOpened():
+            return False, "Unable to open video file. Please ensure it's a valid video format"
+
+        # Check if video has frames
+        ret, _ = cap.read()
+        cap.release()
+
+        if not ret:
+            return False, "Video file appears to be empty or corrupted"
+
+        return True, ""
+    except Exception as e:
+        return False, f"Error validating video: {str(e)}"
+
+
+def estimate_vehicle_speed(
+    video_file: str,
+    model_choice: str,
+    line_position: int,
+    confidence_threshold: float,
+    progress=gr.Progress()
+) -> Tuple[Optional[str], str]:
+    """
+    Process video and estimate vehicle speeds.
+
+    Args:
+        video_file: Path to uploaded video
+        model_choice: YOLO model selection
+        line_position: Y-coordinate for counting line
+        confidence_threshold: Detection confidence threshold
+        progress: Gradio progress tracker
+
+    Returns:
+        Tuple of (output_video_path, statistics_text)
+    """
+    try:
+        # Validate input
+        progress(0, desc="Validating video file...")
+        is_valid, error_msg = validate_video_file(video_file)
+        if not is_valid:
+            logger.error(f"Video validation failed: {error_msg}")
+            return None, f"❌ Error: {error_msg}"
+
+        # Create temporary output file
+        output_path = tempfile.mktemp(suffix='.mp4')
+
+        # Configure processing
+        progress(0.1, desc="Configuring detection parameters...")
+        config = VehicleDetectionConfig(
+            input_video=video_file,
+            output_video=output_path,
+            model_name=model_choice,
+            line_y=line_position,
+            confidence_threshold=confidence_threshold
+        )
+
+        # Process video
+        progress(0.2, desc="Processing video (this may take a few minutes)...")
+        logger.info(f"Starting video processing: {video_file}")
+
+        try:
+            stats = process_video(
+                config=config,
+                progress_callback=lambda p: progress(0.2 + p * 0.7, desc=f"Processing... {int(p*100)}%")
+            )
+
+            progress(0.95, desc="Finalizing output...")
+
+            # Format statistics
+            stats_text = f"""
+## 📊 Processing Results
+
+### Vehicle Count Statistics
+- **Total Vehicles Detected:** {stats['total_count']}
+- **Vehicles Entering (In):** {stats['in_count']}
+- **Vehicles Exiting (Out):** {stats['out_count']}
+
+### Speed Analysis
+- **Average Speed:** {stats['avg_speed']:.1f} km/h
+- **Maximum Speed:** {stats['max_speed']:.1f} km/h
+- **Minimum Speed:** {stats['min_speed']:.1f} km/h
+
+### Processing Information
+- **Frames Processed:** {stats['frames_processed']}
+- **Processing Time:** {stats['processing_time']:.2f} seconds
+- **Model Used:** {model_choice}
+- **Detection Confidence:** {confidence_threshold:.2f}
+
+✅ **Processing completed successfully!**
+"""
+
+            progress(1.0, desc="Complete!")
+            logger.info("Video processing completed successfully")
+            return output_path, stats_text
+
+        except Exception as e:
+            logger.error(f"Error during video processing: {e}", exc_info=True)
+            return None, f"❌ **Processing Error:** {str(e)}\n\nPlease try with different settings or a different video."
+
+    except Exception as e:
+        logger.error(f"Unexpected error in estimate_vehicle_speed: {e}", exc_info=True)
+        return None, f"❌ **Unexpected Error:** {str(e)}"
+
+
+def create_demo_interface() -> gr.Blocks:
+    """
+    Create Gradio interface for vehicle speed estimation.
+
+    Returns:
+        Gradio Blocks interface
+    """
+    with gr.Blocks(
+        title="Vehicle Speed Estimation & Counting",
+        theme=gr.themes.Soft()
+    ) as demo:
+
+        gr.Markdown("""
+# 🚗 Vehicle Speed Estimation & Counting System
+
+An intelligent traffic analysis system that detects, tracks, counts, and estimates the speed of vehicles in video footage using advanced computer vision techniques.
+
+### 🎯 Features
+- **Real-time Vehicle Detection** using YOLO
+- **Multi-Object Tracking** with ByteTrack
+- **Accurate Speed Estimation** via perspective transformation
+- **Vehicle Counting** with configurable detection zones
+
+### 👥 Developed By
+- **Abhay Gupta** (0205CC221005)
+- **Aditi Lakhera** (0205CC221011)
+- **Balraj Patel** (0205CC221049)
+- **Bhumika Patel** (0205CC221050)
+
+---
+""")
+
+        with gr.Row():
+            with gr.Column(scale=1):
+                gr.Markdown("### 📤 Input Configuration")
+
+                video_input = gr.Video(
+                    label="Upload Traffic Video",
+                    format="mp4"
+                )
+
+                with gr.Accordion("⚙️ Advanced Settings", open=False):
+                    model_choice = gr.Dropdown(
+                        choices=["yolov8n", "yolov8s", "yolov8m", "yolov8l"],
+                        value="yolov8n",
+                        label="YOLO Model",
+                        info="Larger models are more accurate but slower"
+                    )
+
+                    line_position = gr.Slider(
+                        minimum=100,
+                        maximum=1000,
+                        value=480,
+                        step=10,
+                        label="Counting Line Position (Y-coordinate)",
+                        info="Vertical position of the vehicle counting line"
+                    )
+
+                    confidence_threshold = gr.Slider(
+                        minimum=0.1,
+                        maximum=0.9,
+                        value=0.3,
+                        step=0.05,
+                        label="Detection Confidence Threshold",
+                        info="Higher values reduce false positives"
+                    )
+
+                process_btn = gr.Button(
+                    "🚀 Process Video",
+                    variant="primary",
+                    size="lg"
+                )
+
+                gr.Markdown("""
+### 📋 Instructions
+1. Upload a traffic video (MP4 format, max 100MB)
+2. Adjust settings if needed (optional)
+3. Click "Process Video" and wait for results
+4. Download the annotated video with speed estimates
+
+### 💡 Tips
+- Use videos with clear vehicle visibility
+- Ensure consistent camera angle
+- Better lighting improves detection accuracy
+""")
+
+            with gr.Column(scale=1):
+                gr.Markdown("### 📥 Output Results")
+
+                video_output = gr.Video(
+                    label="Processed Video with Annotations"
+                )
+
+                stats_output = gr.Markdown(
+                    label="Statistics",
+                    value="*Processing results will appear here...*"
+                )
+
+        # Example videos section
+        gr.Markdown("""
+---
+### 🎬 Example Videos
+Upload your own traffic video or use sample footage to test the system.
+""")
+
+        gr.Examples(
+            examples=[
+                ["./data/vehicles.mp4", "yolov8n", 480, 0.3],
+            ],
+            inputs=[video_input, model_choice, line_position, confidence_threshold],
+            outputs=[video_output, stats_output],
+            fn=estimate_vehicle_speed,
+            cache_examples=False,
+            label="Sample Videos"
+        )
+
+        # Connect processing function
+        process_btn.click(
+            fn=estimate_vehicle_speed,
+            inputs=[video_input, model_choice, line_position, confidence_threshold],
+            outputs=[video_output, stats_output]
+        )
+
+        gr.Markdown("""
+---
+### 🔬 Technical Details
+
+This system uses:
+- **YOLO (You Only Look Once)** for real-time object detection
+- **ByteTrack** for multi-object tracking across frames
+- **Perspective Transformation** for accurate speed calculation
+- **OpenCV** for video processing and computer vision operations
+
+### 📚 References
+- [Ultralytics YOLO](https://github.com/ultralytics/ultralytics)
+- [Supervision Library](https://github.com/roboflow/supervision)
+- [OpenCV](https://opencv.org/)
+
+### 📄 License
+MIT License - See LICENSE file for details
+""")
+
+    return demo
+
+
+if __name__ == "__main__":
+    try:
+        logger.info("Starting Vehicle Speed Estimation application...")
+        demo = create_demo_interface()
+        demo.launch(
+            server_name="0.0.0.0",
+            server_port=7860,
+            share=False
+        )
+    except Exception as e:
+        logger.error(f"Failed to launch application: {e}", exc_info=True)
+        sys.exit(1)
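`app.py` calls `process_video(config=..., progress_callback=...)` from `main.py` and reads a fixed set of keys from the returned dictionary when formatting the statistics. The portion of `main.py` shown further down is truncated before that function appears, so here is a sketch of the return contract implied by `app.py`; the `ProcessingStats` name is invented for illustration and the real implementation lives in `main.py`:

```python
from typing import Callable, Optional, TypedDict


class ProcessingStats(TypedDict):
    """Keys that app.py reads from the dictionary returned by process_video."""
    total_count: int        # total vehicles detected
    in_count: int           # vehicles crossing the counting line in one direction
    out_count: int          # vehicles crossing in the other direction
    avg_speed: float        # average estimated speed
    max_speed: float        # maximum estimated speed
    min_speed: float        # minimum estimated speed
    frames_processed: int   # number of frames read from the input video
    processing_time: float  # wall-clock seconds spent processing


def process_video(config, progress_callback: Optional[Callable[[float], None]] = None) -> ProcessingStats:
    """Signature as called from app.py; body omitted here."""
    ...
```

If the returned dictionary is missing any of these keys, the statistics formatting in `estimate_vehicle_speed` raises a `KeyError`, which is caught and reported to the user as a processing error.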
config.py
ADDED
@@ -0,0 +1,205 @@
+"""
+Vehicle Detection Configuration Module
+=======================================
+
+Manages configuration settings for vehicle detection, tracking, and speed estimation.
+
+Authors:
+- Abhay Gupta (0205CC221005)
+- Aditi Lakhera (0205CC221011)
+- Balraj Patel (0205CC221049)
+- Bhumika Patel (0205CC221050)
+"""
+
+import os
+from dataclasses import dataclass, field
+from typing import List, Tuple, Optional
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class VehicleDetectionConfig:
+    """
+    Configuration class for vehicle detection and speed estimation system.
+
+    This class encapsulates all configuration parameters needed for the
+    vehicle detection pipeline, including video paths, model settings,
+    detection zones, and perspective transformation parameters.
+    """
+
+    # Video Configuration
+    input_video: str = "./data/vehicles.mp4"
+    output_video: str = "./data/vehicles_output.mp4"
+
+    # Model Configuration
+    model_name: str = "yolov8n"
+    model_path: Optional[str] = None
+    confidence_threshold: float = 0.3
+    iou_threshold: float = 0.7
+
+    # Detection Zone Configuration
+    line_y: int = 480
+    line_offset: int = 55
+    crossing_threshold: int = 1
+
+    # Perspective Transformation Configuration
+    # Source points define the region in the original video frame
+    source_points: List[List[int]] = field(default_factory=lambda: [
+        [450, 300],   # Top-left
+        [860, 300],   # Top-right
+        [1900, 720],  # Bottom-right
+        [-660, 720]   # Bottom-left
+    ])
+
+    # Target points define the transformed top-down view dimensions (in meters)
+    target_width_meters: float = 25.0
+    target_height_meters: float = 100.0
+
+    # Display Configuration
+    window_name: str = "Vehicle Speed Estimation - Traffic Analysis"
+    display_enabled: bool = True
+
+    # Annotation Configuration
+    enable_boxes: bool = True
+    enable_labels: bool = True
+    enable_traces: bool = True
+    enable_line_zones: bool = True
+    trace_length: int = 20
+
+    # Speed Estimation Configuration
+    speed_history_seconds: int = 1
+    speed_unit: str = "km/h"  # Options: "km/h", "mph", "m/s"
+
+    def __post_init__(self):
+        """Validate configuration after initialization."""
+        self._validate_config()
+        self._setup_model_path()
+
+    def _validate_config(self) -> None:
+        """
+        Validate configuration parameters.
+
+        Raises:
+            ValueError: If configuration parameters are invalid
+        """
+        # Validate video paths
+        if not self.input_video:
+            raise ValueError("Input video path cannot be empty")
+
+        # Validate model configuration
+        if not 0.0 <= self.confidence_threshold <= 1.0:
+            raise ValueError(f"Confidence threshold must be between 0 and 1, got {self.confidence_threshold}")
+
+        if not 0.0 <= self.iou_threshold <= 1.0:
+            raise ValueError(f"IOU threshold must be between 0 and 1, got {self.iou_threshold}")
+
+        # Validate detection zone
+        if self.line_y < 0:
+            raise ValueError(f"Line Y position must be positive, got {self.line_y}")
+
+        if self.line_offset < 0:
+            raise ValueError(f"Line offset must be positive, got {self.line_offset}")
+
+        # Validate perspective transformation
+        if len(self.source_points) != 4:
+            raise ValueError(f"Source points must contain exactly 4 points, got {len(self.source_points)}")
+
+        for i, point in enumerate(self.source_points):
+            if len(point) != 2:
+                raise ValueError(f"Source point {i} must have 2 coordinates, got {len(point)}")
+
+        if self.target_width_meters <= 0 or self.target_height_meters <= 0:
+            raise ValueError("Target dimensions must be positive")
+
+        # Validate speed configuration
+        if self.speed_unit not in ["km/h", "mph", "m/s"]:
+            raise ValueError(f"Invalid speed unit: {self.speed_unit}. Must be 'km/h', 'mph', or 'm/s'")
+
+        logger.info("Configuration validation successful")
+
+    def _setup_model_path(self) -> None:
+        """Set up the model path based on model name."""
+        if self.model_path is None:
+            # Try to find model in models directory
+            model_dir = "./models"
+            potential_paths = [
+                f"{model_dir}/{self.model_name}.pt",
+                f"{model_dir}/VisDrone_YOLO_x2.pt",  # Custom trained model
+                self.model_name  # Let ultralytics download from hub
+            ]
+
+            for path in potential_paths:
+                if os.path.exists(path):
+                    self.model_path = path
+                    logger.info(f"Using model from: {path}")
+                    return
+
+            # Use model name directly (will be downloaded by ultralytics)
+            self.model_path = self.model_name
+            logger.info(f"Model will be downloaded: {self.model_name}")
+
+    @property
+    def target_points(self) -> List[List[float]]:
+        """
+        Generate target points for perspective transformation.
+
+        Returns:
+            List of 4 points defining the target perspective in meters
+        """
+        w, h = self.target_width_meters, self.target_height_meters
+        return [
+            [0, 0],  # Top-left
+            [w, 0],  # Top-right
+            [w, h],  # Bottom-right
+            [0, h]   # Bottom-left
+        ]
+
+    def get_speed_conversion_factor(self) -> float:
+        """
+        Get conversion factor for speed unit.
+
+        Returns:
+            Conversion factor from m/s to desired unit
+        """
+        conversions = {
+            "km/h": 3.6,
+            "mph": 2.23694,
+            "m/s": 1.0
+        }
+        return conversions[self.speed_unit]
+
+    def to_dict(self) -> dict:
+        """
+        Convert configuration to dictionary.
+
+        Returns:
+            Dictionary representation of configuration
+        """
+        return {
+            "input_video": self.input_video,
+            "output_video": self.output_video,
+            "model_name": self.model_name,
+            "model_path": self.model_path,
+            "confidence_threshold": self.confidence_threshold,
+            "line_y": self.line_y,
+            "speed_unit": self.speed_unit,
+        }
+
+    def __repr__(self) -> str:
+        """String representation of configuration."""
+        return f"VehicleDetectionConfig(model={self.model_name}, input={self.input_video})"
+
+
+# Default configuration instance for backward compatibility
+DEFAULT_CONFIG = VehicleDetectionConfig()
+
+# Export commonly used configuration values
+IN_VIDEO_PATH = DEFAULT_CONFIG.input_video
+OUT_VIDEO_PATH = DEFAULT_CONFIG.output_video
+YOLO_MODEL_PATH = DEFAULT_CONFIG.model_path
+LINE_Y = DEFAULT_CONFIG.line_y
+SOURCE_POINTS = DEFAULT_CONFIG.source_points
+TARGET_POINTS = DEFAULT_CONFIG.target_points
+WINDOW_NAME = DEFAULT_CONFIG.window_name
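`config.py` pairs four pixel-space `source_points` with four `target_points` expressed in meters (a 25 m × 100 m top-down view), and `main.py` below feeds both into a `PerspectiveTransformer` from the `src` package, which is not included in this upload. A minimal sketch of how such a transformer is commonly built with an OpenCV homography; the class body is an assumption, and only the constructor arguments mirror the call in `main.py`:

```python
import cv2
import numpy as np


class PerspectiveTransformer:
    """Hypothetical sketch: maps image-plane points into the metric road plane."""

    def __init__(self, source_points: np.ndarray, target_points: np.ndarray) -> None:
        # 3x3 homography computed from the four point correspondences
        self.matrix = cv2.getPerspectiveTransform(
            source_points.astype(np.float32),
            target_points.astype(np.float32),
        )

    def transform_points(self, points: np.ndarray) -> np.ndarray:
        """Project an (N, 2) array of pixel coordinates into road-plane meters."""
        if points.size == 0:
            return points
        reshaped = points.reshape(-1, 1, 2).astype(np.float32)
        projected = cv2.perspectiveTransform(reshaped, self.matrix)
        return projected.reshape(-1, 2)
```

With tracked anchor points projected into meters, speed follows from displacement over time: a vehicle that covers 10 m of road-plane distance in one second is moving at 10 m/s, which `get_speed_conversion_factor` turns into 36 km/h (×3.6) or roughly 22.4 mph (×2.23694).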
main.py
ADDED
@@ -0,0 +1,417 @@
"""
Vehicle Detection, Tracking, Counting, and Speed Estimation System
===================================================================

A comprehensive computer vision pipeline for analyzing traffic videos,
detecting vehicles, tracking their movement, counting them, and estimating
their speeds using YOLO object detection and perspective transformation.

Authors:
- Abhay Gupta (0205CC221005)
- Aditi Lakhera (0205CC221011)
- Balraj Patel (0205CC221049)
- Bhumika Patel (0205CC221050)

Technical Approach:
- YOLO for real-time object detection
- ByteTrack for multi-object tracking
- Perspective transformation for speed calculation
- Line zones for vehicle counting
"""

import sys
import logging
from pathlib import Path
from typing import Dict, Optional, Callable
from time import time

import cv2
import numpy as np
import supervision as sv
from ultralytics import YOLO

from src import FrameAnnotator, VehicleSpeedEstimator, PerspectiveTransformer
from src.exceptions import (
    VideoProcessingError,
    ModelLoadError,
    ConfigurationError
)
from config import VehicleDetectionConfig

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)


class VehicleDetectionPipeline:
    """
    Main pipeline for vehicle detection, tracking, counting, and speed estimation.

    This class orchestrates the entire processing workflow, from loading the model
    to processing each frame and generating the output video.
    """

    def __init__(self, config: VehicleDetectionConfig):
        """
        Initialize the detection pipeline.

        Args:
            config: Configuration object with all parameters

        Raises:
            ModelLoadError: If model cannot be loaded
            ConfigurationError: If configuration is invalid
        """
        self.config = config
        self.model = None
        self.tracker = None
        self.line_zone = None
        self.speed_estimator = None
        self.annotator = None
        self.video_info = None

        logger.info(f"Initializing pipeline with config: {config}")
        self._initialize_components()

    def _initialize_components(self) -> None:
        """Initialize all pipeline components."""
        try:
            # Load YOLO model
            logger.info(f"Loading YOLO model: {self.config.model_path}")
            self.model = YOLO(self.config.model_path)
            self.model.conf = self.config.confidence_threshold
            self.model.iou = self.config.iou_threshold
            logger.info("Model loaded successfully")

        except Exception as e:
            logger.error(f"Failed to load model: {e}")
            raise ModelLoadError(f"Could not load YOLO model from {self.config.model_path}: {e}")

    def _setup_video_components(self, video_path: str) -> None:
        """
        Set up video-specific components.

        Args:
            video_path: Path to input video

        Raises:
            VideoProcessingError: If video cannot be opened
        """
        try:
            # Get video information
            self.video_info = sv.VideoInfo.from_video_path(video_path)
            logger.info(f"Video info: {self.video_info.width}x{self.video_info.height} @ {self.video_info.fps}fps")

            # Initialize ByteTrack tracker
            self.tracker = sv.ByteTrack(
                frame_rate=self.video_info.fps,
                track_activation_threshold=self.config.confidence_threshold
            )
            logger.info("Tracker initialized")

            # Set up counting line zone
            line_start = sv.Point(
                x=self.config.line_offset,
                y=self.config.line_y
            )
            line_end = sv.Point(
                x=self.video_info.width - self.config.line_offset,
                y=self.config.line_y
            )

            self.line_zone = sv.LineZone(
                start=line_start,
                end=line_end,
                triggering_anchors=(sv.Position.BOTTOM_CENTER,)
            )
            logger.info(f"Line zone created at y={self.config.line_y}")

            # Initialize perspective transformer
            source_pts = np.array(self.config.source_points, dtype=np.float32)
            target_pts = np.array(self.config.target_points, dtype=np.float32)

            transformer = PerspectiveTransformer(
                source_points=source_pts,
                target_points=target_pts
            )
            logger.info("Perspective transformer initialized")

            # Initialize speed estimator
            self.speed_estimator = VehicleSpeedEstimator(
                fps=self.video_info.fps,
                transformer=transformer,
                history_duration=self.config.speed_history_seconds,
                speed_unit=self.config.speed_unit
            )
            logger.info("Speed estimator initialized")

            # Initialize frame annotator
            self.annotator = FrameAnnotator(
                video_resolution=(self.video_info.width, self.video_info.height),
                show_boxes=self.config.enable_boxes,
                show_labels=self.config.enable_labels,
                show_traces=self.config.enable_traces,
                show_line_zones=self.config.enable_line_zones,
                trace_length=self.config.trace_length,
                zone_polygon=source_pts
            )
            logger.info("Frame annotator initialized")

        except Exception as e:
            logger.error(f"Failed to setup video components: {e}")
            raise VideoProcessingError(f"Error setting up video processing: {e}")

    def _process_single_frame(self, frame: np.ndarray) -> tuple:
        """
        Process a single video frame.

        Args:
            frame: Input video frame

        Returns:
            Tuple of (annotated_frame, detections)
        """
        # Run YOLO detection
        results = self.model(frame, verbose=False)[0]
        detections = sv.Detections.from_ultralytics(results)

        # Update tracker
        detections = self.tracker.update_with_detections(detections)

        # Trigger line zone counting
        self.line_zone.trigger(detections)

        # Estimate speeds
        detections = self.speed_estimator.estimate(detections)

        # Generate labels
        labels = self._create_labels(detections)

        # Annotate frame
        annotated_frame = self.annotator.draw_annotations(
            frame=frame,
            detections=detections,
            labels=labels,
            line_zones=[self.line_zone]
        )

        return annotated_frame, detections

    def _create_labels(self, detections: sv.Detections) -> list:
        """
        Create display labels for detected vehicles.

        Args:
            detections: Detection results

        Returns:
            List of label strings
        """
        labels = []

        if not hasattr(detections, 'tracker_id') or detections.tracker_id is None:
            return labels

        for idx, tracker_id in enumerate(detections.tracker_id):
            # Get class name
            class_name = "Vehicle"
            if "class_name" in detections.data:
                class_name = detections.data["class_name"][idx]

            # Get speed
            speed_text = ""
            if "speed" in detections.data:
                speed = detections.data["speed"][idx]
                if speed > 0:
                    speed_text = f" {speed:.0f}{self.config.speed_unit}"

            # Create label
            label = f"{class_name} #{tracker_id}{speed_text}"
            labels.append(label)

        return labels

    def process_video(
        self,
        progress_callback: Optional[Callable[[float], None]] = None
    ) -> Dict:
        """
        Process the entire video.

        Args:
            progress_callback: Optional callback for progress updates

        Returns:
            Dictionary with processing statistics

        Raises:
            VideoProcessingError: If video processing fails
        """
        start_time = time()

        try:
            # Validate input video
            if not Path(self.config.input_video).exists():
                raise VideoProcessingError(f"Input video not found: {self.config.input_video}")

            # Setup components
            self._setup_video_components(self.config.input_video)

            # Create output directory if needed
            output_path = Path(self.config.output_video)
            output_path.parent.mkdir(parents=True, exist_ok=True)

            # Initialize statistics
            frame_count = 0
            total_frames = self.video_info.total_frames or 0
            all_speeds = []

            # Setup display window if enabled
            if self.config.display_enabled:
                cv2.namedWindow(self.config.window_name, cv2.WINDOW_NORMAL)
                cv2.resizeWindow(
                    self.config.window_name,
                    self.video_info.width,
                    self.video_info.height
                )

            # Process video
            logger.info("Starting video processing...")
            frame_generator = sv.get_video_frames_generator(self.config.input_video)

            with sv.VideoSink(self.config.output_video, self.video_info) as sink:
                for frame in frame_generator:
                    try:
                        # Process frame
                        annotated_frame, detections = self._process_single_frame(frame)

                        # Collect speed statistics
                        if "speed" in detections.data:
                            speeds = detections.data["speed"]
                            all_speeds.extend([s for s in speeds if s > 0])

                        # Write to output
                        sink.write_frame(annotated_frame)

                        # Display if enabled
                        if self.config.display_enabled:
                            cv2.imshow(self.config.window_name, annotated_frame)

                            # Check for quit
                            if cv2.waitKey(1) & 0xFF == ord('q'):
                                logger.info("Processing interrupted by user")
                                break

                            # Check if window was closed
                            if cv2.getWindowProperty(
                                self.config.window_name,
                                cv2.WND_PROP_VISIBLE
                            ) < 1:
                                logger.info("Window closed by user")
                                break

                        # Update progress
                        frame_count += 1
                        if progress_callback and total_frames > 0:
                            progress = frame_count / total_frames
                            progress_callback(progress)

                    except Exception as e:
                        logger.warning(f"Error processing frame {frame_count}: {e}")
                        continue

            # Cleanup
            if self.config.display_enabled:
                cv2.destroyAllWindows()

            # Calculate statistics
            processing_time = time() - start_time
            stats = {
                'total_count': self.line_zone.in_count + self.line_zone.out_count,
                'in_count': self.line_zone.in_count,
                'out_count': self.line_zone.out_count,
                'avg_speed': np.mean(all_speeds) if all_speeds else 0.0,
                'max_speed': np.max(all_speeds) if all_speeds else 0.0,
                'min_speed': np.min(all_speeds) if all_speeds else 0.0,
                'frames_processed': frame_count,
                'processing_time': processing_time,
                'fps': frame_count / processing_time if processing_time > 0 else 0
            }

            logger.info(f"Processing complete: {frame_count} frames in {processing_time:.2f}s")
            logger.info(f"Vehicles counted: {stats['total_count']} (In: {stats['in_count']}, Out: {stats['out_count']})")

            return stats

        except Exception as e:
            logger.error(f"Video processing failed: {e}", exc_info=True)
            raise VideoProcessingError(f"Failed to process video: {e}")


def process_video(
    config: VehicleDetectionConfig,
    progress_callback: Optional[Callable[[float], None]] = None
) -> Dict:
    """
    Convenience function to process a video with given configuration.

    Args:
        config: Configuration object
        progress_callback: Optional progress callback

    Returns:
        Processing statistics dictionary
    """
    pipeline = VehicleDetectionPipeline(config)
    return pipeline.process_video(progress_callback)


def main():
    """Main entry point for CLI usage."""
    try:
        logger.info("=" * 60)
        logger.info("Vehicle Speed Estimation & Counting System")
        logger.info("=" * 60)

        # Load configuration
        config = VehicleDetectionConfig()
        logger.info(f"Configuration: {config}")

        # Process video
        stats = process_video(config)

        # Display results
        print("\n" + "=" * 60)
        print("PROCESSING RESULTS")
        print("=" * 60)
        print(f"Output saved to: {config.output_video}")
        print(f"\nVehicle Count:")
        print(f"  Total: {stats['total_count']}")
        print(f"  In: {stats['in_count']}")
        print(f"  Out: {stats['out_count']}")
        print(f"\nSpeed Statistics ({config.speed_unit}):")
        print(f"  Average: {stats['avg_speed']:.1f}")
        print(f"  Maximum: {stats['max_speed']:.1f}")
        print(f"  Minimum: {stats['min_speed']:.1f}")
        print(f"\nProcessing Info:")
        print(f"  Frames: {stats['frames_processed']}")
        print(f"  Time: {stats['processing_time']:.2f}s")
        print(f"  FPS: {stats['fps']:.1f}")
        print("=" * 60)

        return 0

    except KeyboardInterrupt:
        logger.info("Processing interrupted by user")
        return 1
    except Exception as e:
        logger.error(f"Fatal error: {e}", exc_info=True)
        print(f"\n❌ Error: {e}", file=sys.stderr)
        return 1


if __name__ == "__main__":
    sys.exit(main())
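Editor's note: the module-level process_video function above is the natural hook for driving the pipeline from another script or notebook. The sketch below is illustrative only and is not part of the committed sources; it assumes that VehicleDetectionConfig (defined in config.py) allows fields such as input_video and output_video to be overridden after construction, and the file paths shown are placeholders.

# run_pipeline_sketch.py -- illustrative usage only, not part of this commit
from config import VehicleDetectionConfig
from main import process_video

def report_progress(fraction: float) -> None:
    # Any callable accepting a float in [0, 1] can serve as the progress callback.
    print(f"Progress: {fraction * 100:.1f}%")

if __name__ == "__main__":
    cfg = VehicleDetectionConfig()
    # Hypothetical overrides; the real defaults live in config.py and may be read-only.
    cfg.input_video = "data/traffic.mp4"
    cfg.output_video = "output/traffic_annotated.mp4"
    stats = process_video(cfg, progress_callback=report_progress)
    print(f"Counted {stats['total_count']} vehicles, "
          f"average speed {stats['avg_speed']:.1f} {cfg.speed_unit}")

The returned dictionary carries the same keys the CLI entry point prints (total_count, in_count, out_count, avg_speed, max_speed, min_speed, frames_processed, processing_time, fps).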
requirements.txt
ADDED
|
@@ -0,0 +1,10 @@
# Python dependencies
gradio==4.44.0
numpy==2.3.1
opencv-contrib-python==4.11.0.86
opencv-python==4.11.0.86
supervision==0.26.0
ultralytics==8.3.161

# Additional utilities
Pillow>=10.0.0
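Editor's note: these pins reflect the environment the project was built against. Assuming a recent Python 3 interpreter and a clean virtual environment, installing them with `pip install -r requirements.txt` and then running `python main.py` (after pointing config.py at a local video) should reproduce the CLI workflow defined in main.py above.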