baldevsngh02 committed
Commit 4c4059c (verified) · 1 Parent(s): 5070a52

Upload 16 files

Files changed (16):
  1. .gitignore +123 -0
  2. .owners.yml +14 -0
  3. .pre-commit-config-zh-cn.yaml +61 -0
  4. .pre-commit-config.yaml +50 -0
  5. .readthedocs.yml +14 -0
  6. CITATION.cff +8 -0
  7. LICENSE +203 -0
  8. MANIFEST.in +7 -0
  9. README.md +455 -10
  10. README_zh-CN.md +476 -0
  11. dataset-index.yml +18 -0
  12. model-index.yml +102 -0
  13. pytest.ini +7 -0
  14. requirements.txt +3 -0
  15. setup.cfg +24 -0
  16. setup.py +224 -0
.gitignore ADDED
@@ -0,0 +1,123 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ .hypothesis/
+ .pytest_cache/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/en/_build/
+ docs/zh_cn/_build/
+
+ # PyBuilder
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # pyenv
+ .python-version
+
+ # celery beat schedule file
+ celerybeat-schedule
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ data/
+ data
+ .vscode
+ .idea
+ .DS_Store
+
+ # custom
+ *.pkl
+ *.pkl.json
+ *.log.json
+ docs/modelzoo_statistics.md
+ mmdet/.mim
+ work_dirs/
+
+ # Pytorch
+ *.pth
+ *.py~
+ *.sh~
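For context: the single pattern `*.py[cod]` above covers `.pyc`, `.pyo`, and `.pyd` byte-compiled artifacts via one character class. A minimal sketch of that matching behavior, using Python's `fnmatch` as a stand-in (git's own .gitignore matching differs in details such as anchoring and `**` globs):

```python
# A minimal sketch: fnmatch supports the same [] character classes as the
# simple, slash-free .gitignore patterns above. Not a full gitignore engine.
from fnmatch import fnmatch

for name in ["module.pyc", "module.pyo", "module.pyd", "module.py"]:
    print(name, fnmatch(name, "*.py[cod]"))
# -> True, True, True, False
```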
.owners.yml ADDED
@@ -0,0 +1,14 @@
+ assign:
+   strategy:
+     # random
+     daily-shift-based
+   schedule:
+     '*/1 * * * *'
+   assignees:
+     - Czm369
+     - hhaAndroid
+     - jbwang1997
+     - RangiLyu
+     - BIGWangYuDong
+     - chhluo
+     - ZwwWayne
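The `'*/1 * * * *'` value is a standard five-field cron expression (every minute, every hour, every day). A minimal sketch of what it evaluates to, assuming the third-party `croniter` package is installed:

```python
# A minimal sketch, assuming `pip install croniter`: compute the next few
# firing times of the '*/1 * * * *' schedule used above (minute field
# stepped by 1, all other fields wildcarded -> fires every minute).
from datetime import datetime

from croniter import croniter

it = croniter("*/1 * * * *", datetime(2024, 1, 5, 12, 0))
for _ in range(3):
    print(it.get_next(datetime))  # 12:01, 12:02, 12:03
```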
.pre-commit-config-zh-cn.yaml ADDED
@@ -0,0 +1,61 @@
+ exclude: ^tests/data/
+ repos:
+   - repo: https://gitee.com/openmmlab/mirrors-flake8
+     rev: 5.0.4
+     hooks:
+       - id: flake8
+   - repo: https://gitee.com/openmmlab/mirrors-isort
+     rev: 5.11.5
+     hooks:
+       - id: isort
+   - repo: https://gitee.com/openmmlab/mirrors-yapf
+     rev: v0.32.0
+     hooks:
+       - id: yapf
+   - repo: https://gitee.com/openmmlab/mirrors-pre-commit-hooks
+     rev: v4.3.0
+     hooks:
+       - id: trailing-whitespace
+       - id: check-yaml
+       - id: end-of-file-fixer
+       - id: requirements-txt-fixer
+       - id: double-quote-string-fixer
+       - id: check-merge-conflict
+       - id: fix-encoding-pragma
+         args: ["--remove"]
+       - id: mixed-line-ending
+         args: ["--fix=lf"]
+   - repo: https://gitee.com/openmmlab/mirrors-mdformat
+     rev: 0.7.9
+     hooks:
+       - id: mdformat
+         args: ["--number"]
+         additional_dependencies:
+           - mdformat-openmmlab
+           - mdformat_frontmatter
+           - linkify-it-py
+   - repo: https://gitee.com/openmmlab/mirrors-codespell
+     rev: v2.2.1
+     hooks:
+       - id: codespell
+   - repo: https://gitee.com/openmmlab/mirrors-docformatter
+     rev: v1.3.1
+     hooks:
+       - id: docformatter
+         args: ["--in-place", "--wrap-descriptions", "79"]
+   - repo: https://gitee.com/openmmlab/mirrors-pyupgrade
+     rev: v3.0.0
+     hooks:
+       - id: pyupgrade
+         args: ["--py36-plus"]
+   - repo: https://gitee.com/open-mmlab/pre-commit-hooks
+     rev: v0.2.0
+     hooks:
+       - id: check-algo-readme
+       - id: check-copyright
+         args: ["mmdet"]
+   # - repo: https://gitee.com/openmmlab/mirrors-mypy
+   #   rev: v0.812
+   #   hooks:
+   #     - id: mypy
+   #       exclude: "docs"
.pre-commit-config.yaml ADDED
@@ -0,0 +1,50 @@
+ repos:
+   - repo: https://github.com/PyCQA/flake8
+     rev: 5.0.4
+     hooks:
+       - id: flake8
+   - repo: https://github.com/PyCQA/isort
+     rev: 5.11.5
+     hooks:
+       - id: isort
+   - repo: https://github.com/pre-commit/mirrors-yapf
+     rev: v0.32.0
+     hooks:
+       - id: yapf
+   - repo: https://github.com/pre-commit/pre-commit-hooks
+     rev: v4.3.0
+     hooks:
+       - id: trailing-whitespace
+       - id: check-yaml
+       - id: end-of-file-fixer
+       - id: requirements-txt-fixer
+       - id: double-quote-string-fixer
+       - id: check-merge-conflict
+       - id: fix-encoding-pragma
+         args: ["--remove"]
+       - id: mixed-line-ending
+         args: ["--fix=lf"]
+   - repo: https://github.com/codespell-project/codespell
+     rev: v2.2.1
+     hooks:
+       - id: codespell
+   - repo: https://github.com/executablebooks/mdformat
+     rev: 0.7.9
+     hooks:
+       - id: mdformat
+         args: ["--number"]
+         additional_dependencies:
+           - mdformat-openmmlab
+           - mdformat_frontmatter
+           - linkify-it-py
+   - repo: https://github.com/myint/docformatter
+     rev: v1.3.1
+     hooks:
+       - id: docformatter
+         args: ["--in-place", "--wrap-descriptions", "79"]
+   - repo: https://github.com/open-mmlab/pre-commit-hooks
+     rev: v0.2.0  # Use the ref you want to point at
+     hooks:
+       - id: check-algo-readme
+       - id: check-copyright
+         args: ["mmdet"]  # replace "mmdet" with the directory you want to check
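For context, these hooks only run once the pre-commit framework is wired into the local clone. A minimal sketch of that setup, driving the standard pre-commit CLI from Python (assuming `pip install pre-commit` has been run in the environment):

```python
# A minimal sketch, assuming the pre-commit package is installed: register
# the git hook for this clone, then run every configured hook once over
# the whole tree.
import subprocess

subprocess.run(["pre-commit", "install"], check=True)             # register the git hook
subprocess.run(["pre-commit", "run", "--all-files"], check=True)  # lint/format everything once
```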
.readthedocs.yml ADDED
@@ -0,0 +1,14 @@
+ version: 2
+
+ build:
+   os: ubuntu-22.04
+   tools:
+     python: "3.8"
+
+ formats:
+   - epub
+
+ python:
+   install:
+     - requirements: requirements/docs.txt
+     - requirements: requirements/readthedocs.txt
CITATION.cff ADDED
@@ -0,0 +1,8 @@
+ cff-version: 1.2.0
+ message: "If you use this software, please cite it as below."
+ authors:
+   - name: "MMDetection Contributors"
+ title: "OpenMMLab Detection Toolbox and Benchmark"
+ date-released: 2018-08-22
+ url: "https://github.com/open-mmlab/mmdetection"
+ license: Apache-2.0
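For context, CITATION.cff is machine-readable citation metadata (GitHub's "Cite this repository" button consumes it). A minimal sketch that parses the file and assembles a plain-text citation line, assuming PyYAML is installed:

```python
# A minimal sketch, assuming `pip install pyyaml`: read the CFF metadata
# above and print a plain-text citation assembled from its fields.
import yaml

with open("CITATION.cff") as f:
    meta = yaml.safe_load(f)

authors = ", ".join(a["name"] for a in meta["authors"])
print(f'{authors}. "{meta["title"]}" ({meta["date-released"]}). {meta["url"]}')
```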
LICENSE ADDED
@@ -0,0 +1,203 @@
+ Copyright 2018-2023 OpenMMLab. All rights reserved.
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2018-2023 OpenMMLab.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
MANIFEST.in ADDED
@@ -0,0 +1,7 @@
+ include requirements/*.txt
+ include mmdet/VERSION
+ include mmdet/.mim/model-index.yml
+ include mmdet/.mim/dataset-index.yml
+ include mmdet/.mim/demo/*/*
+ recursive-include mmdet/.mim/configs *.py *.yml
+ recursive-include mmdet/.mim/tools *.sh *.py
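For context, MANIFEST.in controls which extra files land in the source distribution. A minimal sketch to verify the rules took effect, assuming the standard `build` package is installed (build the sdist, then list the archive members):

```python
# A minimal sketch, assuming `pip install build`: build an sdist and list
# its members to confirm the MANIFEST.in rules above (e.g. the
# mmdet/.mim/configs tree) were picked up.
import glob
import subprocess
import tarfile

subprocess.run(["python", "-m", "build", "--sdist"], check=True)
with tarfile.open(sorted(glob.glob("dist/*.tar.gz"))[-1]) as tf:
    for member in tf.getnames():
        if ".mim" in member:
            print(member)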
README.md CHANGED
@@ -1,10 +1,455 @@
- ---
- title: Mmdetection
- emoji: ⚡
- colorFrom: gray
- colorTo: pink
- sdk: docker
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ <div align="center">
+ <img src="resources/mmdet-logo.png" width="600"/>
+ <div>&nbsp;</div>
+ <div align="center">
+ <b><font size="5">OpenMMLab website</font></b>
+ <sup>
+ <a href="https://openmmlab.com">
+ <i><font size="4">HOT</font></i>
+ </a>
+ </sup>
+ &nbsp;&nbsp;&nbsp;&nbsp;
+ <b><font size="5">OpenMMLab platform</font></b>
+ <sup>
+ <a href="https://platform.openmmlab.com">
+ <i><font size="4">TRY IT OUT</font></i>
+ </a>
+ </sup>
+ </div>
+ <div>&nbsp;</div>
+
+ [![PyPI](https://img.shields.io/pypi/v/mmdet)](https://pypi.org/project/mmdet)
+ [![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmdetection.readthedocs.io/en/latest/)
+ [![badge](https://github.com/open-mmlab/mmdetection/workflows/build/badge.svg)](https://github.com/open-mmlab/mmdetection/actions)
+ [![codecov](https://codecov.io/gh/open-mmlab/mmdetection/branch/main/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmdetection)
+ [![license](https://img.shields.io/github/license/open-mmlab/mmdetection.svg)](https://github.com/open-mmlab/mmdetection/blob/main/LICENSE)
+ [![open issues](https://isitmaintained.com/badge/open/open-mmlab/mmdetection.svg)](https://github.com/open-mmlab/mmdetection/issues)
+ [![issue resolution](https://isitmaintained.com/badge/resolution/open-mmlab/mmdetection.svg)](https://github.com/open-mmlab/mmdetection/issues)
+ [![Open in OpenXLab](https://cdn-static.openxlab.org.cn/app-center/openxlab_demo.svg)](https://openxlab.org.cn/apps?search=mmdet)
+
+ [📘Documentation](https://mmdetection.readthedocs.io/en/latest/) |
+ [🛠️Installation](https://mmdetection.readthedocs.io/en/latest/get_started.html) |
+ [👀Model Zoo](https://mmdetection.readthedocs.io/en/latest/model_zoo.html) |
+ [🆕Update News](https://mmdetection.readthedocs.io/en/latest/notes/changelog.html) |
+ [🚀Ongoing Projects](https://github.com/open-mmlab/mmdetection/projects) |
+ [🤔Reporting Issues](https://github.com/open-mmlab/mmdetection/issues/new/choose)
+
+ </div>
+
+ <div align="center">
+
+ English | [简体中文](README_zh-CN.md)
+
+ </div>
+
+ <div align="center">
+ <a href="https://openmmlab.medium.com/" style="text-decoration:none;">
+ <img src="https://user-images.githubusercontent.com/25839884/219255827-67c1a27f-f8c5-46a9-811d-5e57448c61d1.png" width="3%" alt="" /></a>
+ <img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
+ <a href="https://discord.com/channels/1037617289144569886/1046608014234370059" style="text-decoration:none;">
+ <img src="https://user-images.githubusercontent.com/25839884/218347213-c080267f-cbb6-443e-8532-8e1ed9a58ea9.png" width="3%" alt="" /></a>
+ <img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
+ <a href="https://twitter.com/OpenMMLab" style="text-decoration:none;">
+ <img src="https://user-images.githubusercontent.com/25839884/218346637-d30c8a0f-3eba-4699-8131-512fb06d46db.png" width="3%" alt="" /></a>
+ <img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
+ <a href="https://www.youtube.com/openmmlab" style="text-decoration:none;">
+ <img src="https://user-images.githubusercontent.com/25839884/218346691-ceb2116a-465a-40af-8424-9f30d2348ca9.png" width="3%" alt="" /></a>
+ <img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
+ <a href="https://space.bilibili.com/1293512903" style="text-decoration:none;">
+ <img src="https://user-images.githubusercontent.com/25839884/219026751-d7d14cce-a7c9-4e82-9942-8375fca65b99.png" width="3%" alt="" /></a>
+ <img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
+ <a href="https://www.zhihu.com/people/openmmlab" style="text-decoration:none;">
+ <img src="https://user-images.githubusercontent.com/25839884/219026120-ba71e48b-6e94-4bd4-b4e9-b7d175b5e362.png" width="3%" alt="" /></a>
+ </div>
+
+ <div align="center">
+ <img src="https://github.com/open-mmlab/mmdetection/assets/17425982/6c29886f-ae7a-4a55-8be4-352ee85b7d3e"/>
+ </div>
+
+ ## Introduction
+
+ MMDetection is an open-source object detection toolbox based on PyTorch. It is
+ part of the [OpenMMLab](https://openmmlab.com/) project.
+
+ The main branch works with **PyTorch 1.8+**.
+
+ <img src="https://user-images.githubusercontent.com/12907710/187674113-2074d658-f2fb-42d1-ac15-9c4a695e64d7.png"/>
+
+ <details open>
+ <summary>Major features</summary>
+
+ - **Modular design**
+
+ We decompose the detection framework into different components, so one can easily construct a customized object detection framework by combining different modules.
+
+ - **Support for multiple tasks out of the box**
+
+ The toolbox directly supports multiple detection tasks such as **object detection**, **instance segmentation**, **panoptic segmentation**, and **semi-supervised object detection**.
+
+ - **High efficiency**
+
+ All basic bbox and mask operations run on GPUs. The training speed is faster than or comparable to other codebases, including [Detectron2](https://github.com/facebookresearch/detectron2), [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark) and [SimpleDet](https://github.com/TuSimple/simpledet).
+
+ - **State of the art**
+
+ The toolbox stems from the codebase developed by the *MMDet* team, which won the [COCO Detection Challenge](http://cocodataset.org/#detection-leaderboard) in 2018, and we keep pushing it forward.
+ The newly released [RTMDet](configs/rtmdet) also achieves new state-of-the-art results on real-time instance segmentation and rotated object detection, as well as the best parameter-accuracy trade-off on object detection.
+
+ </details>
+
+ Apart from MMDetection, we have also released [MMEngine](https://github.com/open-mmlab/mmengine) for model training and [MMCV](https://github.com/open-mmlab/mmcv) for computer vision research, on which this toolbox heavily depends.
+
+ ## What's New
+
+ 💎 **We have released the pre-trained weights for MM-Grounding-DINO Swin-B and Swin-L; you are welcome to try them and give feedback.**
+
+ ### Highlight
+
+ **v3.3.0** was released on January 5, 2024:
+
+ **[MM-Grounding-DINO: An Open and Comprehensive Pipeline for Unified Object Grounding and Detection](https://arxiv.org/abs/2401.02361)**
+
+ Grounding DINO is a grounding pre-training model that unifies 2D open-vocabulary object detection and phrase grounding, with wide applications. However, its training code has not been open-sourced. We therefore propose MM-Grounding-DINO, which not only serves as an open-source reproduction of Grounding DINO, but also achieves significant performance improvements by rebuilding the data types and exploring different dataset combinations and initialization strategies. Moreover, we evaluate it along multiple dimensions, including OOD, REC, phrase grounding, OVD, and fine-tuning, to fully probe the strengths and weaknesses of grounding pre-training, hoping to provide inspiration for future work.
+
+ Code: [mm_grounding_dino/README.md](configs/mm_grounding_dino/README.md)
+
+ <div align=center>
+ <img src="https://github.com/open-mmlab/mmdetection/assets/17425982/fb14d1ee-5469-44d2-b865-aac9850c429c"/>
+ </div>
+
+ We are excited to announce our latest work on real-time object recognition tasks, **RTMDet**, a family of fully convolutional single-stage detectors. RTMDet not only achieves the best parameter-accuracy trade-off on object detection from tiny to extra-large model sizes but also obtains new state-of-the-art performance on instance segmentation and rotated object detection tasks. Details can be found in the [technical report](https://arxiv.org/abs/2212.07784). Pre-trained models are [here](configs/rtmdet).
+
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/real-time-instance-segmentation-on-mscoco)](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco?p=rtmdet-an-empirical-study-of-designing-real)
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/object-detection-in-aerial-images-on-dota-1)](https://paperswithcode.com/sota/object-detection-in-aerial-images-on-dota-1?p=rtmdet-an-empirical-study-of-designing-real)
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/object-detection-in-aerial-images-on-hrsc2016)](https://paperswithcode.com/sota/object-detection-in-aerial-images-on-hrsc2016?p=rtmdet-an-empirical-study-of-designing-real)
+
+ | Task | Dataset | AP | FPS (TensorRT FP16, batch size 1, RTX 3090) |
+ | ------------------------ | ------- | ---------------------------------------- | ------------------------------------------- |
+ | Object Detection | COCO | 52.8 | 322 |
+ | Instance Segmentation | COCO | 44.6 | 188 |
+ | Rotated Object Detection | DOTA | 78.9 (single-scale) / 81.3 (multi-scale) | 121 |
+
+ <div align=center>
+ <img src="https://user-images.githubusercontent.com/12907710/208044554-1e8de6b5-48d8-44e4-a7b5-75076c7ebb71.png"/>
+ </div>
+
+ ## Installation
+
+ Please refer to [Installation](https://mmdetection.readthedocs.io/en/latest/get_started.html) for installation instructions.
+
+ ## Getting Started
+
+ Please see [Overview](https://mmdetection.readthedocs.io/en/latest/get_started.html) for a general introduction to MMDetection.
+
+ For detailed user guides and advanced guides, please refer to our [documentation](https://mmdetection.readthedocs.io/en/latest/):
+
+ - User Guides
+
+ <details>
+
+ - [Train & Test](https://mmdetection.readthedocs.io/en/latest/user_guides/index.html#train-test)
+ - [Learn about Configs](https://mmdetection.readthedocs.io/en/latest/user_guides/config.html)
+ - [Inference with existing models](https://mmdetection.readthedocs.io/en/latest/user_guides/inference.html)
+ - [Dataset Preparation](https://mmdetection.readthedocs.io/en/latest/user_guides/dataset_prepare.html)
+ - [Test existing models on standard datasets](https://mmdetection.readthedocs.io/en/latest/user_guides/test.html)
+ - [Train predefined models on standard datasets](https://mmdetection.readthedocs.io/en/latest/user_guides/train.html)
+ - [Train with customized datasets](https://mmdetection.readthedocs.io/en/latest/user_guides/train.html#train-with-customized-datasets)
+ - [Train with customized models and standard datasets](https://mmdetection.readthedocs.io/en/latest/user_guides/new_model.html)
+ - [Finetuning Models](https://mmdetection.readthedocs.io/en/latest/user_guides/finetune.html)
+ - [Test Results Submission](https://mmdetection.readthedocs.io/en/latest/user_guides/test_results_submission.html)
+ - [Weight initialization](https://mmdetection.readthedocs.io/en/latest/user_guides/init_cfg.html)
+ - [Use a single-stage detector as RPN](https://mmdetection.readthedocs.io/en/latest/user_guides/single_stage_as_rpn.html)
+ - [Semi-supervised Object Detection](https://mmdetection.readthedocs.io/en/latest/user_guides/semi_det.html)
+ - [Useful Tools](https://mmdetection.readthedocs.io/en/latest/user_guides/index.html#useful-tools)
+
+ </details>
+
+ - Advanced Guides
+
+ <details>
+
+ - [Basic Concepts](https://mmdetection.readthedocs.io/en/latest/advanced_guides/index.html#basic-concepts)
+ - [Component Customization](https://mmdetection.readthedocs.io/en/latest/advanced_guides/index.html#component-customization)
+ - [How to](https://mmdetection.readthedocs.io/en/latest/advanced_guides/index.html#how-to)
+
+ </details>
+
+ We also provide a Colab tutorial for object detection [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](demo/MMDet_Tutorial.ipynb) and one for instance segmentation [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](demo/MMDet_InstanceSeg_Tutorial.ipynb).
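As a quick taste of the high-level API the guides above describe, here is a minimal inference sketch. `init_detector` and `inference_detector` are the documented entry points in `mmdet.apis`; the config and checkpoint paths are illustrative placeholders, so substitute any pair from the model zoo:

```python
# A minimal inference sketch using mmdet's high-level API. The config and
# checkpoint paths below are illustrative placeholders, not guaranteed
# filenames -- pick a real pair from the model zoo.
from mmdet.apis import inference_detector, init_detector

config_file = "configs/rtmdet/rtmdet_tiny_8xb32-300e_coco.py"  # placeholder
checkpoint_file = "checkpoints/rtmdet_tiny_coco.pth"           # placeholder

model = init_detector(config_file, checkpoint_file, device="cuda:0")
result = inference_detector(model, "demo/demo.jpg")  # returns a DetDataSample
print(result.pred_instances.bboxes[:5])              # top predicted boxes
```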
+
+ To migrate from MMDetection 2.x, please refer to [migration](https://mmdetection.readthedocs.io/en/latest/migration.html).
+
+ ## Overview of Benchmark and Model Zoo
+
+ Results and models are available in the [model zoo](docs/en/model_zoo.md).
+
+ <div align="center">
+ <b>Architectures</b>
+ </div>
+ <table align="center">
+ <tbody>
+ <tr align="center" valign="bottom">
+ <td>
+ <b>Object Detection</b>
+ </td>
+ <td>
+ <b>Instance Segmentation</b>
+ </td>
+ <td>
+ <b>Panoptic Segmentation</b>
+ </td>
+ <td>
+ <b>Other</b>
+ </td>
+ </tr>
+ <tr valign="top">
+ <td>
+ <ul>
+ <li><a href="configs/fast_rcnn">Fast R-CNN (ICCV'2015)</a></li>
+ <li><a href="configs/faster_rcnn">Faster R-CNN (NeurIPS'2015)</a></li>
+ <li><a href="configs/rpn">RPN (NeurIPS'2015)</a></li>
+ <li><a href="configs/ssd">SSD (ECCV'2016)</a></li>
+ <li><a href="configs/retinanet">RetinaNet (ICCV'2017)</a></li>
+ <li><a href="configs/cascade_rcnn">Cascade R-CNN (CVPR'2018)</a></li>
+ <li><a href="configs/yolo">YOLOv3 (ArXiv'2018)</a></li>
+ <li><a href="configs/cornernet">CornerNet (ECCV'2018)</a></li>
+ <li><a href="configs/grid_rcnn">Grid R-CNN (CVPR'2019)</a></li>
+ <li><a href="configs/guided_anchoring">Guided Anchoring (CVPR'2019)</a></li>
+ <li><a href="configs/fsaf">FSAF (CVPR'2019)</a></li>
+ <li><a href="configs/centernet">CenterNet (CVPR'2019)</a></li>
+ <li><a href="configs/libra_rcnn">Libra R-CNN (CVPR'2019)</a></li>
+ <li><a href="configs/tridentnet">TridentNet (ICCV'2019)</a></li>
+ <li><a href="configs/fcos">FCOS (ICCV'2019)</a></li>
+ <li><a href="configs/reppoints">RepPoints (ICCV'2019)</a></li>
+ <li><a href="configs/free_anchor">FreeAnchor (NeurIPS'2019)</a></li>
+ <li><a href="configs/cascade_rpn">CascadeRPN (NeurIPS'2019)</a></li>
+ <li><a href="configs/foveabox">Foveabox (TIP'2020)</a></li>
+ <li><a href="configs/double_heads">Double-Head R-CNN (CVPR'2020)</a></li>
+ <li><a href="configs/atss">ATSS (CVPR'2020)</a></li>
+ <li><a href="configs/nas_fcos">NAS-FCOS (CVPR'2020)</a></li>
+ <li><a href="configs/centripetalnet">CentripetalNet (CVPR'2020)</a></li>
+ <li><a href="configs/autoassign">AutoAssign (ArXiv'2020)</a></li>
+ <li><a href="configs/sabl">Side-Aware Boundary Localization (ECCV'2020)</a></li>
+ <li><a href="configs/dynamic_rcnn">Dynamic R-CNN (ECCV'2020)</a></li>
+ <li><a href="configs/detr">DETR (ECCV'2020)</a></li>
+ <li><a href="configs/paa">PAA (ECCV'2020)</a></li>
+ <li><a href="configs/vfnet">VarifocalNet (CVPR'2021)</a></li>
+ <li><a href="configs/sparse_rcnn">Sparse R-CNN (CVPR'2021)</a></li>
+ <li><a href="configs/yolof">YOLOF (CVPR'2021)</a></li>
+ <li><a href="configs/yolox">YOLOX (CVPR'2021)</a></li>
+ <li><a href="configs/deformable_detr">Deformable DETR (ICLR'2021)</a></li>
+ <li><a href="configs/tood">TOOD (ICCV'2021)</a></li>
+ <li><a href="configs/ddod">DDOD (ACM MM'2021)</a></li>
+ <li><a href="configs/rtmdet">RTMDet (ArXiv'2022)</a></li>
+ <li><a href="configs/conditional_detr">Conditional DETR (ICCV'2021)</a></li>
+ <li><a href="configs/dab_detr">DAB-DETR (ICLR'2022)</a></li>
+ <li><a href="configs/dino">DINO (ICLR'2023)</a></li>
+ <li><a href="configs/glip">GLIP (CVPR'2022)</a></li>
+ <li><a href="configs/ddq">DDQ (CVPR'2023)</a></li>
+ <li><a href="projects/DiffusionDet">DiffusionDet (ArXiv'2023)</a></li>
+ <li><a href="projects/EfficientDet">EfficientDet (CVPR'2020)</a></li>
+ <li><a href="projects/ViTDet">ViTDet (ECCV'2022)</a></li>
+ <li><a href="projects/Detic">Detic (ECCV'2022)</a></li>
+ <li><a href="projects/CO-DETR">CO-DETR (ICCV'2023)</a></li>
+ </ul>
+ </td>
+ <td>
+ <ul>
+ <li><a href="configs/mask_rcnn">Mask R-CNN (ICCV'2017)</a></li>
+ <li><a href="configs/cascade_rcnn">Cascade Mask R-CNN (CVPR'2018)</a></li>
+ <li><a href="configs/ms_rcnn">Mask Scoring R-CNN (CVPR'2019)</a></li>
+ <li><a href="configs/htc">Hybrid Task Cascade (CVPR'2019)</a></li>
+ <li><a href="configs/yolact">YOLACT (ICCV'2019)</a></li>
+ <li><a href="configs/instaboost">InstaBoost (ICCV'2019)</a></li>
+ <li><a href="configs/solo">SOLO (ECCV'2020)</a></li>
+ <li><a href="configs/point_rend">PointRend (CVPR'2020)</a></li>
+ <li><a href="configs/detectors">DetectoRS (ArXiv'2020)</a></li>
+ <li><a href="configs/solov2">SOLOv2 (NeurIPS'2020)</a></li>
+ <li><a href="configs/scnet">SCNet (AAAI'2021)</a></li>
+ <li><a href="configs/queryinst">QueryInst (ICCV'2021)</a></li>
+ <li><a href="configs/mask2former">Mask2Former (ArXiv'2021)</a></li>
+ <li><a href="configs/condinst">CondInst (ECCV'2020)</a></li>
+ <li><a href="projects/SparseInst">SparseInst (CVPR'2022)</a></li>
+ <li><a href="configs/rtmdet">RTMDet (ArXiv'2022)</a></li>
+ <li><a href="configs/boxinst">BoxInst (CVPR'2021)</a></li>
+ <li><a href="projects/ConvNeXt-V2">ConvNeXt-V2 (ArXiv'2023)</a></li>
+ </ul>
+ </td>
+ <td>
+ <ul>
+ <li><a href="configs/panoptic_fpn">Panoptic FPN (CVPR'2019)</a></li>
+ <li><a href="configs/maskformer">MaskFormer (NeurIPS'2021)</a></li>
+ <li><a href="configs/mask2former">Mask2Former (ArXiv'2021)</a></li>
+ <li><a href="configs/XDecoder">XDecoder (CVPR'2023)</a></li>
+ </ul>
+ </td>
+ <td>
+ <li><b>Contrastive Learning</b></li>
+ <ul>
+ <li><a href="configs/selfsup_pretrain">SwAV (NeurIPS'2020)</a></li>
+ <li><a href="configs/selfsup_pretrain">MoCo (CVPR'2020)</a></li>
+ <li><a href="configs/selfsup_pretrain">MoCov2 (ArXiv'2020)</a></li>
+ </ul>
+ <li><b>Distillation</b></li>
+ <ul>
+ <li><a href="configs/ld">Localization Distillation (CVPR'2022)</a></li>
+ <li><a href="configs/lad">Label Assignment Distillation (WACV'2022)</a></li>
+ </ul>
+ <li><b>Semi-Supervised Object Detection</b></li>
+ <ul>
+ <li><a href="configs/soft_teacher">Soft Teacher (ICCV'2021)</a></li>
+ </ul>
+ </td>
+ </tr>
+ </tbody>
+ </table>
+
+ <div align="center">
+ <b>Components</b>
+ </div>
+ <table align="center">
+ <tbody>
+ <tr align="center" valign="bottom">
+ <td>
+ <b>Backbones</b>
+ </td>
+ <td>
+ <b>Necks</b>
+ </td>
+ <td>
+ <b>Loss</b>
+ </td>
+ <td>
+ <b>Common</b>
+ </td>
+ </tr>
+ <tr valign="top">
+ <td>
+ <ul>
+ <li>VGG (ICLR'2015)</li>
+ <li>ResNet (CVPR'2016)</li>
+ <li>ResNeXt (CVPR'2017)</li>
+ <li>MobileNetV2 (CVPR'2018)</li>
+ <li><a href="configs/hrnet">HRNet (CVPR'2019)</a></li>
+ <li><a href="configs/empirical_attention">Generalized Attention (ICCV'2019)</a></li>
+ <li><a href="configs/gcnet">GCNet (ICCVW'2019)</a></li>
+ <li><a href="configs/res2net">Res2Net (TPAMI'2020)</a></li>
+ <li><a href="configs/regnet">RegNet (CVPR'2020)</a></li>
+ <li><a href="configs/resnest">ResNeSt (ArXiv'2020)</a></li>
+ <li><a href="configs/pvt">PVT (ICCV'2021)</a></li>
+ <li><a href="configs/swin">Swin (CVPR'2021)</a></li>
+ <li><a href="configs/pvt">PVTv2 (ArXiv'2021)</a></li>
+ <li><a href="configs/resnet_strikes_back">ResNet strikes back (ArXiv'2021)</a></li>
+ <li><a href="configs/efficientnet">EfficientNet (ArXiv'2021)</a></li>
+ <li><a href="configs/convnext">ConvNeXt (CVPR'2022)</a></li>
+ <li><a href="projects/ConvNeXt-V2">ConvNeXtv2 (ArXiv'2023)</a></li>
+ </ul>
+ </td>
+ <td>
+ <ul>
+ <li><a href="configs/pafpn">PAFPN (CVPR'2018)</a></li>
+ <li><a href="configs/nas_fpn">NAS-FPN (CVPR'2019)</a></li>
+ <li><a href="configs/carafe">CARAFE (ICCV'2019)</a></li>
+ <li><a href="configs/fpg">FPG (ArXiv'2020)</a></li>
+ <li><a href="configs/groie">GRoIE (ICPR'2020)</a></li>
+ <li><a href="configs/dyhead">DyHead (CVPR'2021)</a></li>
+ </ul>
+ </td>
+ <td>
+ <ul>
+ <li><a href="configs/ghm">GHM (AAAI'2019)</a></li>
+ <li><a href="configs/gfl">Generalized Focal Loss (NeurIPS'2020)</a></li>
+ <li><a href="configs/seesaw_loss">Seesaw Loss (CVPR'2021)</a></li>
+ </ul>
+ </td>
+ <td>
+ <ul>
+ <li><a href="configs/faster_rcnn/faster-rcnn_r50_fpn_ohem_1x_coco.py">OHEM (CVPR'2016)</a></li>
+ <li><a href="configs/gn">Group Normalization (ECCV'2018)</a></li>
+ <li><a href="configs/dcn">DCN (ICCV'2017)</a></li>
+ <li><a href="configs/dcnv2">DCNv2 (CVPR'2019)</a></li>
+ <li><a href="configs/gn+ws">Weight Standardization (ArXiv'2019)</a></li>
+ <li><a href="configs/pisa">Prime Sample Attention (CVPR'2020)</a></li>
+ <li><a href="configs/strong_baselines">Strong Baselines (CVPR'2021)</a></li>
+ <li><a href="configs/resnet_strikes_back">ResNet strikes back (ArXiv'2021)</a></li>
+ </ul>
+ </td>
+ </tr>
+ </tbody>
+ </table>
+
+ Some other methods are also supported in [projects using MMDetection](./docs/en/notes/projects.md).
+
+ ## FAQ
+
+ Please refer to [FAQ](docs/en/notes/faq.md) for frequently asked questions.
+
+ ## Contributing
+
+ We appreciate all contributions to improve MMDetection. Ongoing projects can be found in our [GitHub Projects](https://github.com/open-mmlab/mmdetection/projects), and we welcome community users to participate in them. Please refer to [CONTRIBUTING.md](.github/CONTRIBUTING.md) for the contributing guideline.
+
+ ## Acknowledgement
+
+ MMDetection is an open-source project contributed to by researchers and engineers from various colleges and companies. We appreciate all the contributors who implement their methods or add new features, as well as users who give valuable feedback.
+ We hope the toolbox and benchmark can serve the growing research community by providing a flexible toolkit to reimplement existing methods and to develop new detectors.
+
+ ## Citation
+
+ If you use this toolbox or benchmark in your research, please cite this project.
+
+ ```
+ @article{mmdetection,
+   title   = {{MMDetection}: Open MMLab Detection Toolbox and Benchmark},
+   author  = {Chen, Kai and Wang, Jiaqi and Pang, Jiangmiao and Cao, Yuhang and
+              Xiong, Yu and Li, Xiaoxiao and Sun, Shuyang and Feng, Wansen and
+              Liu, Ziwei and Xu, Jiarui and Zhang, Zheng and Cheng, Dazhi and
+              Zhu, Chenchen and Cheng, Tianheng and Zhao, Qijie and Li, Buyu and
+              Lu, Xin and Zhu, Rui and Wu, Yue and Dai, Jifeng and Wang, Jingdong
+              and Shi, Jianping and Ouyang, Wanli and Loy, Chen Change and Lin, Dahua},
+   journal = {arXiv preprint arXiv:1906.07155},
+   year    = {2019}
+ }
+ ```
+
+ ## License
+
+ This project is released under the [Apache 2.0 license](LICENSE).
+
+ ## Projects in OpenMMLab
+
+ - [MMEngine](https://github.com/open-mmlab/mmengine): OpenMMLab foundational library for training deep learning models.
+ - [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab foundational library for computer vision.
+ - [MMPreTrain](https://github.com/open-mmlab/mmpretrain): OpenMMLab pre-training toolbox and benchmark.
+ - [MMagic](https://github.com/open-mmlab/mmagic): Open**MM**Lab **A**dvanced, **G**enerative and **I**ntelligent **C**reation toolbox.
+ - [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab detection toolbox and benchmark.
+ - [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection.
+ - [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab rotated object detection toolbox and benchmark.
+ - [MMYOLO](https://github.com/open-mmlab/mmyolo): OpenMMLab YOLO series toolbox and benchmark.
+ - [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox and benchmark.
+ - [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab text detection, recognition, and understanding toolbox.
+ - [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox and benchmark.
+ - [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 3D human parametric model toolbox and benchmark.
+ - [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab self-supervised learning toolbox and benchmark.
+ - [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab model compression toolbox and benchmark.
+ - [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab fewshot learning toolbox and benchmark.
+ - [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab's next-generation action understanding toolbox and benchmark.
+ - [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab video perception toolbox and benchmark.
+ - [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab optical flow toolbox and benchmark.
+ - [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab image and video editing toolbox.
+ - [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab image and video generative models toolbox.
+ - [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab model deployment framework.
+ - [MIM](https://github.com/open-mmlab/mim): MIM installs OpenMMLab packages.
+ - [MMEval](https://github.com/open-mmlab/mmeval): A unified evaluation library for multiple machine learning libraries.
+ - [Playground](https://github.com/open-mmlab/playground): A central hub for gathering and showcasing amazing projects built upon OpenMMLab.
README_zh-CN.md ADDED
@@ -0,0 +1,476 @@
1
+ <div align="center">
2
+ <img src="resources/mmdet-logo.png" width="600"/>
3
+ <div>&nbsp;</div>
4
+ <div align="center">
5
+ <b><font size="5">OpenMMLab 官网</font></b>
6
+ <sup>
7
+ <a href="https://openmmlab.com">
8
+ <i><font size="4">HOT</font></i>
9
+ </a>
10
+ </sup>
11
+ &nbsp;&nbsp;&nbsp;&nbsp;
12
+ <b><font size="5">OpenMMLab 开放平台</font></b>
13
+ <sup>
14
+ <a href="https://platform.openmmlab.com">
15
+ <i><font size="4">TRY IT OUT</font></i>
16
+ </a>
17
+ </sup>
18
+ </div>
19
+ <div>&nbsp;</div>
20
+
21
+ [![PyPI](https://img.shields.io/pypi/v/mmdet)](https://pypi.org/project/mmdet)
22
+ [![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmdetection.readthedocs.io/en/latest/)
23
+ [![badge](https://github.com/open-mmlab/mmdetection/workflows/build/badge.svg)](https://github.com/open-mmlab/mmdetection/actions)
24
+ [![codecov](https://codecov.io/gh/open-mmlab/mmdetection/branch/main/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmdetection)
25
+ [![license](https://img.shields.io/github/license/open-mmlab/mmdetection.svg)](https://github.com/open-mmlab/mmdetection/blob/main/LICENSE)
26
+ [![open issues](https://isitmaintained.com/badge/open/open-mmlab/mmdetection.svg)](https://github.com/open-mmlab/mmdetection/issues)
27
+ [![issue resolution](https://isitmaintained.com/badge/resolution/open-mmlab/mmdetection.svg)](https://github.com/open-mmlab/mmdetection/issues)
28
+ [![Open in OpenXLab](https://cdn-static.openxlab.org.cn/app-center/openxlab_demo.svg)](https://openxlab.org.cn/apps?search=mmdet)
29
+
30
+ [📘使用文档](https://mmdetection.readthedocs.io/zh_CN/latest/) |
31
+ [🛠️安装教程](https://mmdetection.readthedocs.io/zh_CN/latest/get_started.html) |
32
+ [👀模型库](https://mmdetection.readthedocs.io/zh_CN/latest/model_zoo.html) |
33
+ [🆕更新日志](https://mmdetection.readthedocs.io/en/latest/notes/changelog.html) |
34
+ [🚀进行中的项目](https://github.com/open-mmlab/mmdetection/projects) |
35
+ [🤔报告问题](https://github.com/open-mmlab/mmdetection/issues/new/choose)
36
+
37
+ </div>
38
+
39
+ <div align="center">
40
+
41
+ [English](README.md) | 简体中文
42
+
43
+ </div>
44
+
45
+ <div align="center">
46
+ <a href="https://openmmlab.medium.com/" style="text-decoration:none;">
47
+ <img src="https://user-images.githubusercontent.com/25839884/219255827-67c1a27f-f8c5-46a9-811d-5e57448c61d1.png" width="3%" alt="" /></a>
48
+ <img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
49
+ <a href="https://discord.com/channels/1037617289144569886/1046608014234370059" style="text-decoration:none;">
50
+ <img src="https://user-images.githubusercontent.com/25839884/218347213-c080267f-cbb6-443e-8532-8e1ed9a58ea9.png" width="3%" alt="" /></a>
51
+ <img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
52
+ <a href="https://twitter.com/OpenMMLab" style="text-decoration:none;">
53
+ <img src="https://user-images.githubusercontent.com/25839884/218346637-d30c8a0f-3eba-4699-8131-512fb06d46db.png" width="3%" alt="" /></a>
54
+ <img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
55
+ <a href="https://www.youtube.com/openmmlab" style="text-decoration:none;">
56
+ <img src="https://user-images.githubusercontent.com/25839884/218346691-ceb2116a-465a-40af-8424-9f30d2348ca9.png" width="3%" alt="" /></a>
57
+ <img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
58
+ <a href="https://space.bilibili.com/1293512903" style="text-decoration:none;">
59
+ <img src="https://user-images.githubusercontent.com/25839884/219026751-d7d14cce-a7c9-4e82-9942-8375fca65b99.png" width="3%" alt="" /></a>
60
+ <img src="https://user-images.githubusercontent.com/25839884/218346358-56cc8e2f-a2b8-487f-9088-32480cceabcf.png" width="3%" alt="" />
61
+ <a href="https://www.zhihu.com/people/openmmlab" style="text-decoration:none;">
62
+ <img src="https://user-images.githubusercontent.com/25839884/219026120-ba71e48b-6e94-4bd4-b4e9-b7d175b5e362.png" width="3%" alt="" /></a>
63
+ </div>
64
+
65
+ <div align="center">
66
+ <img src="https://github.com/open-mmlab/mmdetection/assets/17425982/6c29886f-ae7a-4a55-8be4-352ee85b7d3e"/>
67
+ </div>
68
+
69
+ ## 简介
70
+
71
+ MMDetection 是一个基于 PyTorch 的目标检测开源工具箱。它是 [OpenMMLab](https://openmmlab.com/) 项目的一部分。
72
+
73
+ 主分支代码目前支持 PyTorch 1.8 及其以上的版本。
74
+
75
+ <img src="https://user-images.githubusercontent.com/12907710/187674113-2074d658-f2fb-42d1-ac15-9c4a695e64d7.png"/>
76
+
77
+ <details open>
78
+ <summary>主要特性</summary>
79
+
80
+ - **模块化设计**
81
+
82
+ MMDetection 将检测框架解耦成不同的模块组件,通过组合不同的模块组件,用户可以便捷地构建自定义的检测模型
83
+
84
+ - **支持多种检测任务**
85
+
86
+ MMDetection 支持了各种不同的检测任务,包括**目标检���**,**实例分割**,**全景分割**,以及**半监督目标检测**。
87
+
88
+ - **速度快**
89
+
90
+ 基本的框和 mask 操作都实现了 GPU 版本,训练速度比其他代码库更快或者相当,包括 [Detectron2](https://github.com/facebookresearch/detectron2), [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark) 和 [SimpleDet](https://github.com/TuSimple/simpledet)。
91
+
92
+ - **性能高**
93
+
94
+ MMDetection 这个算法库源自于 COCO 2018 目标检测竞赛的冠军团队 *MMDet* 团队开发的代码,我们在之后持续进行了改进和提升。
95
+ 新发布的 [RTMDet](configs/rtmdet) 还在实时实例分割和旋转目标检测任务中取得了最先进的成果,同时也在目标检测模型中取得了最佳的的参数量和精度平衡。
96
+
97
+ </details>
98
+
99
+ 除了 MMDetection 之外,我们还开源了深度学习训练库 [MMEngine](https://github.com/open-mmlab/mmengine) 和计算机视觉基础库 [MMCV](https://github.com/open-mmlab/mmcv),它们是 MMDetection 的主要依赖。
100
+
101
+ ## 最新进展
102
+
103
+ 💎 **我们已经发布了 MM-Grounding-DINO Swin-B 和 Swin-L 预训练权重,欢迎试用和反馈.**
104
+
105
+ ### 亮点
106
+
107
+ **v3.3.0** 版本已经在 2024.1.5 发布:
108
+
109
+ **MM-Grounding-DINO: 轻松涨点,数据到评测全面开源**
110
+
111
+ Grounding DINO 是一个统一了 2d 开放词汇目标检测和 Phrase Grounding 的检测预训练模型,应用广泛,但是其训练部分并未开源,为此提出了 MM-Grounding-DINO。其不仅作为 Grounding DINO 的开源复现版,MM-Grounding-DINO 基于重新构建的数据类型出发,在探索了不同数据集组合和初始化策略基础上实现了 Grounding DINO 的性能极大提升,并且从多个维度包括 OOD、REC、Phrase Grounding、OVD 和 Finetune 等方面进行评测,充分挖掘 Grounding 预训练优缺点,希望能为后续工作提供启发。
112
+
113
+ arxiv 技术报告:https://arxiv.org/abs/2401.02361
114
+
115
+ 代码地址: [mm_grounding_dino/README.md](configs/mm_grounding_dino/README.md)
116
+
117
+ <div align=center>
118
+ <img src="https://github.com/open-mmlab/mmdetection/assets/17425982/fb14d1ee-5469-44d2-b865-aac9850c429c"/>
119
+ </div>
120
+
121
+ 我们很高兴向大家介绍我们在实时目标识别任务方面的最新成果 RTMDet,包含了一系列的全卷积单阶段检测模型。 RTMDet 不仅在从 tiny 到 extra-large 尺寸的目标检测模型上实现了最佳的参数量和精度的平衡,而且在实时实例分割和旋转目标检测任务上取得了最先进的成果。 更多细节请参阅[技术报告](https://arxiv.org/abs/2212.07784)。 预训练模型可以在[这里](configs/rtmdet)找到。
122
+
123
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/real-time-instance-segmentation-on-mscoco)](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco?p=rtmdet-an-empirical-study-of-designing-real)
124
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/object-detection-in-aerial-images-on-dota-1)](https://paperswithcode.com/sota/object-detection-in-aerial-images-on-dota-1?p=rtmdet-an-empirical-study-of-designing-real)
125
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/rtmdet-an-empirical-study-of-designing-real/object-detection-in-aerial-images-on-hrsc2016)](https://paperswithcode.com/sota/object-detection-in-aerial-images-on-hrsc2016?p=rtmdet-an-empirical-study-of-designing-real)
126
+
127
+ | Task | Dataset | AP | FPS(TRT FP16 BS1 3090) |
128
+ | ------------------------ | ------- | ------------------------------------ | ---------------------- |
129
+ | Object Detection | COCO | 52.8 | 322 |
130
+ | Instance Segmentation | COCO | 44.6 | 188 |
131
+ | Rotated Object Detection | DOTA | 78.9(single-scale)/81.3(multi-scale) | 121 |
132
+
133
+ <div align=center>
134
+ <img src="https://user-images.githubusercontent.com/12907710/208044554-1e8de6b5-48d8-44e4-a7b5-75076c7ebb71.png"/>
135
+ </div>
136
+
137
+ ## 安装
138
+
139
+ 请参考[快速入门文档](https://mmdetection.readthedocs.io/zh_CN/latest/get_started.html)进行安装。
140
+
141
+ ## 教程
142
+
143
+ 请阅读[概述](https://mmdetection.readthedocs.io/zh_CN/latest/get_started.html)对 MMDetection 进行初步的了解。
144
+
145
+ 为了帮助用户更进一步了解 MMDetection,我们准备了用户指南和进阶指南,请阅读我们的[文档](https://mmdetection.readthedocs.io/zh_CN/latest/):
146
+
147
+ - 用户指南
148
+
149
+ <details>
150
+
151
+ - [Train & Test](https://mmdetection.readthedocs.io/zh_CN/latest/user_guides/index.html#train-test)
152
+ - [Learn about Configs](https://mmdetection.readthedocs.io/zh_CN/latest/user_guides/config.html) (see the config-loading sketch after these lists)
153
+ - [Inference with existing models on standard datasets](https://mmdetection.readthedocs.io/en/latest/user_guides/inference.html)
154
+ - [Prepare datasets](https://mmdetection.readthedocs.io/zh_CN/latest/user_guides/dataset_prepare.html)
155
+ - [Test existing models](https://mmdetection.readthedocs.io/zh_CN/latest/user_guides/test.html)
156
+ - [Train predefined models on standard datasets](https://mmdetection.readthedocs.io/zh_CN/latest/user_guides/train.html)
157
+ - [Train with customized datasets](https://mmdetection.readthedocs.io/zh_CN/latest/user_guides/train.html#train-with-customized-datasets)
158
+ - [Train customized models on standard datasets](https://mmdetection.readthedocs.io/zh_CN/latest/user_guides/new_model.html)
159
+ - [Finetune models](https://mmdetection.readthedocs.io/zh_CN/latest/user_guides/finetune.html)
160
+ - [Submit test results](https://mmdetection.readthedocs.io/zh_CN/latest/user_guides/test_results_submission.html)
161
+ - [Weight initialization](https://mmdetection.readthedocs.io/zh_CN/latest/user_guides/init_cfg.html)
162
+ - [Use a single-stage detector as an RPN](https://mmdetection.readthedocs.io/zh_CN/latest/user_guides/single_stage_as_rpn.html)
163
+ - [Semi-supervised object detection](https://mmdetection.readthedocs.io/zh_CN/latest/user_guides/semi_det.html)
164
+ - [Useful tools](https://mmdetection.readthedocs.io/zh_CN/latest/user_guides/index.html#useful-tools)
165
+
166
+ </details>
167
+
168
+ - Advanced Guides
169
+
170
+ <details>
171
+
172
+ - [Basic concepts](https://mmdetection.readthedocs.io/zh_CN/latest/advanced_guides/index.html#basic-concepts)
173
+ - [Component customization](https://mmdetection.readthedocs.io/zh_CN/latest/advanced_guides/index.html#component-customization)
174
+ - [How to](https://mmdetection.readthedocs.io/zh_CN/latest/advanced_guides/index.html#how-to)
175
+
176
+ </details>
177
+
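+ Because the config system underpins all of the guides above, here is a minimal config-loading sketch (assuming a local checkout of the repository so the relative config path resolves):
+
+ ```python
+ # Load a shipped config, inspect a field, and override it in place.
+ from mmengine.config import Config
+
+ cfg = Config.fromfile('configs/rtmdet/rtmdet_tiny_8xb32-300e_coco.py')
+ print(cfg.model.type)                  # -> 'RTMDet'
+ cfg.train_dataloader.batch_size = 16   # tweak before launching training
+ ```
+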
178
+ We provide a Colab tutorial for object detection [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](demo/MMDet_Tutorial.ipynb) and a Colab tutorial for instance segmentation [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](demo/MMDet_InstanceSeg_Tutorial.ipynb)
179
+
180
+ We also provide a [collection of Chinese articles about MMDetection](docs/zh_cn/article.md)
181
+
182
+ If you need to migrate 2.x code to the new version, please refer to the [migration guide](https://mmdetection.readthedocs.io/en/latest/migration.html).
183
+
184
+ ## Benchmark and Model Zoo
185
+
186
+ Test results and models are available in the [model zoo](docs/zh_cn/model_zoo.md).
187
+
188
+ <div align="center">
189
+ <b>Architectures</b>
190
+ </div>
191
+ <table align="center">
192
+ <tbody>
193
+ <tr align="center" valign="bottom">
194
+ <td>
195
+ <b>Object Detection</b>
196
+ </td>
197
+ <td>
198
+ <b>Instance Segmentation</b>
199
+ </td>
200
+ <td>
201
+ <b>Panoptic Segmentation</b>
202
+ </td>
203
+ <td>
204
+ <b>Other</b>
205
+ </td>
206
+ </tr>
207
+ <tr valign="top">
208
+ <td>
209
+ <ul>
210
+ <li><a href="configs/fast_rcnn">Fast R-CNN (ICCV'2015)</a></li>
211
+ <li><a href="configs/faster_rcnn">Faster R-CNN (NeurIPS'2015)</a></li>
212
+ <li><a href="configs/rpn">RPN (NeurIPS'2015)</a></li>
213
+ <li><a href="configs/ssd">SSD (ECCV'2016)</a></li>
214
+ <li><a href="configs/retinanet">RetinaNet (ICCV'2017)</a></li>
215
+ <li><a href="configs/cascade_rcnn">Cascade R-CNN (CVPR'2018)</a></li>
216
+ <li><a href="configs/yolo">YOLOv3 (ArXiv'2018)</a></li>
217
+ <li><a href="configs/cornernet">CornerNet (ECCV'2018)</a></li>
218
+ <li><a href="configs/grid_rcnn">Grid R-CNN (CVPR'2019)</a></li>
219
+ <li><a href="configs/guided_anchoring">Guided Anchoring (CVPR'2019)</a></li>
220
+ <li><a href="configs/fsaf">FSAF (CVPR'2019)</a></li>
221
+ <li><a href="configs/centernet">CenterNet (CVPR'2019)</a></li>
222
+ <li><a href="configs/libra_rcnn">Libra R-CNN (CVPR'2019)</a></li>
223
+ <li><a href="configs/tridentnet">TridentNet (ICCV'2019)</a></li>
224
+ <li><a href="configs/fcos">FCOS (ICCV'2019)</a></li>
225
+ <li><a href="configs/reppoints">RepPoints (ICCV'2019)</a></li>
226
+ <li><a href="configs/free_anchor">FreeAnchor (NeurIPS'2019)</a></li>
227
+ <li><a href="configs/cascade_rpn">CascadeRPN (NeurIPS'2019)</a></li>
228
+ <li><a href="configs/foveabox">Foveabox (TIP'2020)</a></li>
229
+ <li><a href="configs/double_heads">Double-Head R-CNN (CVPR'2020)</a></li>
230
+ <li><a href="configs/atss">ATSS (CVPR'2020)</a></li>
231
+ <li><a href="configs/nas_fcos">NAS-FCOS (CVPR'2020)</a></li>
232
+ <li><a href="configs/centripetalnet">CentripetalNet (CVPR'2020)</a></li>
233
+ <li><a href="configs/autoassign">AutoAssign (ArXiv'2020)</a></li>
234
+ <li><a href="configs/sabl">Side-Aware Boundary Localization (ECCV'2020)</a></li>
235
+ <li><a href="configs/dynamic_rcnn">Dynamic R-CNN (ECCV'2020)</a></li>
236
+ <li><a href="configs/detr">DETR (ECCV'2020)</a></li>
237
+ <li><a href="configs/paa">PAA (ECCV'2020)</a></li>
238
+ <li><a href="configs/vfnet">VarifocalNet (CVPR'2021)</a></li>
239
+ <li><a href="configs/sparse_rcnn">Sparse R-CNN (CVPR'2021)</a></li>
240
+ <li><a href="configs/yolof">YOLOF (CVPR'2021)</a></li>
241
+ <li><a href="configs/yolox">YOLOX (CVPR'2021)</a></li>
242
+ <li><a href="configs/deformable_detr">Deformable DETR (ICLR'2021)</a></li>
243
+ <li><a href="configs/tood">TOOD (ICCV'2021)</a></li>
244
+ <li><a href="configs/ddod">DDOD (ACM MM'2021)</a></li>
245
+ <li><a href="configs/rtmdet">RTMDet (ArXiv'2022)</a></li>
246
+ <li><a href="configs/conditional_detr">Conditional DETR (ICCV'2021)</a></li>
247
+ <li><a href="configs/dab_detr">DAB-DETR (ICLR'2022)</a></li>
248
+ <li><a href="configs/dino">DINO (ICLR'2023)</a></li>
249
+ <li><a href="configs/glip">GLIP (CVPR'2022)</a></li>
250
+ <li><a href="configs/ddq">DDQ (CVPR'2023)</a></li>
251
+ <li><a href="projects/DiffusionDet">DiffusionDet (ArXiv'2023)</a></li>
252
+ <li><a href="projects/EfficientDet">EfficientDet (CVPR'2020)</a></li>
253
+ <li><a href="projects/ViTDet">ViTDet (ECCV'2022)</a></li>
254
+ <li><a href="projects/Detic">Detic (ECCV'2022)</a></li>
255
+ <li><a href="projects/CO-DETR">CO-DETR (ICCV'2023)</a></li>
256
+ </ul>
257
+ </td>
258
+ <td>
259
+ <ul>
260
+ <li><a href="configs/mask_rcnn">Mask R-CNN (ICCV'2017)</a></li>
261
+ <li><a href="configs/cascade_rcnn">Cascade Mask R-CNN (CVPR'2018)</a></li>
262
+ <li><a href="configs/ms_rcnn">Mask Scoring R-CNN (CVPR'2019)</a></li>
263
+ <li><a href="configs/htc">Hybrid Task Cascade (CVPR'2019)</a></li>
264
+ <li><a href="configs/yolact">YOLACT (ICCV'2019)</a></li>
265
+ <li><a href="configs/instaboost">InstaBoost (ICCV'2019)</a></li>
266
+ <li><a href="configs/solo">SOLO (ECCV'2020)</a></li>
267
+ <li><a href="configs/point_rend">PointRend (CVPR'2020)</a></li>
268
+ <li><a href="configs/detectors">DetectoRS (ArXiv'2020)</a></li>
269
+ <li><a href="configs/solov2">SOLOv2 (NeurIPS'2020)</a></li>
270
+ <li><a href="configs/scnet">SCNet (AAAI'2021)</a></li>
271
+ <li><a href="configs/queryinst">QueryInst (ICCV'2021)</a></li>
272
+ <li><a href="configs/mask2former">Mask2Former (ArXiv'2021)</a></li>
273
+ <li><a href="configs/condinst">CondInst (ECCV'2020)</a></li>
274
+ <li><a href="projects/SparseInst">SparseInst (CVPR'2022)</a></li>
275
+ <li><a href="configs/rtmdet">RTMDet (ArXiv'2022)</a></li>
276
+ <li><a href="configs/boxinst">BoxInst (CVPR'2021)</a></li>
277
+ <li><a href="projects/ConvNeXt-V2">ConvNeXt-V2 (Arxiv'2023)</a></li>
278
+ </ul>
279
+ </td>
280
+ <td>
281
+ <ul>
282
+ <li><a href="configs/panoptic_fpn">Panoptic FPN (CVPR'2019)</a></li>
283
+ <li><a href="configs/maskformer">MaskFormer (NeurIPS'2021)</a></li>
284
+ <li><a href="configs/mask2former">Mask2Former (ArXiv'2021)</a></li>
285
+ <li><a href="configs/XDecoder">XDecoder (CVPR'2023)</a></li>
286
+ </ul>
287
+ </td>
288
+ <td>
+ <ul>
+ <li><b>Contrastive Learning</b></li>
+ <ul>
+ <li><a href="configs/selfsup_pretrain">SwAV (NeurIPS'2020)</a></li>
+ <li><a href="configs/selfsup_pretrain">MoCo (CVPR'2020)</a></li>
+ <li><a href="configs/selfsup_pretrain">MoCov2 (ArXiv'2020)</a></li>
+ </ul>
+ <li><b>Distillation</b></li>
+ <ul>
+ <li><a href="configs/ld">Localization Distillation (CVPR'2022)</a></li>
+ <li><a href="configs/lad">Label Assignment Distillation (WACV'2022)</a></li>
+ </ul>
+ <li><b>Semi-Supervised Object Detection</b></li>
+ <ul>
+ <li><a href="configs/soft_teacher">Soft Teacher (ICCV'2021)</a></li>
+ </ul>
+ </ul>
+ </td>
+ </tr>
317
+ </tbody>
318
+ </table>
319
+
320
+ <div align="center">
321
+ <b>Components</b>
322
+ </div>
323
+ <table align="center">
324
+ <tbody>
325
+ <tr align="center" valign="bottom">
326
+ <td>
327
+ <b>Backbones</b>
328
+ </td>
329
+ <td>
330
+ <b>Necks</b>
331
+ </td>
332
+ <td>
333
+ <b>Loss</b>
334
+ </td>
335
+ <td>
336
+ <b>Common</b>
337
+ </td>
338
+ </tr>
339
+ <tr valign="top">
340
+ <td>
341
+ <ul>
342
+ <li>VGG (ICLR'2015)</li>
343
+ <li>ResNet (CVPR'2016)</li>
344
+ <li>ResNeXt (CVPR'2017)</li>
345
+ <li>MobileNetV2 (CVPR'2018)</li>
346
+ <li><a href="configs/hrnet">HRNet (CVPR'2019)</a></li>
347
+ <li><a href="configs/empirical_attention">Generalized Attention (ICCV'2019)</a></li>
348
+ <li><a href="configs/gcnet">GCNet (ICCVW'2019)</a></li>
349
+ <li><a href="configs/res2net">Res2Net (TPAMI'2020)</a></li>
350
+ <li><a href="configs/regnet">RegNet (CVPR'2020)</a></li>
351
+ <li><a href="configs/resnest">ResNeSt (ArXiv'2020)</a></li>
352
+ <li><a href="configs/pvt">PVT (ICCV'2021)</a></li>
353
+ <li><a href="configs/swin">Swin (CVPR'2021)</a></li>
354
+ <li><a href="configs/pvt">PVTv2 (ArXiv'2021)</a></li>
355
+ <li><a href="configs/resnet_strikes_back">ResNet strikes back (ArXiv'2021)</a></li>
356
+ <li><a href="configs/efficientnet">EfficientNet (ArXiv'2021)</a></li>
357
+ <li><a href="configs/convnext">ConvNeXt (CVPR'2022)</a></li>
358
+ <li><a href="projects/ConvNeXt-V2">ConvNeXtv2 (ArXiv'2023)</a></li>
359
+ </ul>
360
+ </td>
361
+ <td>
362
+ <ul>
363
+ <li><a href="configs/pafpn">PAFPN (CVPR'2018)</a></li>
364
+ <li><a href="configs/nas_fpn">NAS-FPN (CVPR'2019)</a></li>
365
+ <li><a href="configs/carafe">CARAFE (ICCV'2019)</a></li>
366
+ <li><a href="configs/fpg">FPG (ArXiv'2020)</a></li>
367
+ <li><a href="configs/groie">GRoIE (ICPR'2020)</a></li>
368
+ <li><a href="configs/dyhead">DyHead (CVPR'2021)</a></li>
369
+ </ul>
370
+ </td>
371
+ <td>
372
+ <ul>
373
+ <li><a href="configs/ghm">GHM (AAAI'2019)</a></li>
374
+ <li><a href="configs/gfl">Generalized Focal Loss (NeurIPS'2020)</a></li>
375
+ <li><a href="configs/seesaw_loss">Seasaw Loss (CVPR'2021)</a></li>
376
+ </ul>
377
+ </td>
378
+ <td>
379
+ <ul>
380
+ <li><a href="configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py">OHEM (CVPR'2016)</a></li>
381
+ <li><a href="configs/gn">Group Normalization (ECCV'2018)</a></li>
382
+ <li><a href="configs/dcn">DCN (ICCV'2017)</a></li>
383
+ <li><a href="configs/dcnv2">DCNv2 (CVPR'2019)</a></li>
384
+ <li><a href="configs/gn+ws">Weight Standardization (ArXiv'2019)</a></li>
385
+ <li><a href="configs/pisa">Prime Sample Attention (CVPR'2020)</a></li>
386
+ <li><a href="configs/strong_baselines">Strong Baselines (CVPR'2021)</a></li>
387
+ <li><a href="configs/resnet_strikes_back">Resnet strikes back (ArXiv'2021)</a></li>
388
+ </ul>
389
+ </td>
390
+ </tr>
393
+ </tbody>
394
+ </table>
395
+
396
+ Some other methods are supported in [projects based on MMDetection](./docs/zh_cn/notes/projects.md).
397
+
398
+ ## FAQ
399
+
400
+ Please refer to the [FAQ](docs/zh_cn/notes/faq.md) for answers to frequently asked questions.
401
+
402
+ ## Contributing
403
+
404
+ We appreciate all the contributors who have worked to improve MMDetection. Ongoing projects are listed on our [GitHub Projects](https://github.com/open-mmlab/mmdetection/projects) page, and community members are warmly welcome to join them. Please refer to the [contributing guide](.github/CONTRIBUTING.md) for how to contribute.
405
+
406
+ ## Acknowledgement
407
+
408
+ MMDetection is an open-source project jointly contributed to by researchers and engineers from various universities and companies. We appreciate all the contributors who have implemented algorithms and added new features, as well as the users who have given valuable feedback. We hope this toolbox and benchmark serve the community with flexible tooling for reimplementing existing methods and developing new models, and thereby keep giving back to the open-source community.
409
+
410
+ ## Citation
411
+
412
+ If you use this toolbox or benchmark in your research, please cite MMDetection with the following BibTeX entry.
413
+
414
+ ```
415
+ @article{mmdetection,
416
+ title = {{MMDetection}: Open MMLab Detection Toolbox and Benchmark},
417
+ author = {Chen, Kai and Wang, Jiaqi and Pang, Jiangmiao and Cao, Yuhang and
418
+ Xiong, Yu and Li, Xiaoxiao and Sun, Shuyang and Feng, Wansen and
419
+ Liu, Ziwei and Xu, Jiarui and Zhang, Zheng and Cheng, Dazhi and
420
+ Zhu, Chenchen and Cheng, Tianheng and Zhao, Qijie and Li, Buyu and
421
+ Lu, Xin and Zhu, Rui and Wu, Yue and Dai, Jifeng and Wang, Jingdong
422
+ and Shi, Jianping and Ouyang, Wanli and Loy, Chen Change and Lin, Dahua},
423
+ journal= {arXiv preprint arXiv:1906.07155},
424
+ year={2019}
425
+ }
426
+ ```
427
+
428
+ ## License
429
+
430
+ This project is released under the [Apache 2.0 license](LICENSE).
431
+
432
+ ## Projects in OpenMMLab
433
+
434
+ - [MMEngine](https://github.com/open-mmlab/mmengine): OpenMMLab foundational library for training deep learning models.
435
+ - [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab foundational library for computer vision.
436
+ - [MMPreTrain](https://github.com/open-mmlab/mmpretrain): OpenMMLab deep learning pre-training toolbox.
437
+ - [MMagic](https://github.com/open-mmlab/mmagic): OpenMMLab's next-generation toolbox for AI-generated content (AIGC).
438
+ - [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab object detection toolbox.
439
+ - [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection.
440
+ - [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab rotated object detection toolbox and benchmark.
441
+ - [MMYOLO](https://github.com/open-mmlab/mmyolo): OpenMMLab YOLO series toolbox and benchmark.
442
+ - [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox.
443
+ - [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab toolkit for full-pipeline text detection, recognition, and understanding.
444
+ - [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox.
445
+ - [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 3D human parametric model toolbox and benchmark.
446
+ - [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab self-supervised learning toolbox and benchmark.
447
+ - [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab model compression toolbox and benchmark.
448
+ - [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab few-shot learning toolbox and benchmark.
449
+ - [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab's next-generation video understanding toolbox.
450
+ - [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab unified video object perception platform.
451
+ - [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab optical flow estimation toolbox and benchmark.
452
+ - [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab image and video editing toolbox.
453
+ - [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab image and video generative models toolbox.
454
+ - [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab model deployment framework.
455
+ - [MIM](https://github.com/open-mmlab/mim): a unified entry point for OpenMMLab projects, algorithms, and models.
456
+ - [MMEval](https://github.com/open-mmlab/mmeval): a unified, open cross-framework algorithm evaluation library.
457
+ - [Playground](https://github.com/open-mmlab/playground): a hub for collecting and showcasing cutting-edge, interesting community projects built on OpenMMLab.
458
+
459
+ ## Welcome to the OpenMMLab Community
460
+
461
+ Scan the QR code below to follow the OpenMMLab team's [official Zhihu account](https://www.zhihu.com/people/openmmlab), and scan the WeChat QR code to add our community assistant and join the MMDetection WeChat group. (Friend request format: research direction + region + school/company + name)
462
+
463
+ <div align="center">
464
+ <img src="resources/zhihu_qrcode.jpg" height="400" /> <img src="resources/miaomiao_qrcode.jpg" height="400" />
465
+ </div>
466
+
467
+ In the OpenMMLab community, we:
468
+
469
+ - 📢 share the cutting-edge core technologies of AI frameworks
470
+ - 💻 walk through the source code of commonly used PyTorch modules
471
+ - 📰 release the latest news about OpenMMLab
472
+ - 🚀 introduce cutting-edge algorithms developed by OpenMMLab
473
+ - 🏃 provide more efficient channels for questions and feedback
474
+ - 🔥 offer a platform for in-depth exchanges with developers from all industries
475
+
476
+ Packed with practical content 📘 and waiting for you 💗, the OpenMMLab community looks forward to your joining 👬
dataset-index.yml ADDED
@@ -0,0 +1,18 @@
1
+ openxlab: true
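+ # Each entry below maps a dataset alias to its OpenDataLab source, the local
+ # download/data roots, and a preprocessing script; the file is consumed by
+ # MIM's dataset download feature (e.g. `mim download mmdet --dataset coco2017`).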
2
+ voc2007:
3
+   dataset: OpenDataLab/PASCAL_VOC2007
4
+   download_root: data
5
+   data_root: data
6
+   script: tools/dataset_converters/scripts/preprocess_voc2007.sh
7
+
8
+ voc2012:
9
+   dataset: OpenDataLab/PASCAL_VOC2012
10
+   download_root: data
11
+   data_root: data
12
+   script: tools/dataset_converters/scripts/preprocess_voc2012.sh
13
+
14
+ coco2017:
15
+   dataset: OpenDataLab/COCO_2017
16
+   download_root: data
17
+   data_root: data/coco
18
+   script: tools/dataset_converters/scripts/preprocess_coco2017.sh
model-index.yml ADDED
@@ -0,0 +1,102 @@
1
+ Import:
2
+ - configs/albu_example/metafile.yml
3
+ - configs/atss/metafile.yml
4
+ - configs/autoassign/metafile.yml
5
+ - configs/boxinst/metafile.yml
6
+ - configs/carafe/metafile.yml
7
+ - configs/cascade_rcnn/metafile.yml
8
+ - configs/cascade_rpn/metafile.yml
9
+ - configs/centernet/metafile.yml
10
+ - configs/centripetalnet/metafile.yml
11
+ - configs/condinst/metafile.yml
12
+ - configs/conditional_detr/metafile.yml
13
+ - configs/cornernet/metafile.yml
14
+ - configs/convnext/metafile.yml
15
+ - configs/crowddet/metafile.yml
16
+ - configs/dab_detr/metafile.yml
17
+ - configs/dcn/metafile.yml
18
+ - configs/dcnv2/metafile.yml
19
+ - configs/ddod/metafile.yml
20
+ - configs/deformable_detr/metafile.yml
21
+ - configs/detectors/metafile.yml
22
+ - configs/detr/metafile.yml
23
+ - configs/dino/metafile.yml
24
+ - configs/double_heads/metafile.yml
25
+ - configs/dyhead/metafile.yml
26
+ - configs/dynamic_rcnn/metafile.yml
27
+ - configs/efficientnet/metafile.yml
28
+ - configs/empirical_attention/metafile.yml
29
+ - configs/faster_rcnn/metafile.yml
30
+ - configs/fcos/metafile.yml
31
+ - configs/foveabox/metafile.yml
32
+ - configs/fpg/metafile.yml
33
+ - configs/free_anchor/metafile.yml
34
+ - configs/fsaf/metafile.yml
35
+ - configs/gcnet/metafile.yml
36
+ - configs/gfl/metafile.yml
37
+ - configs/ghm/metafile.yml
38
+ - configs/gn/metafile.yml
39
+ - configs/gn+ws/metafile.yml
40
+ - configs/grid_rcnn/metafile.yml
41
+ - configs/groie/metafile.yml
42
+ - configs/guided_anchoring/metafile.yml
43
+ - configs/hrnet/metafile.yml
44
+ - configs/htc/metafile.yml
45
+ - configs/instaboost/metafile.yml
46
+ - configs/lad/metafile.yml
47
+ - configs/ld/metafile.yml
48
+ - configs/libra_rcnn/metafile.yml
49
+ - configs/lvis/metafile.yml
50
+ - configs/mask2former/metafile.yml
51
+ - configs/mask_rcnn/metafile.yml
52
+ - configs/maskformer/metafile.yml
53
+ - configs/ms_rcnn/metafile.yml
54
+ - configs/nas_fcos/metafile.yml
55
+ - configs/nas_fpn/metafile.yml
56
+ - configs/openimages/metafile.yml
57
+ - configs/paa/metafile.yml
58
+ - configs/pafpn/metafile.yml
59
+ - configs/panoptic_fpn/metafile.yml
60
+ - configs/pvt/metafile.yml
61
+ - configs/pisa/metafile.yml
62
+ - configs/point_rend/metafile.yml
63
+ - configs/queryinst/metafile.yml
64
+ - configs/regnet/metafile.yml
65
+ - configs/reppoints/metafile.yml
66
+ - configs/res2net/metafile.yml
67
+ - configs/resnest/metafile.yml
68
+ - configs/resnet_strikes_back/metafile.yml
69
+ - configs/retinanet/metafile.yml
70
+ - configs/rpn/metafile.yml
71
+ - configs/rtmdet/metafile.yml
72
+ - configs/sabl/metafile.yml
73
+ - configs/scnet/metafile.yml
74
+ - configs/scratch/metafile.yml
75
+ - configs/seesaw_loss/metafile.yml
76
+ - configs/simple_copy_paste/metafile.yml
77
+ - configs/soft_teacher/metafile.yml
78
+ - configs/sparse_rcnn/metafile.yml
79
+ - configs/solo/metafile.yml
80
+ - configs/solov2/metafile.yml
81
+ - configs/ssd/metafile.yml
82
+ - configs/strong_baselines/metafile.yml
83
+ - configs/swin/metafile.yml
84
+ - configs/tridentnet/metafile.yml
85
+ - configs/tood/metafile.yml
86
+ - configs/vfnet/metafile.yml
87
+ - configs/yolact/metafile.yml
88
+ - configs/yolo/metafile.yml
89
+ - configs/yolof/metafile.yml
90
+ - configs/yolox/metafile.yml
91
+ - configs/bytetrack/metafile.yml
92
+ - configs/strongsort/metafile.yml
93
+ - configs/ocsort/metafile.yml
94
+ - configs/sort/metafile.yml
95
+ - configs/deepsort/metafile.yml
96
+ - configs/qdtrack/metafile.yml
97
+ - configs/mask2former_vis/metafile.yml
98
+ - configs/masktrack_rcnn/metafile.yml
99
+ - configs/glip/metafile.yml
100
+ - configs/ddq/metafile.yml
101
+ - configs/grounding_dino/metafile.yml
102
+ - configs/mm_grounding_dino/metafile.yml
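+ # Each metafile listed above feeds the aggregated model zoo index, which MIM
+ # and the documentation tooling consume; new config families must be
+ # registered here to appear there.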
pytest.ini ADDED
@@ -0,0 +1,7 @@
1
+ [pytest]
2
+ addopts = --xdoctest --xdoctest-style=auto
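+ # --xdoctest collects doctest-style `Example` blocks from docstrings and
+ # runs them as tests alongside the regular suite.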
3
+ norecursedirs = .git ignore build __pycache__ data docker docs .eggs
4
+
5
+ filterwarnings = default
6
+     ignore:.*No cfgstr given in Cacher constructor or call.*:Warning
7
+     ignore:.*Define the __nice__ method for.*:Warning
requirements.txt ADDED
@@ -0,0 +1,3 @@
1
+ -r requirements/build.txt
2
+ -r requirements/optional.txt
3
+ -r requirements/runtime.txt
setup.cfg ADDED
@@ -0,0 +1,24 @@
1
+ [isort]
2
+ line_length = 79
3
+ multi_line_output = 0
4
+ extra_standard_library = setuptools
5
+ known_first_party = mmdet
6
+ known_third_party = PIL,asynctest,cityscapesscripts,cv2,gather_models,matplotlib,mmcv,mmengine,numpy,onnx,onnxruntime,pycocotools,parameterized,pytest,pytorch_sphinx_theme,requests,scipy,seaborn,six,terminaltables,torch,ts,yaml
7
+ no_lines_before = STDLIB,LOCALFOLDER
8
+ default_section = THIRDPARTY
9
+
10
+ [yapf]
11
+ BASED_ON_STYLE = pep8
12
+ BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true
13
+ SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true
14
+
15
+ # ignore-words-list needs to be lowercase format. For example, if we want to
16
+ # ignore word "BA", then we need to append "ba" to ignore-words-list rather
17
+ # than "BA"
18
+ [codespell]
19
+ skip = *.ipynb,configs/v3det/category_name_13204_v3det_2023_v1.txt
20
+ quiet-level = 3
21
+ ignore-words-list = patten,nd,ty,mot,hist,formating,winn,gool,datas,wan,confids,TOOD,tood,ba,warmup,nam,DOTA,dota,conveyer,singed,comittee,extention,moniter,pres,
22
+
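+ # The auto-generated pure-Python configs rely on wildcard imports, so
+ # F401 (unused import), F403 (star import) and F405 (possibly undefined
+ # from star imports) are silenced for them below.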
23
+ [flake8]
24
+ per-file-ignores = mmdet/configs/*: F401,F403,F405
setup.py ADDED
@@ -0,0 +1,224 @@
+ #!/usr/bin/env python
+ # Copyright (c) OpenMMLab. All rights reserved.
+ import os
+ import os.path as osp
+ import platform
+ import shutil
+ import sys
+ import warnings
+ from setuptools import find_packages, setup
+
+ import torch
+ from torch.utils.cpp_extension import (BuildExtension, CppExtension,
+                                        CUDAExtension)
+
+
+ def readme():
+     with open('README.md', encoding='utf-8') as f:
+         content = f.read()
+     return content
+
+
+ version_file = 'mmdet/version.py'
+
+
+ def get_version():
+     # Read __version__ from mmdet/version.py without importing the package.
+     with open(version_file, 'r') as f:
+         exec(compile(f.read(), version_file, 'exec'))
+     return locals()['__version__']
+
+
+ def make_cuda_ext(name, module, sources, sources_cuda=None):
+     # Use None instead of a mutable default argument for the CUDA sources.
+     if sources_cuda is None:
+         sources_cuda = []
+
+     define_macros = []
+     extra_compile_args = {'cxx': []}
+
+     if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
+         define_macros += [('WITH_CUDA', None)]
+         extension = CUDAExtension
+         # Disable implicit half-precision operators/conversions in the
+         # CUDA build.
+         extra_compile_args['nvcc'] = [
+             '-D__CUDA_NO_HALF_OPERATORS__',
+             '-D__CUDA_NO_HALF_CONVERSIONS__',
+             '-D__CUDA_NO_HALF2_OPERATORS__',
+         ]
+         sources += sources_cuda
+     else:
+         print(f'Compiling {name} without CUDA')
+         extension = CppExtension
+
+     return extension(
+         name=f'{module}.{name}',
+         sources=[os.path.join(*module.split('.'), p) for p in sources],
+         define_macros=define_macros,
+         extra_compile_args=extra_compile_args)
+
+
+ def parse_requirements(fname='requirements.txt', with_version=True):
+     """Parse the package dependencies listed in a requirements file,
+     optionally stripping specific versioning information.
+
+     Args:
+         fname (str): path to requirements file
+         with_version (bool, default=True): if True include version specs
+
+     Returns:
+         List[str]: list of requirements items
+
+     CommandLine:
+         python -c "import setup; print(setup.parse_requirements())"
+     """
+     import re
+     import sys
+     from os.path import exists
+     require_fpath = fname
+
+     def parse_line(line):
+         """Parse information from a line in a requirements text file."""
+         if line.startswith('-r '):
+             # Allow specifying requirements in other files
+             target = line.split(' ')[1]
+             for info in parse_require_file(target):
+                 yield info
+         else:
+             info = {'line': line}
+             if line.startswith('-e '):
+                 info['package'] = line.split('#egg=')[1]
+             elif '@git+' in line:
+                 info['package'] = line
+             else:
+                 # Remove versioning from the package
+                 pat = '(' + '|'.join(['>=', '==', '>']) + ')'
+                 parts = re.split(pat, line, maxsplit=1)
+                 parts = [p.strip() for p in parts]
+
+                 info['package'] = parts[0]
+                 if len(parts) > 1:
+                     op, rest = parts[1:]
+                     if ';' in rest:
+                         # Handle platform specific dependencies
+                         # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
+                         version, platform_deps = map(str.strip,
+                                                      rest.split(';'))
+                         info['platform_deps'] = platform_deps
+                     else:
+                         version = rest  # NOQA
+                     info['version'] = (op, version)
+             yield info
+
+     def parse_require_file(fpath):
+         with open(fpath, 'r') as f:
+             for line in f.readlines():
+                 line = line.strip()
+                 if line and not line.startswith('#'):
+                     for info in parse_line(line):
+                         yield info
+
+     def gen_packages_items():
+         if exists(require_fpath):
+             for info in parse_require_file(require_fpath):
+                 parts = [info['package']]
+                 if with_version and 'version' in info:
+                     parts.extend(info['version'])
+                 if not sys.version.startswith('3.4'):
+                     # apparently package_deps are broken in 3.4
+                     platform_deps = info.get('platform_deps')
+                     if platform_deps is not None:
+                         parts.append(';' + platform_deps)
+                 item = ''.join(parts)
+                 yield item
+
+     packages = list(gen_packages_items())
+     return packages
+
+
+ def add_mim_extension():
+     """Add extra files that are required to support MIM into the package.
+
+     These files will be added by creating a symlink to the originals if the
+     package is installed in `editable` mode (e.g. pip install -e .), or by
+     copying from the originals otherwise.
+     """
+
+     # parse installment mode
+     if 'develop' in sys.argv:
+         # installed by `pip install -e .`
+         if platform.system() == 'Windows':
+             # set `copy` mode here since symlink fails on Windows.
+             mode = 'copy'
+         else:
+             mode = 'symlink'
+     elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv:
+         # installed by `pip install .`
+         # or create source distribution by `python setup.py sdist`
+         mode = 'copy'
+     else:
+         return
+
+     filenames = [
+         'tools', 'configs', 'demo', 'model-index.yml', 'dataset-index.yml'
+     ]
+     repo_path = osp.dirname(__file__)
+     mim_path = osp.join(repo_path, 'mmdet', '.mim')
+     os.makedirs(mim_path, exist_ok=True)
+
+     for filename in filenames:
+         if osp.exists(filename):
+             src_path = osp.join(repo_path, filename)
+             tar_path = osp.join(mim_path, filename)
+
+             # Remove any stale file, link, or directory at the target.
+             if osp.isfile(tar_path) or osp.islink(tar_path):
+                 os.remove(tar_path)
+             elif osp.isdir(tar_path):
+                 shutil.rmtree(tar_path)
+
+             if mode == 'symlink':
+                 src_relpath = osp.relpath(src_path, osp.dirname(tar_path))
+                 os.symlink(src_relpath, tar_path)
+             elif mode == 'copy':
+                 if osp.isfile(src_path):
+                     shutil.copyfile(src_path, tar_path)
+                 elif osp.isdir(src_path):
+                     shutil.copytree(src_path, tar_path)
+                 else:
+                     warnings.warn(f'Cannot copy file {src_path}.')
+             else:
+                 raise ValueError(f'Invalid mode {mode}')
+
+
+ if __name__ == '__main__':
+     add_mim_extension()
+     setup(
+         name='mmdet',
+         version=get_version(),
+         description='OpenMMLab Detection Toolbox and Benchmark',
+         long_description=readme(),
+         long_description_content_type='text/markdown',
+         author='MMDetection Contributors',
+         author_email='[email protected]',
+         keywords='computer vision, object detection',
+         url='https://github.com/open-mmlab/mmdetection',
+         packages=find_packages(exclude=('configs', 'tools', 'demo')),
+         include_package_data=True,
+         classifiers=[
+             'Development Status :: 5 - Production/Stable',
+             'License :: OSI Approved :: Apache Software License',
+             'Operating System :: OS Independent',
+             'Programming Language :: Python :: 3',
+             'Programming Language :: Python :: 3.7',
+             'Programming Language :: Python :: 3.8',
+             'Programming Language :: Python :: 3.9',
+         ],
+         license='Apache License 2.0',
+         install_requires=parse_requirements('requirements/runtime.txt'),
+         extras_require={
+             'all': parse_requirements('requirements.txt'),
+             'tests': parse_requirements('requirements/tests.txt'),
+             'build': parse_requirements('requirements/build.txt'),
+             'optional': parse_requirements('requirements/optional.txt'),
+             'mim': parse_requirements('requirements/mminstall.txt'),
+             'tracking': parse_requirements('requirements/tracking.txt'),
+             'multimodal': parse_requirements('requirements/multimodal.txt'),
+         },
+         ext_modules=[],
+         cmdclass={'build_ext': BuildExtension},
+         zip_safe=False)