viruthik committed on
Commit
9df1e4c
·
1 Parent(s): 8c76f84

Upload 10958 files

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. .gitattributes +32 -0
  2. face-main/.idea/face-main.iml +10 -0
  3. face-main/.idea/inspectionProfiles/profiles_settings.xml +6 -0
  4. face-main/.idea/misc.xml +4 -0
  5. face-main/.idea/modules.xml +8 -0
  6. face-main/.idea/workspace.xml +39 -0
  7. face-main/README.md +1 -0
  8. face-main/face-attendance-system-master/LICENSE.md +21 -0
  9. face-main/face-attendance-system-master/README.md +55 -0
  10. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/LICENSE +201 -0
  11. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/README.md +112 -0
  12. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/README_EN.md +112 -0
  13. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/__pycache__/test.cpython-310.pyc +0 -0
  14. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/__pycache__/test.cpython-311.pyc +0 -0
  15. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/datasets/README.md +19 -0
  16. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/images/demo.gif +3 -0
  17. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/images/framework.jpg +0 -0
  18. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/images/logo.jpg +0 -0
  19. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/images/patch_demo.png +0 -0
  20. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/images/sample/image_F1.jpg +0 -0
  21. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/images/sample/image_F1_result.jpg +0 -0
  22. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/images/sample/image_F2.jpg +0 -0
  23. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/images/sample/image_F2_result.jpg +0 -0
  24. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/images/sample/image_T1.jpg +0 -0
  25. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/images/sample/image_T1_result.jpg +0 -0
  26. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/images/设置阈值.png +0 -0
  27. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/images/静默活体APK.jpeg +0 -0
  28. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/requirements.txt +8 -0
  29. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/resources/anti_spoof_models/2.7_80x80_MiniFASNetV2.pth +3 -0
  30. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/resources/anti_spoof_models/4_0_0_80x80_MiniFASNetV1SE.pth +3 -0
  31. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/resources/detection_model/Widerface-RetinaFace.caffemodel +3 -0
  32. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/resources/detection_model/deploy.prototxt +2499 -0
  33. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/saved_logs/jobs/Anti_Spoofing_1_80x80/Jul08_12-51-18/events.out.tfevents.1594183888.old01 +3 -0
  34. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/__pycache__/anti_spoof_predict.cpython-310.pyc +0 -0
  35. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/__pycache__/anti_spoof_predict.cpython-311.pyc +0 -0
  36. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/__pycache__/generate_patches.cpython-310.pyc +0 -0
  37. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/__pycache__/generate_patches.cpython-311.pyc +0 -0
  38. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/__pycache__/utility.cpython-310.pyc +0 -0
  39. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/__pycache__/utility.cpython-311.pyc +0 -0
  40. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/anti_spoof_predict.py +106 -0
  41. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/data_io/__pycache__/functional.cpython-310.pyc +0 -0
  42. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/data_io/__pycache__/functional.cpython-311.pyc +0 -0
  43. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/data_io/__pycache__/transform.cpython-310.pyc +0 -0
  44. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/data_io/__pycache__/transform.cpython-311.pyc +0 -0
  45. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/data_io/dataset_folder.py +65 -0
  46. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/data_io/dataset_loader.py +33 -0
  47. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/data_io/functional.py +589 -0
  48. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/data_io/transform.py +347 -0
  49. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/default_config.py +73 -0
  50. face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/generate_patches.py +65 -0
.gitattributes CHANGED
@@ -33,3 +33,35 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/images/demo.gif filter=lfs diff=lfs merge=lfs -text
37
+ face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/resources/detection_model/Widerface-RetinaFace.caffemodel filter=lfs diff=lfs merge=lfs -text
38
+ face-main/venv/Lib/site-packages/altair/vegalite/v5/schema/__pycache__/core.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
39
+ face-main/venv/Lib/site-packages/cmake/data/bin/cmake-gui.exe filter=lfs diff=lfs merge=lfs -text
40
+ face-main/venv/Lib/site-packages/cmake/data/bin/cmake.exe filter=lfs diff=lfs merge=lfs -text
41
+ face-main/venv/Lib/site-packages/cmake/data/bin/cmcldeps.exe filter=lfs diff=lfs merge=lfs -text
42
+ face-main/venv/Lib/site-packages/cmake/data/bin/cpack.exe filter=lfs diff=lfs merge=lfs -text
43
+ face-main/venv/Lib/site-packages/cmake/data/bin/ctest.exe filter=lfs diff=lfs merge=lfs -text
44
+ face-main/venv/Lib/site-packages/cmake/data/doc/cmake/CMake.qch filter=lfs diff=lfs merge=lfs -text
45
+ face-main/venv/Lib/site-packages/cv2/cv2.pyd filter=lfs diff=lfs merge=lfs -text
46
+ face-main/venv/Lib/site-packages/cv2/opencv_videoio_ffmpeg480_64.dll filter=lfs diff=lfs merge=lfs -text
47
+ face-main/venv/Lib/site-packages/face_recognition_models/models/dlib_face_recognition_resnet_model_v1.dat filter=lfs diff=lfs merge=lfs -text
48
+ face-main/venv/Lib/site-packages/face_recognition_models/models/shape_predictor_5_face_landmarks.dat filter=lfs diff=lfs merge=lfs -text
49
+ face-main/venv/Lib/site-packages/face_recognition_models/models/shape_predictor_68_face_landmarks.dat filter=lfs diff=lfs merge=lfs -text
50
+ face-main/venv/Lib/site-packages/numpy/.libs/libopenblas64__v0.3.23-gcc_10_3_0.dll filter=lfs diff=lfs merge=lfs -text
51
+ face-main/venv/Lib/site-packages/numpy/core/_multiarray_umath.cp311-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
52
+ face-main/venv/Lib/site-packages/numpy/core/_simd.cp311-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
53
+ face-main/venv/Lib/site-packages/pandas/_libs/algos.cp311-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
54
+ face-main/venv/Lib/site-packages/pandas/_libs/groupby.cp311-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
55
+ face-main/venv/Lib/site-packages/pandas/_libs/hashtable.cp311-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
56
+ face-main/venv/Lib/site-packages/pandas/_libs/join.cp311-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
57
+ face-main/venv/Lib/site-packages/PIL/_imaging.cp311-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
58
+ face-main/venv/Lib/site-packages/PIL/_imagingft.cp311-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
59
+ face-main/venv/Lib/site-packages/pyarrow/arrow_acero.dll filter=lfs diff=lfs merge=lfs -text
60
+ face-main/venv/Lib/site-packages/pyarrow/arrow_dataset.dll filter=lfs diff=lfs merge=lfs -text
61
+ face-main/venv/Lib/site-packages/pyarrow/arrow_flight.dll filter=lfs diff=lfs merge=lfs -text
62
+ face-main/venv/Lib/site-packages/pyarrow/arrow_python.dll filter=lfs diff=lfs merge=lfs -text
63
+ face-main/venv/Lib/site-packages/pyarrow/arrow_substrait.dll filter=lfs diff=lfs merge=lfs -text
64
+ face-main/venv/Lib/site-packages/pyarrow/arrow.dll filter=lfs diff=lfs merge=lfs -text
65
+ face-main/venv/Lib/site-packages/pyarrow/arrow.lib filter=lfs diff=lfs merge=lfs -text
66
+ face-main/venv/Lib/site-packages/pyarrow/lib.cp311-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
67
+ face-main/venv/Lib/site-packages/pyarrow/parquet.dll filter=lfs diff=lfs merge=lfs -text
face-main/.idea/face-main.iml ADDED
@@ -0,0 +1,10 @@
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <module type="PYTHON_MODULE" version="4">
3
+ <component name="NewModuleRootManager">
4
+ <content url="file://$MODULE_DIR$">
5
+ <sourceFolder url="file://$MODULE_DIR$/face-attendance-system-master/Silent-Face-Anti-Spoofing-master" isTestSource="false" />
6
+ </content>
7
+ <orderEntry type="inheritedJdk" />
8
+ <orderEntry type="sourceFolder" forTests="false" />
9
+ </component>
10
+ </module>
face-main/.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
1
+ <component name="InspectionProjectProfileManager">
2
+ <settings>
3
+ <option name="USE_PROJECT_PROFILE" value="false" />
4
+ <version value="1.0" />
5
+ </settings>
6
+ </component>
face-main/.idea/misc.xml ADDED
@@ -0,0 +1,4 @@
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.11 (face-main)" project-jdk-type="Python SDK" />
4
+ </project>
face-main/.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="ProjectModuleManager">
4
+ <modules>
5
+ <module fileurl="file://$PROJECT_DIR$/.idea/face-main.iml" filepath="$PROJECT_DIR$/.idea/face-main.iml" />
6
+ </modules>
7
+ </component>
8
+ </project>
face-main/.idea/workspace.xml ADDED
@@ -0,0 +1,39 @@
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="AutoImportSettings">
4
+ <option name="autoReloadType" value="SELECTIVE" />
5
+ </component>
6
+ <component name="ChangeListManager">
7
+ <list default="true" id="5c7ff56d-2857-43d0-8e72-7e87331521e2" name="Changes" comment="" />
8
+ <option name="SHOW_DIALOG" value="false" />
9
+ <option name="HIGHLIGHT_CONFLICTS" value="true" />
10
+ <option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
11
+ <option name="LAST_RESOLUTION" value="IGNORE" />
12
+ </component>
13
+ <component name="MarkdownSettingsMigration">
14
+ <option name="stateVersion" value="1" />
15
+ </component>
16
+ <component name="ProjectId" id="2TA2rev5RoDU02vcUsngM9IInEh" />
17
+ <component name="ProjectViewState">
18
+ <option name="hideEmptyMiddlePackages" value="true" />
19
+ <option name="showLibraryContents" value="true" />
20
+ </component>
21
+ <component name="PropertiesComponent"><![CDATA[{
22
+ "keyToString": {
23
+ "RunOnceActivity.OpenProjectViewOnStart": "true",
24
+ "RunOnceActivity.ShowReadmeOnStart": "true",
25
+ "settings.editor.selected.configurable": "com.jetbrains.python.configuration.PyActiveSdkModuleConfigurable"
26
+ }
27
+ }]]></component>
28
+ <component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="application-level" UseSingleDictionary="true" transferred="true" />
29
+ <component name="TaskManager">
30
+ <task active="true" id="Default" summary="Default task">
31
+ <changelist id="5c7ff56d-2857-43d0-8e72-7e87331521e2" name="Changes" comment="" />
32
+ <created>1690471765039</created>
33
+ <option name="number" value="Default" />
34
+ <option name="presentableId" value="Default" />
35
+ <updated>1690471765039</updated>
36
+ </task>
37
+ <servers />
38
+ </component>
39
+ </project>
face-main/README.md ADDED
@@ -0,0 +1 @@
1
+ # face
face-main/face-attendance-system-master/LICENSE.md ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2022 computervisiondeveloper
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
face-main/face-attendance-system-master/README.md ADDED
@@ -0,0 +1,55 @@
1
+ # face-attendance-system
2
+
3
+ Face attendance system using face recognition with Python!
4
+
5
+ ## Face attendance software
6
+
7
+ <p align="center">
8
+ <a href="https://www.youtube.com/watch?v=z_dbnYHAQYg">
9
+ <img width="600" src="https://utils-computervisiondeveloper.s3.amazonaws.com/thumbnails/with_play_button/face_attendance.jpg" alt="Watch the video">
10
+ <br/>Watch on YouTube: Face attendance system with Python and face recognition!
11
+ </a>
12
+ </p>
13
+
14
+ ## Spoofing feature
15
+
16
+ <p align="center">
17
+ <a href="https://www.youtube.com/watch?v=_KvtVk8Gk1A">
18
+ <img width="600" src="https://utils-computervisiondeveloper.s3.amazonaws.com/thumbnails/with_play_button/face_attendance_spoofing.jpg" alt="Watch the video">
19
+ <br/>Watch on YouTube: Face attendance system with liveness detection!
20
+ </a>
21
+ </p>
22
+
23
+ ## execution
24
+
25
+ - Python 3.8
26
+
27
+ ### windows
28
+
29
+ On Windows, you will need to complete a couple of additional steps before starting this tutorial:
30
+ - Follow the instructions described in this video https://www.youtube.com/watch?v=xaDJ5xnc8dc, from minute 4:39 to 6:07.
31
+ - Install the packages listed in requirements_windows.txt.
32
+
33
+ ### linux, mac
34
+
35
+ - Install the packages in requirements.txt
36
+
37
+ ## spoofing feature
38
+
39
+ git clone https://github.com/computervisioneng/Silent-Face-Anti-Spoofing.git
40
+ pip install -r Silent-Face-Anti-Spoofing/requirements.txt
41
+
42
+ Remember to add the Silent-Face-Anti-Spoofing directory to your **PYTHONPATH** (a minimal sketch of doing this from Python is shown below).
43
+
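If you would rather not edit the environment variable, the cloned directory can also be appended to the import path at runtime. This is a minimal sketch, not part of the repository; the assumption is that Silent-Face-Anti-Spoofing was cloned next to your script.

```python
# Minimal sketch (assumption: Silent-Face-Anti-Spoofing was cloned next to this script).
# Appending the directory to sys.path has the same effect as adding it to PYTHONPATH.
import sys
from pathlib import Path

SPOOFING_DIR = Path(__file__).resolve().parent / "Silent-Face-Anti-Spoofing"
if SPOOFING_DIR.is_dir() and str(SPOOFING_DIR) not in sys.path:
    sys.path.append(str(SPOOFING_DIR))
```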
44
+ ## web app
45
+
46
+ Face attendance + face recognition web app with React and Python!
47
+
48
+ <p align="center">
49
+ <a href="https://www.youtube.com/watch?v=yWmW5uEtNws">
50
+ <img width="600" src="https://utils-computervisiondeveloper.s3.amazonaws.com/thumbnails/with_play_button/face_attendance_web_app_react_python.jpg" alt="Watch the video">
51
+ <br/>Watch on YouTube: Face attendance + face recognition web app with React and Python!
52
+ </a>
53
+ </p>
54
+ The code for this web app project is available [here](https://github.com/computervisiondeveloper/face-attendance-web-app-react-python).
55
+
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/LICENSE ADDED
@@ -0,0 +1,201 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "{}"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright 2020 Minivision
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/README.md ADDED
@@ -0,0 +1,112 @@
1
+ **Chinese Version**|[English Version](README_EN.md)
2
+ ![Silent-Face-Anti-Spoofing](https://github.com/minivision-ai/Silent-Face-Anti-Spoofing/blob/master/images/logo.jpg)
3
+ # Silent Liveness Detection (Silent-Face-Anti-Spoofing)
4
+ This is the silent liveness detection project from [Minivision Technology](https://www.minivision.cn/). You can scan the QR code below to get the Android APK and try out the silent liveness detection for yourself.
5
+ <img src="https://github.com/minivision-ai/Silent-Face-Anti-Spoofing/blob/master/images/静默活体APK.jpeg" width="200" height="200" align=center />
6
+ ## Updates
7
+ **2020-07-30:** Open-sourced the Caffe models and shared the live-stream video and related files explaining the industrial-grade silent liveness detection algorithm.
8
+ ## Introduction
9
+ In this project we open source the liveness model training architecture, the data preprocessing method, the training and test scripts, and an APK for everyone to test.
10
+
11
+ Liveness detection determines whether the face in front of the device is real or fake. A face presented through any other medium can be defined as a fake face, including printed paper photos, screens of electronic devices, silicone masks, 3D busts, etc. Current mainstream solutions are divided into cooperative liveness detection and non-cooperative (silent) liveness detection. Cooperative liveness detection requires the user to complete specified actions according to prompts before the liveness check, while silent liveness detection performs the check directly, without the user noticing.
12
+
13
+ Because the Fourier spectrum reflects, to some extent, the difference between real and fake faces in the frequency domain, we adopt a silent liveness detection method with auxiliary supervision from the Fourier spectrum. The model consists of a main classification branch and an auxiliary Fourier-spectrum supervision branch; the overall architecture is shown below:
14
+ ![Overall architecture](https://github.com/minivision-ai/Silent-Face-Anti-Spoofing/blob/master/images/framework.jpg)
15
+
16
+ Using our self-developed model pruning method, the FLOPs of MobileFaceNet are reduced from 0.224G to 0.081G, significantly improving model performance (less computation and fewer parameters) with little loss of accuracy.
17
+
18
+ |Model|FLOPs|Params|
19
+ | :------:|:-----:|:-----:|
20
+ |MobileFaceNet|0.224G|0.991M|
21
+ |MiniFASNetV1|0.081G|0.414M|
22
+ |MiniFASNetV2|0.081G|0.435M|
23
+
24
+ ## APK
25
+ ### APK source code
26
+ The deployment code for the Android platform is open source: https://github.com/minivision-ai/Silent-Face-Anti-Spoofing-APK
27
+
28
+ ### Demo
29
+ <img src="https://github.com/minivision-ai/Silent-Face-Anti-Spoofing/blob/master/images/demo.gif" width="300" height="400"/>
30
+
31
+ ### Key metrics
32
+ | Model(input 80x80)|FLOPs|Speed| FPR | TPR |Comments |
33
+ | :------:|:-----:|:-----:| :----: | :----: | :----: |
34
+ | APK model |84M| 20ms | 1e-5|97.8%| Open source|
35
+ | High-precision model |162M| 40ms| 1e-5 |99.7%| Not open source |
36
+
37
+ ### Test method
38
+ - Displayed information: speed (ms), confidence (0 to 1) and the liveness detection result (real face or fake face)
39
+ - Click the icon in the upper right corner to set the threshold; if the confidence is greater than the threshold the face is judged real, otherwise fake
40
+
41
+ ### Notes before testing
42
+ - All test images must be captured by a camera; otherwise they do not match the normal usage scenario and the algorithm's performance cannot be guaranteed.
43
+ - Because the robustness of RGB silent liveness detection is limited by the camera model and the usage scene, the actual experience may vary.
44
+ - During the test, make sure a complete face appears in the view and the face is rotated less than 30 degrees from vertical (in line with a normal face-scanning scene); otherwise the experience will be affected.
45
+
46
+ **Tested processors**
47
+
48
+ |Model|Kirin 990 5G|Kirin 990 |Snapdragon 845 |Kirin 810 |RK3288 |
49
+ | :------:|:-----:|:-----:|:-----:|:-----:|:-----:|
50
+ |Speed/ms|19|23|24|25|90|
51
+
52
+ ## Repo
53
+ ### Install dependencies
54
+ ```
55
+ pip install -r requirements.txt
56
+ ```
57
+ ### Clone
58
+ ```
59
+ git clone https://github.com/minivision-ai/Silent-Face-Anti-Spoofing
60
+ cd Silent-Face-Anti-Spoofing
61
+ ```
62
+ ### Data preprocessing
63
+ 1. Split the training set into three categories and put the images of each category into their own folder;
64
+ 2. Because a multi-scale model fusion method is used, the model is trained on both the original images and different patches, so the data is divided into original images and patches based on the original images;
65
+ - Original image (org_1_height**x**width): resize the original image directly to a fixed size (width, height), as shown in Figure 1;
66
+ - Patch based on the original image (scale_height**x**width): a face detector is used to obtain the face box, which is then expanded by a given scale; to keep the model input size consistent, the face box region is resized to a fixed size (width, height). Figures 2-4 show patch examples with scales of 1, 2.7 and 4;
67
+ ![patch demo](https://github.com/minivision-ai/Silent-Face-Anti-Spoofing/blob/master/images/patch_demo.png)
68
+
69
+ 3. The Fourier spectrum is used as auxiliary supervision; the corresponding Fourier spectrum is generated online from the training images.
70
+ **The directory structure of the dataset is shown below**
71
+ ```
72
+ ├── datasets
73
+ └── RGB_Images
74
+ ├── org_1_80x60
75
+ ├── 0
76
+ ├── aaa.png
77
+ ├── bbb.png
78
+ └── ...
79
+ ├── 1
80
+ ├── ddd.png
81
+ ├── eee.png
82
+ └── ...
83
+ └── 2
84
+ ├── ggg.png
85
+ ├── hhh.png
86
+ └── ...
87
+ ├── 1_80x80
88
+ └── ...
89
+ ```
90
+ ### Train
91
+ ```
92
+ python train.py --device_ids 0 --patch_info your_patch
93
+ ```
94
+ ### Test
95
+ ./resources/anti_spoof_models Fused anti-spoofing (liveness detection) models
96
+ ./resources/detection_model Detector model
97
+ ./images/sample Test images
98
+ ```
99
+ python test.py --image_name your_image_name
100
+ ```
101
+ ## Related resources
102
+ [Baidu Netdisk](https://pan.baidu.com/s/1u3BPHIEU4GmTti0G3LIDGQ), extraction code: 6d8q
103
+ (1) Technical walkthrough of the open-source industrial-grade silent liveness detection algorithm: [live-stream replay video](https://www.bilibili.com/video/BV1qZ4y1T7CH);
104
+ (2) the mind-map file from the live stream, stored in the files directory;
105
+ (3) the caffemodel files of the open-source models, stored in the models directory;
106
+
107
+ ## Reference
108
+ - Detector [RetinaFace](https://github.com/deepinsight/insightface/tree/master/RetinaFace)
109
+
110
+ To facilitate technical exchange among developers working with this project, we created QQ group 1121178835; you are welcome to join.
111
+
112
+ In addition to this open-source silent liveness detection algorithm, Minivision Technology also has a number of self-developed algorithms and commercial SDKs for face recognition and human body recognition. Interested individual or enterprise developers can visit the [Minivision Mini-AI Open Platform](https://ai.minivision.cn/) to learn more and contact us.
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/README_EN.md ADDED
@@ -0,0 +1,112 @@
1
+ [Chinese Version](README.md)|**English Version**
2
+ ![Silent-Face-Anti-Spoofing](https://github.com/minivision-ai/Silent-Face-Anti-Spoofing/blob/master/images/logo.jpg)
3
+ # Silent-Face-Anti-Spoofing
4
+
5
+ This is the Silent-Face-Anti-Spoofing project from [minivision technology](https://www.minivision.cn/). You can scan the QR code below to get the APK and install it on an Android device to experience real-time liveness detection (silent face anti-spoofing).
6
+ <img src="https://github.com/minivision-ai/Silent-Face-Anti-Spoofing/blob/master/images/静默活体APK.jpeg" width="200" height="200" align=center />
7
+
8
+ ## Introduction
9
+
10
+ In this project, we open source the silent face anti-spoofing training architecture, the data preprocessing method, the model training & test scripts, and an open-source APK for real-time testing.
11
+
12
+ The main purpose of silent face anti-spoofing detection is to judge whether the face in front of the device is real or fake. A face presented through any other medium can be defined as a fake face, including printed paper photos, screens of electronic devices, silicone masks, 3D busts, etc. Current mainstream solutions are divided into cooperative liveness detection and non-cooperative (silent) liveness detection. Cooperative liveness detection requires the user to complete specified actions according to prompts before the liveness check, while silent liveness detection performs the check directly.
13
+
14
+ Since the Fourier spectrum reflects, to a certain extent, the difference between real and fake faces in the frequency domain, we adopt a silent liveness detection method with auxiliary supervision from the Fourier spectrum. The model architecture consists of a main classification branch and an auxiliary Fourier-spectrum supervision branch. The overall architecture is shown in the following figure:
15
+
16
+ ![overall architecture](https://github.com/minivision-ai/Silent-Face-Anti-Spoofing/blob/master/images/framework.jpg)
17
+
18
+ Using our self-developed model pruning method, the FLOPs of MobileFaceNet are reduced from 0.224G to 0.081G, significantly improving model performance (less computation and fewer parameters) with little loss of precision.
19
+
20
+
21
+ |Model|FLOPs|Params|
22
+ | :------:|:-----:|:-----:|
23
+ |MobileFaceNet|0.224G|0.991M|
24
+ |MiniFASNetV1|0.081G|0.414M|
25
+ |MiniFASNetV2|0.081G|0.435M|
26
+
27
+ ## APK
28
+ ### APK source code
29
+ The deployment code for the Android platform is open source: https://github.com/minivision-ai/Silent-Face-Anti-Spoofing-APK
30
+
31
+ ### Demo
32
+ <img src="https://github.com/minivision-ai/Silent-Face-Anti-Spoofing/blob/master/images/demo.gif" width="300" height="400"/>
33
+
34
+ ### Performance
35
+ | Model|FLOPs|Speed| FPR | TPR |comments |
36
+ | :------:|:-----:|:-----:| :----: | :----: | :----: |
37
+ | APK |84M| 20ms | 1e-5|97.8%| Open Source|
38
+ | High precision model |162M| 40ms| 1e-5 |99.7%| Private |
39
+
40
+ ### Test Method
41
+
42
+ - Displayed information: speed (ms), confidence (0 to 1) and the liveness detection result (real face or fake face).
43
+ - Click the icon in the upper right corner to set the threshold. If the confidence is greater than the threshold, the face is judged real; otherwise it is judged fake (see the sketch below).
44
+
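For clarity, a minimal sketch of this decision rule follows. It is illustrative only: the per-model score layout and the class index of the "real" label are assumptions, and the repository's test.py remains the reference implementation.

```python
# Illustrative sketch of the decision rule described above, not the repository's code.
# Assumptions: each fused model outputs a softmax score vector and index 1 is the
# "real face" class; the threshold corresponds to the value set in the APK UI.
import numpy as np

def is_real_face(per_model_scores: list, threshold: float = 0.5) -> bool:
    fused = np.mean(per_model_scores, axis=0)  # average the fused models' outputs
    confidence = float(fused[1])               # confidence of the assumed "real" class
    return confidence > threshold              # above threshold -> judged a real face
```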
45
+ ### Before testing, you should know
46
+
47
+ - All test images must be captured by a camera; otherwise they do not match the normal usage scenario and the algorithm's performance cannot be guaranteed.
48
+ - Because the robustness of RGB silent liveness detection depends on the camera model and the scene, the actual experience may vary.
49
+ - During the test, make sure a complete face appears in the view and the face is rotated less than 30 degrees from vertical (in line with a normal face recognition scene); otherwise the experience will be affected.
50
+
51
+ **Tested mobile phone processor**
52
+
53
+ |type|Kirin990 5G|Kirin990 |Qualcomm845 |Kirin810 |RK3288 |
54
+ | :------:|:-----:|:-----:|:-----:|:-----:|:-----:|
55
+ |Speed/ms|19|23|24|25|90|
56
+
57
+ ## Repo
58
+ ### Install dependencies
59
+ ```
60
+ pip install -r requirements.txt
61
+ ```
62
+ ### Clone
63
+ ```
64
+ git clone https://github.com/minivision-ai/Silent-Face-Anti-Spoofing
65
+ cd Silent-Face-Anti-Spoofing
66
+ ```
67
+ ### Data Preprocessing
68
+ 1. Split the training set into three categories and put the images of each category into their own folder;
69
+ 2. Because a multi-scale model fusion method is used, the model is trained on both the original images and different patches, so the data is divided into original images and patches based on the original images;
70
+ - Original image (org_1_height**x**width): resize the original image to a fixed size (width, height), as shown in Figure 1;
71
+ - Patch based on the original image (scale_height**x**width): a face detector is used to obtain the face box, which is then expanded by a given scale. To keep the model input size consistent, the face box region is resized to a fixed size (width, height). Figures 2-4 show patch examples with scales of 1, 2.7 and 4;
72
+ ![patch demo](https://github.com/minivision-ai/Silent-Face-Anti-Spoofing/blob/master/images/patch_demo.png)
73
+
74
+ 3. The Fourier spectrum is used as auxiliary supervision; the corresponding Fourier spectrum is generated online from the training images (an illustrative sketch is given after the directory listing below).
75
+ **The directory structure of the dataset is shown below**
76
+ ```
77
+ ├── datasets
78
+ └── RGB_Images
79
+ ├── org_1_80x60
80
+ ├── 0
81
+ ├── aaa.png
82
+ ├── bbb.png
83
+ └── ...
84
+ ├── 1
85
+ ├── ddd.png
86
+ ├── eee.png
87
+ └── ...
88
+ └── 2
89
+ ├── ggg.png
90
+ ├── hhh.png
91
+ └── ...
92
+ ├── 1_80x80
93
+ └── ...
94
+ ```
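The following is an illustrative sketch of how such an online Fourier-spectrum map could be computed for a face crop. It is not the repository's implementation; the 80x80 size and the normalisation are assumptions.

```python
# Illustrative sketch (not the repository's code): log-magnitude Fourier spectrum
# of a face crop, the kind of map used here as auxiliary supervision.
# Assumptions: grayscale input resized to 80x80, output normalised to [0, 1].
import cv2
import numpy as np

def fourier_spectrum(image_path: str, size: int = 80) -> np.ndarray:
    gray = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    gray = cv2.resize(gray, (size, size))
    fft = np.fft.fftshift(np.fft.fft2(gray))   # centre the low frequencies
    spectrum = np.log(np.abs(fft) + 1e-8)      # log-magnitude spectrum
    spectrum -= spectrum.min()
    spectrum /= (np.ptp(spectrum) + 1e-8)      # scale to [0, 1]
    return spectrum.astype(np.float32)
```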
95
+ ### Train
96
+ ```
97
+ python train.py --device_ids 0 --patch_info your_patch
98
+ ```
99
+ ### Test
100
+ ./resources/anti_spoof_models Fused anti-spoofing (liveness detection) models
101
+ ./resources/detection_model Detector
102
+ ./images/sample Test Images
103
+ ```
104
+ python test.py --image_name your_image_name
105
+ ```
106
+ ## Reference
107
+ - Detector [RetinaFace](https://github.com/deepinsight/insightface/tree/master/RetinaFace)
108
+
109
+ To facilitate technical exchange among developers working with this project, we created QQ group 1121178835; you are welcome to join.
110
+
111
+ In addition to the open-source silent liveness detection algorithm, Minivision Technology also has a number of self-developed algorithms and commercial SDKs for face recognition and human body recognition. Interested individual or enterprise developers can visit our website: [Mini-AI Open Platform](https://ai.minivision.cn/)
112
+ You are welcome to contact us.
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/__pycache__/test.cpython-310.pyc ADDED
Binary file (2.02 kB).
 
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/__pycache__/test.cpython-311.pyc ADDED
Binary file (3.47 kB).
 
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/datasets/README.md ADDED
@@ -0,0 +1,19 @@
1
+ ```
2
+ ├── datasets
3
+ └── RGB_Images
4
+ ├── org_1_80x60
5
+ ├── 0
6
+ ├── aaa.png
7
+ ├── bbb.png
8
+ └── ...
9
+ ├── 1
10
+ ├── ddd.png
11
+ ├── eee.png
12
+ └── ...
13
+ └── 2
14
+ ├── ggg.png
15
+ ├── hhh.png
16
+ └── ...
17
+ ├── 1_80x80
18
+ └── ...
19
+ ```
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/images/demo.gif ADDED

Git LFS Details

  • SHA256: f42e93e773297be09dd5755e4dcef4f6104c30b4c443d51f98b84777508c6226
  • Pointer size: 132 Bytes
  • Size of remote file: 1.44 MB
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/images/framework.jpg ADDED
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/images/logo.jpg ADDED
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/images/patch_demo.png ADDED
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/images/sample/image_F1.jpg ADDED
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/images/sample/image_F1_result.jpg ADDED
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/images/sample/image_F2.jpg ADDED
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/images/sample/image_F2_result.jpg ADDED
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/images/sample/image_T1.jpg ADDED
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/images/sample/image_T1_result.jpg ADDED
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/images/设置阈值.png ADDED
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/images/静默活体APK.jpeg ADDED
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/requirements.txt ADDED
@@ -0,0 +1,8 @@
1
+ easydict==1.9
2
+ numpy==1.17.0
3
+ tqdm==4.31.1
4
+ torchvision==0.4.0
5
+ torch==1.2.0
6
+ opencv_python==4.2.0.34
7
+ Pillow==7.1.2
8
+ tensorboardX==2.0
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/resources/anti_spoof_models/2.7_80x80_MiniFASNetV2.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a5eb02e1843f19b5386b953cc4c9f011c3f985d0ee2bb9819eea9a142099bec0
3
+ size 1849453
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/resources/anti_spoof_models/4_0_0_80x80_MiniFASNetV1SE.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:84ee1d37d96894d5e82de5a57df044ef80a58be2b218b5ed7cdfd875ec2f5990
3
+ size 1856130
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/resources/detection_model/Widerface-RetinaFace.caffemodel ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d08338a2c207df16a9c566f767fea67fb43ba6fff76ce11e938fe3fabefb9402
3
+ size 1866013
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/resources/detection_model/deploy.prototxt ADDED
@@ -0,0 +1,2499 @@
1
+ name: "20200403141819_Widerface-RetinaFace_mb_640_negscope-0_epoch_4"
2
+ input: "data"
3
+ input_dim: 1
4
+ input_dim: 3
5
+ input_dim: 640
6
+ input_dim: 640
7
+ layer {
8
+ name: "conv1"
9
+ type: "Convolution"
10
+ bottom: "data"
11
+ top: "conv_blob1"
12
+ convolution_param {
13
+ num_output: 8
14
+ bias_term: false
15
+ pad: 1
16
+ kernel_size: 3
17
+ group: 1
18
+ stride: 2
19
+ weight_filler {
20
+ type: "xavier"
21
+ }
22
+ dilation: 1
23
+ }
24
+ }
25
+ layer {
26
+ name: "batch_norm1"
27
+ type: "BatchNorm"
28
+ bottom: "conv_blob1"
29
+ top: "batch_norm_blob1"
30
+ batch_norm_param {
31
+ use_global_stats: true
32
+ eps: 9.9999997e-06
33
+ }
34
+ }
35
+ layer {
36
+ name: "bn_scale1"
37
+ type: "Scale"
38
+ bottom: "batch_norm_blob1"
39
+ top: "batch_norm_blob1"
40
+ scale_param {
41
+ bias_term: true
42
+ }
43
+ }
44
+ layer {
45
+ name: "relu1"
46
+ type: "ReLU"
47
+ bottom: "batch_norm_blob1"
48
+ top: "relu_blob1"
49
+ }
50
+ layer {
51
+ name: "conv2"
52
+ type: "Convolution"
53
+ bottom: "relu_blob1"
54
+ top: "conv_blob2"
55
+ convolution_param {
56
+ num_output: 8
57
+ bias_term: false
58
+ pad: 1
59
+ kernel_size: 3
60
+ group: 8
61
+ stride: 1
62
+ weight_filler {
63
+ type: "xavier"
64
+ }
65
+ dilation: 1
66
+ }
67
+ }
68
+ layer {
69
+ name: "batch_norm2"
70
+ type: "BatchNorm"
71
+ bottom: "conv_blob2"
72
+ top: "batch_norm_blob2"
73
+ batch_norm_param {
74
+ use_global_stats: true
75
+ eps: 9.9999997e-06
76
+ }
77
+ }
78
+ layer {
79
+ name: "bn_scale2"
80
+ type: "Scale"
81
+ bottom: "batch_norm_blob2"
82
+ top: "batch_norm_blob2"
83
+ scale_param {
84
+ bias_term: true
85
+ }
86
+ }
87
+ layer {
88
+ name: "relu2"
89
+ type: "ReLU"
90
+ bottom: "batch_norm_blob2"
91
+ top: "relu_blob2"
92
+ }
93
+ layer {
94
+ name: "conv3"
95
+ type: "Convolution"
96
+ bottom: "relu_blob2"
97
+ top: "conv_blob3"
98
+ convolution_param {
99
+ num_output: 16
100
+ bias_term: false
101
+ pad: 0
102
+ kernel_size: 1
103
+ group: 1
104
+ stride: 1
105
+ weight_filler {
106
+ type: "xavier"
107
+ }
108
+ dilation: 1
109
+ }
110
+ }
111
+ layer {
112
+ name: "batch_norm3"
113
+ type: "BatchNorm"
114
+ bottom: "conv_blob3"
115
+ top: "batch_norm_blob3"
116
+ batch_norm_param {
117
+ use_global_stats: true
118
+ eps: 9.9999997e-06
119
+ }
120
+ }
121
+ layer {
122
+ name: "bn_scale3"
123
+ type: "Scale"
124
+ bottom: "batch_norm_blob3"
125
+ top: "batch_norm_blob3"
126
+ scale_param {
127
+ bias_term: true
128
+ }
129
+ }
130
+ layer {
131
+ name: "relu3"
132
+ type: "ReLU"
133
+ bottom: "batch_norm_blob3"
134
+ top: "relu_blob3"
135
+ }
136
+ layer {
137
+ name: "conv4"
138
+ type: "Convolution"
139
+ bottom: "relu_blob3"
140
+ top: "conv_blob4"
141
+ convolution_param {
142
+ num_output: 16
143
+ bias_term: false
144
+ pad: 1
145
+ kernel_size: 3
146
+ group: 16
147
+ stride: 2
148
+ weight_filler {
149
+ type: "xavier"
150
+ }
151
+ dilation: 1
152
+ }
153
+ }
154
+ layer {
155
+ name: "batch_norm4"
156
+ type: "BatchNorm"
157
+ bottom: "conv_blob4"
158
+ top: "batch_norm_blob4"
159
+ batch_norm_param {
160
+ use_global_stats: true
161
+ eps: 9.9999997e-06
162
+ }
163
+ }
164
+ layer {
165
+ name: "bn_scale4"
166
+ type: "Scale"
167
+ bottom: "batch_norm_blob4"
168
+ top: "batch_norm_blob4"
169
+ scale_param {
170
+ bias_term: true
171
+ }
172
+ }
173
+ layer {
174
+ name: "relu4"
175
+ type: "ReLU"
176
+ bottom: "batch_norm_blob4"
177
+ top: "relu_blob4"
178
+ }
179
+ layer {
180
+ name: "conv5"
181
+ type: "Convolution"
182
+ bottom: "relu_blob4"
183
+ top: "conv_blob5"
184
+ convolution_param {
185
+ num_output: 32
186
+ bias_term: false
187
+ pad: 0
188
+ kernel_size: 1
189
+ group: 1
190
+ stride: 1
191
+ weight_filler {
192
+ type: "xavier"
193
+ }
194
+ dilation: 1
195
+ }
196
+ }
197
+ layer {
198
+ name: "batch_norm5"
199
+ type: "BatchNorm"
200
+ bottom: "conv_blob5"
201
+ top: "batch_norm_blob5"
202
+ batch_norm_param {
203
+ use_global_stats: true
204
+ eps: 9.9999997e-06
205
+ }
206
+ }
207
+ layer {
208
+ name: "bn_scale5"
209
+ type: "Scale"
210
+ bottom: "batch_norm_blob5"
211
+ top: "batch_norm_blob5"
212
+ scale_param {
213
+ bias_term: true
214
+ }
215
+ }
216
+ layer {
217
+ name: "relu5"
218
+ type: "ReLU"
219
+ bottom: "batch_norm_blob5"
220
+ top: "relu_blob5"
221
+ }
222
+ layer {
223
+ name: "conv6"
224
+ type: "Convolution"
225
+ bottom: "relu_blob5"
226
+ top: "conv_blob6"
227
+ convolution_param {
228
+ num_output: 32
229
+ bias_term: false
230
+ pad: 1
231
+ kernel_size: 3
232
+ group: 32
233
+ stride: 1
234
+ weight_filler {
235
+ type: "xavier"
236
+ }
237
+ dilation: 1
238
+ }
239
+ }
240
+ layer {
241
+ name: "batch_norm6"
242
+ type: "BatchNorm"
243
+ bottom: "conv_blob6"
244
+ top: "batch_norm_blob6"
245
+ batch_norm_param {
246
+ use_global_stats: true
247
+ eps: 9.9999997e-06
248
+ }
249
+ }
250
+ layer {
251
+ name: "bn_scale6"
252
+ type: "Scale"
253
+ bottom: "batch_norm_blob6"
254
+ top: "batch_norm_blob6"
255
+ scale_param {
256
+ bias_term: true
257
+ }
258
+ }
259
+ layer {
260
+ name: "relu6"
261
+ type: "ReLU"
262
+ bottom: "batch_norm_blob6"
263
+ top: "relu_blob6"
264
+ }
265
+ layer {
266
+ name: "conv7"
267
+ type: "Convolution"
268
+ bottom: "relu_blob6"
269
+ top: "conv_blob7"
270
+ convolution_param {
271
+ num_output: 32
272
+ bias_term: false
273
+ pad: 0
274
+ kernel_size: 1
275
+ group: 1
276
+ stride: 1
277
+ weight_filler {
278
+ type: "xavier"
279
+ }
280
+ dilation: 1
281
+ }
282
+ }
283
+ layer {
284
+ name: "batch_norm7"
285
+ type: "BatchNorm"
286
+ bottom: "conv_blob7"
287
+ top: "batch_norm_blob7"
288
+ batch_norm_param {
289
+ use_global_stats: true
290
+ eps: 9.9999997e-06
291
+ }
292
+ }
293
+ layer {
294
+ name: "bn_scale7"
295
+ type: "Scale"
296
+ bottom: "batch_norm_blob7"
297
+ top: "batch_norm_blob7"
298
+ scale_param {
299
+ bias_term: true
300
+ }
301
+ }
302
+ layer {
303
+ name: "relu7"
304
+ type: "ReLU"
305
+ bottom: "batch_norm_blob7"
306
+ top: "relu_blob7"
307
+ }
308
+ layer {
309
+ name: "conv8"
310
+ type: "Convolution"
311
+ bottom: "relu_blob7"
312
+ top: "conv_blob8"
313
+ convolution_param {
314
+ num_output: 32
315
+ bias_term: false
316
+ pad: 1
317
+ kernel_size: 3
318
+ group: 32
319
+ stride: 2
320
+ weight_filler {
321
+ type: "xavier"
322
+ }
323
+ dilation: 1
324
+ }
325
+ }
326
+ layer {
327
+ name: "batch_norm8"
328
+ type: "BatchNorm"
329
+ bottom: "conv_blob8"
330
+ top: "batch_norm_blob8"
331
+ batch_norm_param {
332
+ use_global_stats: true
333
+ eps: 9.9999997e-06
334
+ }
335
+ }
336
+ layer {
337
+ name: "bn_scale8"
338
+ type: "Scale"
339
+ bottom: "batch_norm_blob8"
340
+ top: "batch_norm_blob8"
341
+ scale_param {
342
+ bias_term: true
343
+ }
344
+ }
345
+ layer {
346
+ name: "relu8"
347
+ type: "ReLU"
348
+ bottom: "batch_norm_blob8"
349
+ top: "relu_blob8"
350
+ }
351
+ layer {
352
+ name: "conv9"
353
+ type: "Convolution"
354
+ bottom: "relu_blob8"
355
+ top: "conv_blob9"
356
+ convolution_param {
357
+ num_output: 64
358
+ bias_term: false
359
+ pad: 0
360
+ kernel_size: 1
361
+ group: 1
362
+ stride: 1
363
+ weight_filler {
364
+ type: "xavier"
365
+ }
366
+ dilation: 1
367
+ }
368
+ }
369
+ layer {
370
+ name: "batch_norm9"
371
+ type: "BatchNorm"
372
+ bottom: "conv_blob9"
373
+ top: "batch_norm_blob9"
374
+ batch_norm_param {
375
+ use_global_stats: true
376
+ eps: 9.9999997e-06
377
+ }
378
+ }
379
+ layer {
380
+ name: "bn_scale9"
381
+ type: "Scale"
382
+ bottom: "batch_norm_blob9"
383
+ top: "batch_norm_blob9"
384
+ scale_param {
385
+ bias_term: true
386
+ }
387
+ }
388
+ layer {
389
+ name: "relu9"
390
+ type: "ReLU"
391
+ bottom: "batch_norm_blob9"
392
+ top: "relu_blob9"
393
+ }
394
+ layer {
395
+ name: "conv10"
396
+ type: "Convolution"
397
+ bottom: "relu_blob9"
398
+ top: "conv_blob10"
399
+ convolution_param {
400
+ num_output: 64
401
+ bias_term: false
402
+ pad: 1
403
+ kernel_size: 3
404
+ group: 64
405
+ stride: 1
406
+ weight_filler {
407
+ type: "xavier"
408
+ }
409
+ dilation: 1
410
+ }
411
+ }
412
+ layer {
413
+ name: "batch_norm10"
414
+ type: "BatchNorm"
415
+ bottom: "conv_blob10"
416
+ top: "batch_norm_blob10"
417
+ batch_norm_param {
418
+ use_global_stats: true
419
+ eps: 9.9999997e-06
420
+ }
421
+ }
422
+ layer {
423
+ name: "bn_scale10"
424
+ type: "Scale"
425
+ bottom: "batch_norm_blob10"
426
+ top: "batch_norm_blob10"
427
+ scale_param {
428
+ bias_term: true
429
+ }
430
+ }
431
+ layer {
432
+ name: "relu10"
433
+ type: "ReLU"
434
+ bottom: "batch_norm_blob10"
435
+ top: "relu_blob10"
436
+ }
437
+ layer {
438
+ name: "conv11"
439
+ type: "Convolution"
440
+ bottom: "relu_blob10"
441
+ top: "conv_blob11"
442
+ convolution_param {
443
+ num_output: 64
444
+ bias_term: false
445
+ pad: 0
446
+ kernel_size: 1
447
+ group: 1
448
+ stride: 1
449
+ weight_filler {
450
+ type: "xavier"
451
+ }
452
+ dilation: 1
453
+ }
454
+ }
455
+ layer {
456
+ name: "batch_norm11"
457
+ type: "BatchNorm"
458
+ bottom: "conv_blob11"
459
+ top: "batch_norm_blob11"
460
+ batch_norm_param {
461
+ use_global_stats: true
462
+ eps: 9.9999997e-06
463
+ }
464
+ }
465
+ layer {
466
+ name: "bn_scale11"
467
+ type: "Scale"
468
+ bottom: "batch_norm_blob11"
469
+ top: "batch_norm_blob11"
470
+ scale_param {
471
+ bias_term: true
472
+ }
473
+ }
474
+ layer {
475
+ name: "relu11"
476
+ type: "ReLU"
477
+ bottom: "batch_norm_blob11"
478
+ top: "relu_blob11"
479
+ }
480
+ layer {
481
+ name: "conv12"
482
+ type: "Convolution"
483
+ bottom: "relu_blob11"
484
+ top: "conv_blob12"
485
+ convolution_param {
486
+ num_output: 64
487
+ bias_term: false
488
+ pad: 1
489
+ kernel_size: 3
490
+ group: 64
491
+ stride: 2
492
+ weight_filler {
493
+ type: "xavier"
494
+ }
495
+ dilation: 1
496
+ }
497
+ }
498
+ layer {
499
+ name: "batch_norm12"
500
+ type: "BatchNorm"
501
+ bottom: "conv_blob12"
502
+ top: "batch_norm_blob12"
503
+ batch_norm_param {
504
+ use_global_stats: true
505
+ eps: 9.9999997e-06
506
+ }
507
+ }
508
+ layer {
509
+ name: "bn_scale12"
510
+ type: "Scale"
511
+ bottom: "batch_norm_blob12"
512
+ top: "batch_norm_blob12"
513
+ scale_param {
514
+ bias_term: true
515
+ }
516
+ }
517
+ layer {
518
+ name: "relu12"
519
+ type: "ReLU"
520
+ bottom: "batch_norm_blob12"
521
+ top: "relu_blob12"
522
+ }
523
+ layer {
524
+ name: "conv13"
525
+ type: "Convolution"
526
+ bottom: "relu_blob12"
527
+ top: "conv_blob13"
528
+ convolution_param {
529
+ num_output: 128
530
+ bias_term: false
531
+ pad: 0
532
+ kernel_size: 1
533
+ group: 1
534
+ stride: 1
535
+ weight_filler {
536
+ type: "xavier"
537
+ }
538
+ dilation: 1
539
+ }
540
+ }
541
+ layer {
542
+ name: "batch_norm13"
543
+ type: "BatchNorm"
544
+ bottom: "conv_blob13"
545
+ top: "batch_norm_blob13"
546
+ batch_norm_param {
547
+ use_global_stats: true
548
+ eps: 9.9999997e-06
549
+ }
550
+ }
551
+ layer {
552
+ name: "bn_scale13"
553
+ type: "Scale"
554
+ bottom: "batch_norm_blob13"
555
+ top: "batch_norm_blob13"
556
+ scale_param {
557
+ bias_term: true
558
+ }
559
+ }
560
+ layer {
561
+ name: "relu13"
562
+ type: "ReLU"
563
+ bottom: "batch_norm_blob13"
564
+ top: "relu_blob13"
565
+ }
566
+ layer {
567
+ name: "conv14"
568
+ type: "Convolution"
569
+ bottom: "relu_blob13"
570
+ top: "conv_blob14"
571
+ convolution_param {
572
+ num_output: 128
573
+ bias_term: false
574
+ pad: 1
575
+ kernel_size: 3
576
+ group: 128
577
+ stride: 1
578
+ weight_filler {
579
+ type: "xavier"
580
+ }
581
+ dilation: 1
582
+ }
583
+ }
584
+ layer {
585
+ name: "batch_norm14"
586
+ type: "BatchNorm"
587
+ bottom: "conv_blob14"
588
+ top: "batch_norm_blob14"
589
+ batch_norm_param {
590
+ use_global_stats: true
591
+ eps: 9.9999997e-06
592
+ }
593
+ }
594
+ layer {
595
+ name: "bn_scale14"
596
+ type: "Scale"
597
+ bottom: "batch_norm_blob14"
598
+ top: "batch_norm_blob14"
599
+ scale_param {
600
+ bias_term: true
601
+ }
602
+ }
603
+ layer {
604
+ name: "relu14"
605
+ type: "ReLU"
606
+ bottom: "batch_norm_blob14"
607
+ top: "relu_blob14"
608
+ }
609
+ layer {
610
+ name: "conv15"
611
+ type: "Convolution"
612
+ bottom: "relu_blob14"
613
+ top: "conv_blob15"
614
+ convolution_param {
615
+ num_output: 128
616
+ bias_term: false
617
+ pad: 0
618
+ kernel_size: 1
619
+ group: 1
620
+ stride: 1
621
+ weight_filler {
622
+ type: "xavier"
623
+ }
624
+ dilation: 1
625
+ }
626
+ }
627
+ layer {
628
+ name: "batch_norm15"
629
+ type: "BatchNorm"
630
+ bottom: "conv_blob15"
631
+ top: "batch_norm_blob15"
632
+ batch_norm_param {
633
+ use_global_stats: true
634
+ eps: 9.9999997e-06
635
+ }
636
+ }
637
+ layer {
638
+ name: "bn_scale15"
639
+ type: "Scale"
640
+ bottom: "batch_norm_blob15"
641
+ top: "batch_norm_blob15"
642
+ scale_param {
643
+ bias_term: true
644
+ }
645
+ }
646
+ layer {
647
+ name: "relu15"
648
+ type: "ReLU"
649
+ bottom: "batch_norm_blob15"
650
+ top: "relu_blob15"
651
+ }
652
+ layer {
653
+ name: "conv16"
654
+ type: "Convolution"
655
+ bottom: "relu_blob15"
656
+ top: "conv_blob16"
657
+ convolution_param {
658
+ num_output: 128
659
+ bias_term: false
660
+ pad: 1
661
+ kernel_size: 3
662
+ group: 128
663
+ stride: 1
664
+ weight_filler {
665
+ type: "xavier"
666
+ }
667
+ dilation: 1
668
+ }
669
+ }
670
+ layer {
671
+ name: "batch_norm16"
672
+ type: "BatchNorm"
673
+ bottom: "conv_blob16"
674
+ top: "batch_norm_blob16"
675
+ batch_norm_param {
676
+ use_global_stats: true
677
+ eps: 9.9999997e-06
678
+ }
679
+ }
680
+ layer {
681
+ name: "bn_scale16"
682
+ type: "Scale"
683
+ bottom: "batch_norm_blob16"
684
+ top: "batch_norm_blob16"
685
+ scale_param {
686
+ bias_term: true
687
+ }
688
+ }
689
+ layer {
690
+ name: "relu16"
691
+ type: "ReLU"
692
+ bottom: "batch_norm_blob16"
693
+ top: "relu_blob16"
694
+ }
695
+ layer {
696
+ name: "conv17"
697
+ type: "Convolution"
698
+ bottom: "relu_blob16"
699
+ top: "conv_blob17"
700
+ convolution_param {
701
+ num_output: 128
702
+ bias_term: false
703
+ pad: 0
704
+ kernel_size: 1
705
+ group: 1
706
+ stride: 1
707
+ weight_filler {
708
+ type: "xavier"
709
+ }
710
+ dilation: 1
711
+ }
712
+ }
713
+ layer {
714
+ name: "batch_norm17"
715
+ type: "BatchNorm"
716
+ bottom: "conv_blob17"
717
+ top: "batch_norm_blob17"
718
+ batch_norm_param {
719
+ use_global_stats: true
720
+ eps: 9.9999997e-06
721
+ }
722
+ }
723
+ layer {
724
+ name: "bn_scale17"
725
+ type: "Scale"
726
+ bottom: "batch_norm_blob17"
727
+ top: "batch_norm_blob17"
728
+ scale_param {
729
+ bias_term: true
730
+ }
731
+ }
732
+ layer {
733
+ name: "relu17"
734
+ type: "ReLU"
735
+ bottom: "batch_norm_blob17"
736
+ top: "relu_blob17"
737
+ }
738
+ layer {
739
+ name: "conv18"
740
+ type: "Convolution"
741
+ bottom: "relu_blob17"
742
+ top: "conv_blob18"
743
+ convolution_param {
744
+ num_output: 128
745
+ bias_term: false
746
+ pad: 1
747
+ kernel_size: 3
748
+ group: 128
749
+ stride: 1
750
+ weight_filler {
751
+ type: "xavier"
752
+ }
753
+ dilation: 1
754
+ }
755
+ }
756
+ layer {
757
+ name: "batch_norm18"
758
+ type: "BatchNorm"
759
+ bottom: "conv_blob18"
760
+ top: "batch_norm_blob18"
761
+ batch_norm_param {
762
+ use_global_stats: true
763
+ eps: 9.9999997e-06
764
+ }
765
+ }
766
+ layer {
767
+ name: "bn_scale18"
768
+ type: "Scale"
769
+ bottom: "batch_norm_blob18"
770
+ top: "batch_norm_blob18"
771
+ scale_param {
772
+ bias_term: true
773
+ }
774
+ }
775
+ layer {
776
+ name: "relu18"
777
+ type: "ReLU"
778
+ bottom: "batch_norm_blob18"
779
+ top: "relu_blob18"
780
+ }
781
+ layer {
782
+ name: "conv19"
783
+ type: "Convolution"
784
+ bottom: "relu_blob18"
785
+ top: "conv_blob19"
786
+ convolution_param {
787
+ num_output: 128
788
+ bias_term: false
789
+ pad: 0
790
+ kernel_size: 1
791
+ group: 1
792
+ stride: 1
793
+ weight_filler {
794
+ type: "xavier"
795
+ }
796
+ dilation: 1
797
+ }
798
+ }
799
+ layer {
800
+ name: "batch_norm19"
801
+ type: "BatchNorm"
802
+ bottom: "conv_blob19"
803
+ top: "batch_norm_blob19"
804
+ batch_norm_param {
805
+ use_global_stats: true
806
+ eps: 9.9999997e-06
807
+ }
808
+ }
809
+ layer {
810
+ name: "bn_scale19"
811
+ type: "Scale"
812
+ bottom: "batch_norm_blob19"
813
+ top: "batch_norm_blob19"
814
+ scale_param {
815
+ bias_term: true
816
+ }
817
+ }
818
+ layer {
819
+ name: "relu19"
820
+ type: "ReLU"
821
+ bottom: "batch_norm_blob19"
822
+ top: "relu_blob19"
823
+ }
824
+ layer {
825
+ name: "conv20"
826
+ type: "Convolution"
827
+ bottom: "relu_blob19"
828
+ top: "conv_blob20"
829
+ convolution_param {
830
+ num_output: 128
831
+ bias_term: false
832
+ pad: 1
833
+ kernel_size: 3
834
+ group: 128
835
+ stride: 1
836
+ weight_filler {
837
+ type: "xavier"
838
+ }
839
+ dilation: 1
840
+ }
841
+ }
842
+ layer {
843
+ name: "batch_norm20"
844
+ type: "BatchNorm"
845
+ bottom: "conv_blob20"
846
+ top: "batch_norm_blob20"
847
+ batch_norm_param {
848
+ use_global_stats: true
849
+ eps: 9.9999997e-06
850
+ }
851
+ }
852
+ layer {
853
+ name: "bn_scale20"
854
+ type: "Scale"
855
+ bottom: "batch_norm_blob20"
856
+ top: "batch_norm_blob20"
857
+ scale_param {
858
+ bias_term: true
859
+ }
860
+ }
861
+ layer {
862
+ name: "relu20"
863
+ type: "ReLU"
864
+ bottom: "batch_norm_blob20"
865
+ top: "relu_blob20"
866
+ }
867
+ layer {
868
+ name: "conv21"
869
+ type: "Convolution"
870
+ bottom: "relu_blob20"
871
+ top: "conv_blob21"
872
+ convolution_param {
873
+ num_output: 128
874
+ bias_term: false
875
+ pad: 0
876
+ kernel_size: 1
877
+ group: 1
878
+ stride: 1
879
+ weight_filler {
880
+ type: "xavier"
881
+ }
882
+ dilation: 1
883
+ }
884
+ }
885
+ layer {
886
+ name: "batch_norm21"
887
+ type: "BatchNorm"
888
+ bottom: "conv_blob21"
889
+ top: "batch_norm_blob21"
890
+ batch_norm_param {
891
+ use_global_stats: true
892
+ eps: 9.9999997e-06
893
+ }
894
+ }
895
+ layer {
896
+ name: "bn_scale21"
897
+ type: "Scale"
898
+ bottom: "batch_norm_blob21"
899
+ top: "batch_norm_blob21"
900
+ scale_param {
901
+ bias_term: true
902
+ }
903
+ }
904
+ layer {
905
+ name: "relu21"
906
+ type: "ReLU"
907
+ bottom: "batch_norm_blob21"
908
+ top: "relu_blob21"
909
+ }
910
+ layer {
911
+ name: "conv22"
912
+ type: "Convolution"
913
+ bottom: "relu_blob21"
914
+ top: "conv_blob22"
915
+ convolution_param {
916
+ num_output: 128
917
+ bias_term: false
918
+ pad: 1
919
+ kernel_size: 3
920
+ group: 128
921
+ stride: 1
922
+ weight_filler {
923
+ type: "xavier"
924
+ }
925
+ dilation: 1
926
+ }
927
+ }
928
+ layer {
929
+ name: "batch_norm22"
930
+ type: "BatchNorm"
931
+ bottom: "conv_blob22"
932
+ top: "batch_norm_blob22"
933
+ batch_norm_param {
934
+ use_global_stats: true
935
+ eps: 9.9999997e-06
936
+ }
937
+ }
938
+ layer {
939
+ name: "bn_scale22"
940
+ type: "Scale"
941
+ bottom: "batch_norm_blob22"
942
+ top: "batch_norm_blob22"
943
+ scale_param {
944
+ bias_term: true
945
+ }
946
+ }
947
+ layer {
948
+ name: "relu22"
949
+ type: "ReLU"
950
+ bottom: "batch_norm_blob22"
951
+ top: "relu_blob22"
952
+ }
953
+ layer {
954
+ name: "conv23"
955
+ type: "Convolution"
956
+ bottom: "relu_blob22"
957
+ top: "conv_blob23"
958
+ convolution_param {
959
+ num_output: 128
960
+ bias_term: false
961
+ pad: 0
962
+ kernel_size: 1
963
+ group: 1
964
+ stride: 1
965
+ weight_filler {
966
+ type: "xavier"
967
+ }
968
+ dilation: 1
969
+ }
970
+ }
971
+ layer {
972
+ name: "batch_norm23"
973
+ type: "BatchNorm"
974
+ bottom: "conv_blob23"
975
+ top: "batch_norm_blob23"
976
+ batch_norm_param {
977
+ use_global_stats: true
978
+ eps: 9.9999997e-06
979
+ }
980
+ }
981
+ layer {
982
+ name: "bn_scale23"
983
+ type: "Scale"
984
+ bottom: "batch_norm_blob23"
985
+ top: "batch_norm_blob23"
986
+ scale_param {
987
+ bias_term: true
988
+ }
989
+ }
990
+ layer {
991
+ name: "relu23"
992
+ type: "ReLU"
993
+ bottom: "batch_norm_blob23"
994
+ top: "relu_blob23"
995
+ }
996
+ layer {
997
+ name: "conv24"
998
+ type: "Convolution"
999
+ bottom: "relu_blob23"
1000
+ top: "conv_blob24"
1001
+ convolution_param {
1002
+ num_output: 128
1003
+ bias_term: false
1004
+ pad: 1
1005
+ kernel_size: 3
1006
+ group: 128
1007
+ stride: 2
1008
+ weight_filler {
1009
+ type: "xavier"
1010
+ }
1011
+ dilation: 1
1012
+ }
1013
+ }
1014
+ layer {
1015
+ name: "batch_norm24"
1016
+ type: "BatchNorm"
1017
+ bottom: "conv_blob24"
1018
+ top: "batch_norm_blob24"
1019
+ batch_norm_param {
1020
+ use_global_stats: true
1021
+ eps: 9.9999997e-06
1022
+ }
1023
+ }
1024
+ layer {
1025
+ name: "bn_scale24"
1026
+ type: "Scale"
1027
+ bottom: "batch_norm_blob24"
1028
+ top: "batch_norm_blob24"
1029
+ scale_param {
1030
+ bias_term: true
1031
+ }
1032
+ }
1033
+ layer {
1034
+ name: "relu24"
1035
+ type: "ReLU"
1036
+ bottom: "batch_norm_blob24"
1037
+ top: "relu_blob24"
1038
+ }
1039
+ layer {
1040
+ name: "conv25"
1041
+ type: "Convolution"
1042
+ bottom: "relu_blob24"
1043
+ top: "conv_blob25"
1044
+ convolution_param {
1045
+ num_output: 256
1046
+ bias_term: false
1047
+ pad: 0
1048
+ kernel_size: 1
1049
+ group: 1
1050
+ stride: 1
1051
+ weight_filler {
1052
+ type: "xavier"
1053
+ }
1054
+ dilation: 1
1055
+ }
1056
+ }
1057
+ layer {
1058
+ name: "batch_norm25"
1059
+ type: "BatchNorm"
1060
+ bottom: "conv_blob25"
1061
+ top: "batch_norm_blob25"
1062
+ batch_norm_param {
1063
+ use_global_stats: true
1064
+ eps: 9.9999997e-06
1065
+ }
1066
+ }
1067
+ layer {
1068
+ name: "bn_scale25"
1069
+ type: "Scale"
1070
+ bottom: "batch_norm_blob25"
1071
+ top: "batch_norm_blob25"
1072
+ scale_param {
1073
+ bias_term: true
1074
+ }
1075
+ }
1076
+ layer {
1077
+ name: "relu25"
1078
+ type: "ReLU"
1079
+ bottom: "batch_norm_blob25"
1080
+ top: "relu_blob25"
1081
+ }
1082
+ layer {
1083
+ name: "conv26"
1084
+ type: "Convolution"
1085
+ bottom: "relu_blob25"
1086
+ top: "conv_blob26"
1087
+ convolution_param {
1088
+ num_output: 256
1089
+ bias_term: false
1090
+ pad: 1
1091
+ kernel_size: 3
1092
+ group: 256
1093
+ stride: 1
1094
+ weight_filler {
1095
+ type: "xavier"
1096
+ }
1097
+ dilation: 1
1098
+ }
1099
+ }
1100
+ layer {
1101
+ name: "batch_norm26"
1102
+ type: "BatchNorm"
1103
+ bottom: "conv_blob26"
1104
+ top: "batch_norm_blob26"
1105
+ batch_norm_param {
1106
+ use_global_stats: true
1107
+ eps: 9.9999997e-06
1108
+ }
1109
+ }
1110
+ layer {
1111
+ name: "bn_scale26"
1112
+ type: "Scale"
1113
+ bottom: "batch_norm_blob26"
1114
+ top: "batch_norm_blob26"
1115
+ scale_param {
1116
+ bias_term: true
1117
+ }
1118
+ }
1119
+ layer {
1120
+ name: "relu26"
1121
+ type: "ReLU"
1122
+ bottom: "batch_norm_blob26"
1123
+ top: "relu_blob26"
1124
+ }
1125
+ layer {
1126
+ name: "conv27"
1127
+ type: "Convolution"
1128
+ bottom: "relu_blob26"
1129
+ top: "conv_blob27"
1130
+ convolution_param {
1131
+ num_output: 256
1132
+ bias_term: false
1133
+ pad: 0
1134
+ kernel_size: 1
1135
+ group: 1
1136
+ stride: 1
1137
+ weight_filler {
1138
+ type: "xavier"
1139
+ }
1140
+ dilation: 1
1141
+ }
1142
+ }
1143
+ layer {
1144
+ name: "batch_norm27"
1145
+ type: "BatchNorm"
1146
+ bottom: "conv_blob27"
1147
+ top: "batch_norm_blob27"
1148
+ batch_norm_param {
1149
+ use_global_stats: true
1150
+ eps: 9.9999997e-06
1151
+ }
1152
+ }
1153
+ layer {
1154
+ name: "bn_scale27"
1155
+ type: "Scale"
1156
+ bottom: "batch_norm_blob27"
1157
+ top: "batch_norm_blob27"
1158
+ scale_param {
1159
+ bias_term: true
1160
+ }
1161
+ }
1162
+ layer {
1163
+ name: "relu27"
1164
+ type: "ReLU"
1165
+ bottom: "batch_norm_blob27"
1166
+ top: "relu_blob27"
1167
+ }
1168
+ layer {
1169
+ name: "conv28"
1170
+ type: "Convolution"
1171
+ bottom: "relu_blob11"
1172
+ top: "conv_blob28"
1173
+ convolution_param {
1174
+ num_output: 64
1175
+ bias_term: false
1176
+ pad: 0
1177
+ kernel_size: 1
1178
+ group: 1
1179
+ stride: 1
1180
+ weight_filler {
1181
+ type: "xavier"
1182
+ }
1183
+ dilation: 1
1184
+ }
1185
+ }
1186
+ layer {
1187
+ name: "batch_norm28"
1188
+ type: "BatchNorm"
1189
+ bottom: "conv_blob28"
1190
+ top: "batch_norm_blob28"
1191
+ batch_norm_param {
1192
+ use_global_stats: true
1193
+ eps: 9.9999997e-06
1194
+ }
1195
+ }
1196
+ layer {
1197
+ name: "bn_scale28"
1198
+ type: "Scale"
1199
+ bottom: "batch_norm_blob28"
1200
+ top: "batch_norm_blob28"
1201
+ scale_param {
1202
+ bias_term: true
1203
+ }
1204
+ }
1205
+ layer {
1206
+ name: "relu28"
1207
+ type: "ReLU"
1208
+ bottom: "batch_norm_blob28"
1209
+ top: "relu_blob28"
1210
+ }
1211
+ layer {
1212
+ name: "conv29"
1213
+ type: "Convolution"
1214
+ bottom: "relu_blob23"
1215
+ top: "conv_blob29"
1216
+ convolution_param {
1217
+ num_output: 64
1218
+ bias_term: false
1219
+ pad: 0
1220
+ kernel_size: 1
1221
+ group: 1
1222
+ stride: 1
1223
+ weight_filler {
1224
+ type: "xavier"
1225
+ }
1226
+ dilation: 1
1227
+ }
1228
+ }
1229
+ layer {
1230
+ name: "batch_norm29"
1231
+ type: "BatchNorm"
1232
+ bottom: "conv_blob29"
1233
+ top: "batch_norm_blob29"
1234
+ batch_norm_param {
1235
+ use_global_stats: true
1236
+ eps: 9.9999997e-06
1237
+ }
1238
+ }
1239
+ layer {
1240
+ name: "bn_scale29"
1241
+ type: "Scale"
1242
+ bottom: "batch_norm_blob29"
1243
+ top: "batch_norm_blob29"
1244
+ scale_param {
1245
+ bias_term: true
1246
+ }
1247
+ }
1248
+ layer {
1249
+ name: "relu29"
1250
+ type: "ReLU"
1251
+ bottom: "batch_norm_blob29"
1252
+ top: "relu_blob29"
1253
+ }
1254
+ layer {
1255
+ name: "conv30"
1256
+ type: "Convolution"
1257
+ bottom: "relu_blob27"
1258
+ top: "conv_blob30"
1259
+ convolution_param {
1260
+ num_output: 64
1261
+ bias_term: false
1262
+ pad: 0
1263
+ kernel_size: 1
1264
+ group: 1
1265
+ stride: 1
1266
+ weight_filler {
1267
+ type: "xavier"
1268
+ }
1269
+ dilation: 1
1270
+ }
1271
+ }
1272
+ layer {
1273
+ name: "batch_norm30"
1274
+ type: "BatchNorm"
1275
+ bottom: "conv_blob30"
1276
+ top: "batch_norm_blob30"
1277
+ batch_norm_param {
1278
+ use_global_stats: true
1279
+ eps: 9.9999997e-06
1280
+ }
1281
+ }
1282
+ layer {
1283
+ name: "bn_scale30"
1284
+ type: "Scale"
1285
+ bottom: "batch_norm_blob30"
1286
+ top: "batch_norm_blob30"
1287
+ scale_param {
1288
+ bias_term: true
1289
+ }
1290
+ }
1291
+ layer {
1292
+ name: "relu30"
1293
+ type: "ReLU"
1294
+ bottom: "batch_norm_blob30"
1295
+ top: "relu_blob30"
1296
+ }
1297
+ layer {
1298
+ name: "conv_transpose1"
1299
+ type: "Deconvolution"
1300
+ bottom: "relu_blob30"
1301
+ top: "conv_transpose_blob1"
1302
+ convolution_param {
1303
+ num_output: 64
1304
+ bias_term: true
1305
+ pad: 0
1306
+ kernel_size: 2
1307
+ group: 1
1308
+ stride: 2
1309
+ weight_filler {
1310
+ type: "xavier"
1311
+ }
1312
+ bias_filler {
1313
+ type: "constant"
1314
+ }
1315
+ dilation: 1
1316
+ }
1317
+ }
1318
+ layer {
1319
+ name: "crop1"
1320
+ type: "Crop"
1321
+ bottom: "conv_transpose_blob1"
1322
+ bottom: "relu_blob29"
1323
+ top: "crop1"
1324
+ }
1325
+ layer {
1326
+ name: "add1"
1327
+ type: "Eltwise"
1328
+ bottom: "relu_blob29"
1329
+ bottom: "crop1"
1330
+ top: "add_blob1"
1331
+ eltwise_param {
1332
+ operation: SUM
1333
+ }
1334
+ }
1335
+ layer {
1336
+ name: "conv31"
1337
+ type: "Convolution"
1338
+ bottom: "add_blob1"
1339
+ top: "conv_blob31"
1340
+ convolution_param {
1341
+ num_output: 64
1342
+ bias_term: false
1343
+ pad: 1
1344
+ kernel_size: 3
1345
+ group: 1
1346
+ stride: 1
1347
+ weight_filler {
1348
+ type: "xavier"
1349
+ }
1350
+ dilation: 1
1351
+ }
1352
+ }
1353
+ layer {
1354
+ name: "batch_norm31"
1355
+ type: "BatchNorm"
1356
+ bottom: "conv_blob31"
1357
+ top: "batch_norm_blob31"
1358
+ batch_norm_param {
1359
+ use_global_stats: true
1360
+ eps: 9.9999997e-06
1361
+ }
1362
+ }
1363
+ layer {
1364
+ name: "bn_scale31"
1365
+ type: "Scale"
1366
+ bottom: "batch_norm_blob31"
1367
+ top: "batch_norm_blob31"
1368
+ scale_param {
1369
+ bias_term: true
1370
+ }
1371
+ }
1372
+ layer {
1373
+ name: "relu31"
1374
+ type: "ReLU"
1375
+ bottom: "batch_norm_blob31"
1376
+ top: "relu_blob31"
1377
+ }
1378
+ layer {
1379
+ name: "conv_transpose2"
1380
+ type: "Deconvolution"
1381
+ bottom: "relu_blob31"
1382
+ top: "conv_transpose_blob2"
1383
+ convolution_param {
1384
+ num_output: 64
1385
+ bias_term: true
1386
+ pad: 0
1387
+ kernel_size: 2
1388
+ group: 1
1389
+ stride: 2
1390
+ weight_filler {
1391
+ type: "xavier"
1392
+ }
1393
+ bias_filler {
1394
+ type: "constant"
1395
+ }
1396
+ dilation: 1
1397
+ }
1398
+ }
1399
+ layer {
1400
+ name: "crop2"
1401
+ type: "Crop"
1402
+ bottom: "conv_transpose_blob2"
1403
+ bottom: "relu_blob28"
1404
+ top: "crop2"
1405
+ }
1406
+ layer {
1407
+ name: "add2"
1408
+ type: "Eltwise"
1409
+ bottom: "relu_blob28"
1410
+ bottom: "crop2"
1411
+ top: "add_blob2"
1412
+ eltwise_param {
1413
+ operation: SUM
1414
+ }
1415
+ }
1416
+ layer {
1417
+ name: "conv32"
1418
+ type: "Convolution"
1419
+ bottom: "add_blob2"
1420
+ top: "conv_blob32"
1421
+ convolution_param {
1422
+ num_output: 64
1423
+ bias_term: false
1424
+ pad: 1
1425
+ kernel_size: 3
1426
+ group: 1
1427
+ stride: 1
1428
+ weight_filler {
1429
+ type: "xavier"
1430
+ }
1431
+ dilation: 1
1432
+ }
1433
+ }
1434
+ layer {
1435
+ name: "batch_norm32"
1436
+ type: "BatchNorm"
1437
+ bottom: "conv_blob32"
1438
+ top: "batch_norm_blob32"
1439
+ batch_norm_param {
1440
+ use_global_stats: true
1441
+ eps: 9.9999997e-06
1442
+ }
1443
+ }
1444
+ layer {
1445
+ name: "bn_scale32"
1446
+ type: "Scale"
1447
+ bottom: "batch_norm_blob32"
1448
+ top: "batch_norm_blob32"
1449
+ scale_param {
1450
+ bias_term: true
1451
+ }
1452
+ }
1453
+ layer {
1454
+ name: "relu32"
1455
+ type: "ReLU"
1456
+ bottom: "batch_norm_blob32"
1457
+ top: "relu_blob32"
1458
+ }
1459
+ layer {
1460
+ name: "conv33"
1461
+ type: "Convolution"
1462
+ bottom: "relu_blob32"
1463
+ top: "conv_blob33"
1464
+ convolution_param {
1465
+ num_output: 32
1466
+ bias_term: false
1467
+ pad: 1
1468
+ kernel_size: 3
1469
+ group: 1
1470
+ stride: 1
1471
+ weight_filler {
1472
+ type: "xavier"
1473
+ }
1474
+ dilation: 1
1475
+ }
1476
+ }
1477
+ layer {
1478
+ name: "batch_norm33"
1479
+ type: "BatchNorm"
1480
+ bottom: "conv_blob33"
1481
+ top: "batch_norm_blob33"
1482
+ batch_norm_param {
1483
+ use_global_stats: true
1484
+ eps: 9.9999997e-06
1485
+ }
1486
+ }
1487
+ layer {
1488
+ name: "bn_scale33"
1489
+ type: "Scale"
1490
+ bottom: "batch_norm_blob33"
1491
+ top: "batch_norm_blob33"
1492
+ scale_param {
1493
+ bias_term: true
1494
+ }
1495
+ }
1496
+ layer {
1497
+ name: "conv34"
1498
+ type: "Convolution"
1499
+ bottom: "relu_blob32"
1500
+ top: "conv_blob34"
1501
+ convolution_param {
1502
+ num_output: 16
1503
+ bias_term: false
1504
+ pad: 1
1505
+ kernel_size: 3
1506
+ group: 1
1507
+ stride: 1
1508
+ weight_filler {
1509
+ type: "xavier"
1510
+ }
1511
+ dilation: 1
1512
+ }
1513
+ }
1514
+ layer {
1515
+ name: "batch_norm34"
1516
+ type: "BatchNorm"
1517
+ bottom: "conv_blob34"
1518
+ top: "batch_norm_blob34"
1519
+ batch_norm_param {
1520
+ use_global_stats: true
1521
+ eps: 9.9999997e-06
1522
+ }
1523
+ }
1524
+ layer {
1525
+ name: "bn_scale34"
1526
+ type: "Scale"
1527
+ bottom: "batch_norm_blob34"
1528
+ top: "batch_norm_blob34"
1529
+ scale_param {
1530
+ bias_term: true
1531
+ }
1532
+ }
1533
+ layer {
1534
+ name: "relu33"
1535
+ type: "ReLU"
1536
+ bottom: "batch_norm_blob34"
1537
+ top: "relu_blob33"
1538
+ }
1539
+ layer {
1540
+ name: "conv35"
1541
+ type: "Convolution"
1542
+ bottom: "relu_blob33"
1543
+ top: "conv_blob35"
1544
+ convolution_param {
1545
+ num_output: 16
1546
+ bias_term: false
1547
+ pad: 1
1548
+ kernel_size: 3
1549
+ group: 1
1550
+ stride: 1
1551
+ weight_filler {
1552
+ type: "xavier"
1553
+ }
1554
+ dilation: 1
1555
+ }
1556
+ }
1557
+ layer {
1558
+ name: "batch_norm35"
1559
+ type: "BatchNorm"
1560
+ bottom: "conv_blob35"
1561
+ top: "batch_norm_blob35"
1562
+ batch_norm_param {
1563
+ use_global_stats: true
1564
+ eps: 9.9999997e-06
1565
+ }
1566
+ }
1567
+ layer {
1568
+ name: "bn_scale35"
1569
+ type: "Scale"
1570
+ bottom: "batch_norm_blob35"
1571
+ top: "batch_norm_blob35"
1572
+ scale_param {
1573
+ bias_term: true
1574
+ }
1575
+ }
1576
+ layer {
1577
+ name: "conv36"
1578
+ type: "Convolution"
1579
+ bottom: "relu_blob33"
1580
+ top: "conv_blob36"
1581
+ convolution_param {
1582
+ num_output: 16
1583
+ bias_term: false
1584
+ pad: 1
1585
+ kernel_size: 3
1586
+ group: 1
1587
+ stride: 1
1588
+ weight_filler {
1589
+ type: "xavier"
1590
+ }
1591
+ dilation: 1
1592
+ }
1593
+ }
1594
+ layer {
1595
+ name: "batch_norm36"
1596
+ type: "BatchNorm"
1597
+ bottom: "conv_blob36"
1598
+ top: "batch_norm_blob36"
1599
+ batch_norm_param {
1600
+ use_global_stats: true
1601
+ eps: 9.9999997e-06
1602
+ }
1603
+ }
1604
+ layer {
1605
+ name: "bn_scale36"
1606
+ type: "Scale"
1607
+ bottom: "batch_norm_blob36"
1608
+ top: "batch_norm_blob36"
1609
+ scale_param {
1610
+ bias_term: true
1611
+ }
1612
+ }
1613
+ layer {
1614
+ name: "relu34"
1615
+ type: "ReLU"
1616
+ bottom: "batch_norm_blob36"
1617
+ top: "relu_blob34"
1618
+ }
1619
+ layer {
1620
+ name: "conv37"
1621
+ type: "Convolution"
1622
+ bottom: "relu_blob34"
1623
+ top: "conv_blob37"
1624
+ convolution_param {
1625
+ num_output: 16
1626
+ bias_term: false
1627
+ pad: 1
1628
+ kernel_size: 3
1629
+ group: 1
1630
+ stride: 1
1631
+ weight_filler {
1632
+ type: "xavier"
1633
+ }
1634
+ dilation: 1
1635
+ }
1636
+ }
1637
+ layer {
1638
+ name: "batch_norm37"
1639
+ type: "BatchNorm"
1640
+ bottom: "conv_blob37"
1641
+ top: "batch_norm_blob37"
1642
+ batch_norm_param {
1643
+ use_global_stats: true
1644
+ eps: 9.9999997e-06
1645
+ }
1646
+ }
1647
+ layer {
1648
+ name: "bn_scale37"
1649
+ type: "Scale"
1650
+ bottom: "batch_norm_blob37"
1651
+ top: "batch_norm_blob37"
1652
+ scale_param {
1653
+ bias_term: true
1654
+ }
1655
+ }
1656
+ layer {
1657
+ name: "cat1"
1658
+ type: "Concat"
1659
+ bottom: "batch_norm_blob33"
1660
+ bottom: "batch_norm_blob35"
1661
+ bottom: "batch_norm_blob37"
1662
+ top: "cat_blob1"
1663
+ concat_param {
1664
+ axis: 1
1665
+ }
1666
+ }
1667
+ layer {
1668
+ name: "relu35"
1669
+ type: "ReLU"
1670
+ bottom: "cat_blob1"
1671
+ top: "relu_blob35"
1672
+ }
1673
+ layer {
1674
+ name: "conv38"
1675
+ type: "Convolution"
1676
+ bottom: "relu_blob31"
1677
+ top: "conv_blob38"
1678
+ convolution_param {
1679
+ num_output: 32
1680
+ bias_term: false
1681
+ pad: 1
1682
+ kernel_size: 3
1683
+ group: 1
1684
+ stride: 1
1685
+ weight_filler {
1686
+ type: "xavier"
1687
+ }
1688
+ dilation: 1
1689
+ }
1690
+ }
1691
+ layer {
1692
+ name: "batch_norm38"
1693
+ type: "BatchNorm"
1694
+ bottom: "conv_blob38"
1695
+ top: "batch_norm_blob38"
1696
+ batch_norm_param {
1697
+ use_global_stats: true
1698
+ eps: 9.9999997e-06
1699
+ }
1700
+ }
1701
+ layer {
1702
+ name: "bn_scale38"
1703
+ type: "Scale"
1704
+ bottom: "batch_norm_blob38"
1705
+ top: "batch_norm_blob38"
1706
+ scale_param {
1707
+ bias_term: true
1708
+ }
1709
+ }
1710
+ layer {
1711
+ name: "conv39"
1712
+ type: "Convolution"
1713
+ bottom: "relu_blob31"
1714
+ top: "conv_blob39"
1715
+ convolution_param {
1716
+ num_output: 16
1717
+ bias_term: false
1718
+ pad: 1
1719
+ kernel_size: 3
1720
+ group: 1
1721
+ stride: 1
1722
+ weight_filler {
1723
+ type: "xavier"
1724
+ }
1725
+ dilation: 1
1726
+ }
1727
+ }
1728
+ layer {
1729
+ name: "batch_norm39"
1730
+ type: "BatchNorm"
1731
+ bottom: "conv_blob39"
1732
+ top: "batch_norm_blob39"
1733
+ batch_norm_param {
1734
+ use_global_stats: true
1735
+ eps: 9.9999997e-06
1736
+ }
1737
+ }
1738
+ layer {
1739
+ name: "bn_scale39"
1740
+ type: "Scale"
1741
+ bottom: "batch_norm_blob39"
1742
+ top: "batch_norm_blob39"
1743
+ scale_param {
1744
+ bias_term: true
1745
+ }
1746
+ }
1747
+ layer {
1748
+ name: "relu36"
1749
+ type: "ReLU"
1750
+ bottom: "batch_norm_blob39"
1751
+ top: "relu_blob36"
1752
+ }
1753
+ layer {
1754
+ name: "conv40"
1755
+ type: "Convolution"
1756
+ bottom: "relu_blob36"
1757
+ top: "conv_blob40"
1758
+ convolution_param {
1759
+ num_output: 16
1760
+ bias_term: false
1761
+ pad: 1
1762
+ kernel_size: 3
1763
+ group: 1
1764
+ stride: 1
1765
+ weight_filler {
1766
+ type: "xavier"
1767
+ }
1768
+ dilation: 1
1769
+ }
1770
+ }
1771
+ layer {
1772
+ name: "batch_norm40"
1773
+ type: "BatchNorm"
1774
+ bottom: "conv_blob40"
1775
+ top: "batch_norm_blob40"
1776
+ batch_norm_param {
1777
+ use_global_stats: true
1778
+ eps: 9.9999997e-06
1779
+ }
1780
+ }
1781
+ layer {
1782
+ name: "bn_scale40"
1783
+ type: "Scale"
1784
+ bottom: "batch_norm_blob40"
1785
+ top: "batch_norm_blob40"
1786
+ scale_param {
1787
+ bias_term: true
1788
+ }
1789
+ }
1790
+ layer {
1791
+ name: "conv41"
1792
+ type: "Convolution"
1793
+ bottom: "relu_blob36"
1794
+ top: "conv_blob41"
1795
+ convolution_param {
1796
+ num_output: 16
1797
+ bias_term: false
1798
+ pad: 1
1799
+ kernel_size: 3
1800
+ group: 1
1801
+ stride: 1
1802
+ weight_filler {
1803
+ type: "xavier"
1804
+ }
1805
+ dilation: 1
1806
+ }
1807
+ }
1808
+ layer {
1809
+ name: "batch_norm41"
1810
+ type: "BatchNorm"
1811
+ bottom: "conv_blob41"
1812
+ top: "batch_norm_blob41"
1813
+ batch_norm_param {
1814
+ use_global_stats: true
1815
+ eps: 9.9999997e-06
1816
+ }
1817
+ }
1818
+ layer {
1819
+ name: "bn_scale41"
1820
+ type: "Scale"
1821
+ bottom: "batch_norm_blob41"
1822
+ top: "batch_norm_blob41"
1823
+ scale_param {
1824
+ bias_term: true
1825
+ }
1826
+ }
1827
+ layer {
1828
+ name: "relu37"
1829
+ type: "ReLU"
1830
+ bottom: "batch_norm_blob41"
1831
+ top: "relu_blob37"
1832
+ }
1833
+ layer {
1834
+ name: "conv42"
1835
+ type: "Convolution"
1836
+ bottom: "relu_blob37"
1837
+ top: "conv_blob42"
1838
+ convolution_param {
1839
+ num_output: 16
1840
+ bias_term: false
1841
+ pad: 1
1842
+ kernel_size: 3
1843
+ group: 1
1844
+ stride: 1
1845
+ weight_filler {
1846
+ type: "xavier"
1847
+ }
1848
+ dilation: 1
1849
+ }
1850
+ }
1851
+ layer {
1852
+ name: "batch_norm42"
1853
+ type: "BatchNorm"
1854
+ bottom: "conv_blob42"
1855
+ top: "batch_norm_blob42"
1856
+ batch_norm_param {
1857
+ use_global_stats: true
1858
+ eps: 9.9999997e-06
1859
+ }
1860
+ }
1861
+ layer {
1862
+ name: "bn_scale42"
1863
+ type: "Scale"
1864
+ bottom: "batch_norm_blob42"
1865
+ top: "batch_norm_blob42"
1866
+ scale_param {
1867
+ bias_term: true
1868
+ }
1869
+ }
1870
+ layer {
1871
+ name: "cat2"
1872
+ type: "Concat"
1873
+ bottom: "batch_norm_blob38"
1874
+ bottom: "batch_norm_blob40"
1875
+ bottom: "batch_norm_blob42"
1876
+ top: "cat_blob2"
1877
+ concat_param {
1878
+ axis: 1
1879
+ }
1880
+ }
1881
+ layer {
1882
+ name: "relu38"
1883
+ type: "ReLU"
1884
+ bottom: "cat_blob2"
1885
+ top: "relu_blob38"
1886
+ }
1887
+ layer {
1888
+ name: "conv43"
1889
+ type: "Convolution"
1890
+ bottom: "relu_blob30"
1891
+ top: "conv_blob43"
1892
+ convolution_param {
1893
+ num_output: 32
1894
+ bias_term: false
1895
+ pad: 1
1896
+ kernel_size: 3
1897
+ group: 1
1898
+ stride: 1
1899
+ weight_filler {
1900
+ type: "xavier"
1901
+ }
1902
+ dilation: 1
1903
+ }
1904
+ }
1905
+ layer {
1906
+ name: "batch_norm43"
1907
+ type: "BatchNorm"
1908
+ bottom: "conv_blob43"
1909
+ top: "batch_norm_blob43"
1910
+ batch_norm_param {
1911
+ use_global_stats: true
1912
+ eps: 9.9999997e-06
1913
+ }
1914
+ }
1915
+ layer {
1916
+ name: "bn_scale43"
1917
+ type: "Scale"
1918
+ bottom: "batch_norm_blob43"
1919
+ top: "batch_norm_blob43"
1920
+ scale_param {
1921
+ bias_term: true
1922
+ }
1923
+ }
1924
+ layer {
1925
+ name: "conv44"
1926
+ type: "Convolution"
1927
+ bottom: "relu_blob30"
1928
+ top: "conv_blob44"
1929
+ convolution_param {
1930
+ num_output: 16
1931
+ bias_term: false
1932
+ pad: 1
1933
+ kernel_size: 3
1934
+ group: 1
1935
+ stride: 1
1936
+ weight_filler {
1937
+ type: "xavier"
1938
+ }
1939
+ dilation: 1
1940
+ }
1941
+ }
1942
+ layer {
1943
+ name: "batch_norm44"
1944
+ type: "BatchNorm"
1945
+ bottom: "conv_blob44"
1946
+ top: "batch_norm_blob44"
1947
+ batch_norm_param {
1948
+ use_global_stats: true
1949
+ eps: 9.9999997e-06
1950
+ }
1951
+ }
1952
+ layer {
1953
+ name: "bn_scale44"
1954
+ type: "Scale"
1955
+ bottom: "batch_norm_blob44"
1956
+ top: "batch_norm_blob44"
1957
+ scale_param {
1958
+ bias_term: true
1959
+ }
1960
+ }
1961
+ layer {
1962
+ name: "relu39"
1963
+ type: "ReLU"
1964
+ bottom: "batch_norm_blob44"
1965
+ top: "relu_blob39"
1966
+ }
1967
+ layer {
1968
+ name: "conv45"
1969
+ type: "Convolution"
1970
+ bottom: "relu_blob39"
1971
+ top: "conv_blob45"
1972
+ convolution_param {
1973
+ num_output: 16
1974
+ bias_term: false
1975
+ pad: 1
1976
+ kernel_size: 3
1977
+ group: 1
1978
+ stride: 1
1979
+ weight_filler {
1980
+ type: "xavier"
1981
+ }
1982
+ dilation: 1
1983
+ }
1984
+ }
1985
+ layer {
1986
+ name: "batch_norm45"
1987
+ type: "BatchNorm"
1988
+ bottom: "conv_blob45"
1989
+ top: "batch_norm_blob45"
1990
+ batch_norm_param {
1991
+ use_global_stats: true
1992
+ eps: 9.9999997e-06
1993
+ }
1994
+ }
1995
+ layer {
1996
+ name: "bn_scale45"
1997
+ type: "Scale"
1998
+ bottom: "batch_norm_blob45"
1999
+ top: "batch_norm_blob45"
2000
+ scale_param {
2001
+ bias_term: true
2002
+ }
2003
+ }
2004
+ layer {
2005
+ name: "conv46"
2006
+ type: "Convolution"
2007
+ bottom: "relu_blob39"
2008
+ top: "conv_blob46"
2009
+ convolution_param {
2010
+ num_output: 16
2011
+ bias_term: false
2012
+ pad: 1
2013
+ kernel_size: 3
2014
+ group: 1
2015
+ stride: 1
2016
+ weight_filler {
2017
+ type: "xavier"
2018
+ }
2019
+ dilation: 1
2020
+ }
2021
+ }
2022
+ layer {
2023
+ name: "batch_norm46"
2024
+ type: "BatchNorm"
2025
+ bottom: "conv_blob46"
2026
+ top: "batch_norm_blob46"
2027
+ batch_norm_param {
2028
+ use_global_stats: true
2029
+ eps: 9.9999997e-06
2030
+ }
2031
+ }
2032
+ layer {
2033
+ name: "bn_scale46"
2034
+ type: "Scale"
2035
+ bottom: "batch_norm_blob46"
2036
+ top: "batch_norm_blob46"
2037
+ scale_param {
2038
+ bias_term: true
2039
+ }
2040
+ }
2041
+ layer {
2042
+ name: "relu40"
2043
+ type: "ReLU"
2044
+ bottom: "batch_norm_blob46"
2045
+ top: "relu_blob40"
2046
+ }
2047
+ layer {
2048
+ name: "conv47"
2049
+ type: "Convolution"
2050
+ bottom: "relu_blob40"
2051
+ top: "conv_blob47"
2052
+ convolution_param {
2053
+ num_output: 16
2054
+ bias_term: false
2055
+ pad: 1
2056
+ kernel_size: 3
2057
+ group: 1
2058
+ stride: 1
2059
+ weight_filler {
2060
+ type: "xavier"
2061
+ }
2062
+ dilation: 1
2063
+ }
2064
+ }
2065
+ layer {
2066
+ name: "batch_norm47"
2067
+ type: "BatchNorm"
2068
+ bottom: "conv_blob47"
2069
+ top: "batch_norm_blob47"
2070
+ batch_norm_param {
2071
+ use_global_stats: true
2072
+ eps: 9.9999997e-06
2073
+ }
2074
+ }
2075
+ layer {
2076
+ name: "bn_scale47"
2077
+ type: "Scale"
2078
+ bottom: "batch_norm_blob47"
2079
+ top: "batch_norm_blob47"
2080
+ scale_param {
2081
+ bias_term: true
2082
+ }
2083
+ }
2084
+ layer {
2085
+ name: "cat3"
2086
+ type: "Concat"
2087
+ bottom: "batch_norm_blob43"
2088
+ bottom: "batch_norm_blob45"
2089
+ bottom: "batch_norm_blob47"
2090
+ top: "cat_blob3"
2091
+ concat_param {
2092
+ axis: 1
2093
+ }
2094
+ }
2095
+ layer {
2096
+ name: "relu41"
2097
+ type: "ReLU"
2098
+ bottom: "cat_blob3"
2099
+ top: "relu_blob41"
2100
+ }
2101
+ layer {
2102
+ name: "conv48"
2103
+ type: "Convolution"
2104
+ bottom: "relu_blob35"
2105
+ top: "conv_blob48"
2106
+ convolution_param {
2107
+ num_output: 8
2108
+ bias_term: true
2109
+ pad: 0
2110
+ kernel_size: 1
2111
+ group: 1
2112
+ stride: 1
2113
+ weight_filler {
2114
+ type: "xavier"
2115
+ }
2116
+ bias_filler {
2117
+ type: "constant"
2118
+ }
2119
+ dilation: 1
2120
+ }
2121
+ }
2122
+ layer {
2123
+ name: "conv49"
2124
+ type: "Convolution"
2125
+ bottom: "relu_blob35"
2126
+ top: "conv_blob49"
2127
+ convolution_param {
2128
+ num_output: 4
2129
+ bias_term: true
2130
+ pad: 0
2131
+ kernel_size: 1
2132
+ group: 1
2133
+ stride: 1
2134
+ weight_filler {
2135
+ type: "xavier"
2136
+ }
2137
+ bias_filler {
2138
+ type: "constant"
2139
+ }
2140
+ dilation: 1
2141
+ }
2142
+ }
2143
+ layer {
2144
+ name: "conv50"
2145
+ type: "Convolution"
2146
+ bottom: "relu_blob38"
2147
+ top: "conv_blob50"
2148
+ convolution_param {
2149
+ num_output: 8
2150
+ bias_term: true
2151
+ pad: 0
2152
+ kernel_size: 1
2153
+ group: 1
2154
+ stride: 1
2155
+ weight_filler {
2156
+ type: "xavier"
2157
+ }
2158
+ bias_filler {
2159
+ type: "constant"
2160
+ }
2161
+ dilation: 1
2162
+ }
2163
+ }
2164
+ layer {
2165
+ name: "conv51"
2166
+ type: "Convolution"
2167
+ bottom: "relu_blob38"
2168
+ top: "conv_blob51"
2169
+ convolution_param {
2170
+ num_output: 4
2171
+ bias_term: true
2172
+ pad: 0
2173
+ kernel_size: 1
2174
+ group: 1
2175
+ stride: 1
2176
+ weight_filler {
2177
+ type: "xavier"
2178
+ }
2179
+ bias_filler {
2180
+ type: "constant"
2181
+ }
2182
+ dilation: 1
2183
+ }
2184
+ }
2185
+ layer {
2186
+ name: "conv52"
2187
+ type: "Convolution"
2188
+ bottom: "relu_blob41"
2189
+ top: "conv_blob52"
2190
+ convolution_param {
2191
+ num_output: 8
2192
+ bias_term: true
2193
+ pad: 0
2194
+ kernel_size: 1
2195
+ group: 1
2196
+ stride: 1
2197
+ weight_filler {
2198
+ type: "xavier"
2199
+ }
2200
+ bias_filler {
2201
+ type: "constant"
2202
+ }
2203
+ dilation: 1
2204
+ }
2205
+ }
2206
+ layer {
2207
+ name: "conv53"
2208
+ type: "Convolution"
2209
+ bottom: "relu_blob41"
2210
+ top: "conv_blob53"
2211
+ convolution_param {
2212
+ num_output: 4
2213
+ bias_term: true
2214
+ pad: 0
2215
+ kernel_size: 1
2216
+ group: 1
2217
+ stride: 1
2218
+ weight_filler {
2219
+ type: "xavier"
2220
+ }
2221
+ bias_filler {
2222
+ type: "constant"
2223
+ }
2224
+ dilation: 1
2225
+ }
2226
+ }
2227
+ ############ prior box ###########
2228
+
2229
+ layer {
2230
+ name: "conv4_3_norm_mbox_loc_perm"
2231
+ type: "Permute"
2232
+ bottom: "conv_blob48"
2233
+ top: "conv4_3_norm_mbox_loc_perm"
2234
+ permute_param {
2235
+ order: 0
2236
+ order: 2
2237
+ order: 3
2238
+ order: 1
2239
+ }
2240
+ }
2241
+ layer {
2242
+ name: "conv4_3_norm_mbox_loc_flat"
2243
+ type: "Flatten"
2244
+ bottom: "conv4_3_norm_mbox_loc_perm"
2245
+ top: "conv4_3_norm_mbox_loc_flat"
2246
+ flatten_param {
2247
+ axis: 1
2248
+ }
2249
+ }
2250
+ layer {
2251
+ name: "conv4_3_norm_mbox_conf_perm"
2252
+ type: "Permute"
2253
+ bottom: "conv_blob49"
2254
+ top: "conv4_3_norm_mbox_conf_perm"
2255
+ permute_param {
2256
+ order: 0
2257
+ order: 2
2258
+ order: 3
2259
+ order: 1
2260
+ }
2261
+ }
2262
+ layer {
2263
+ name: "conv4_3_norm_mbox_conf_flat"
2264
+ type: "Flatten"
2265
+ bottom: "conv4_3_norm_mbox_conf_perm"
2266
+ top: "conv4_3_norm_mbox_conf_flat"
2267
+ flatten_param {
2268
+ axis: 1
2269
+ }
2270
+ }
2271
+ layer {
2272
+ name: "conv4_3_norm_mbox_priorbox"
2273
+ type: "PriorBox"
2274
+ bottom: "relu_blob35"
2275
+ bottom: "data"
2276
+ top: "conv4_3_norm_mbox_priorbox"
2277
+ prior_box_param {
2278
+ min_size: 16.0
2279
+ min_size: 32.0
2280
+ clip: false
2281
+ variance: 0.1
2282
+ variance: 0.1
2283
+ variance: 0.2
2284
+ variance: 0.2
2285
+ step: 8.0
2286
+ offset: 0.5
2287
+ }
2288
+ }
2289
+
2290
+ layer {
2291
+ name: "conv5_3_norm_mbox_loc_perm"
2292
+ type: "Permute"
2293
+ bottom: "conv_blob50"
2294
+ top: "conv5_3_norm_mbox_loc_perm"
2295
+ permute_param {
2296
+ order: 0
2297
+ order: 2
2298
+ order: 3
2299
+ order: 1
2300
+ }
2301
+ }
2302
+ layer {
2303
+ name: "conv5_3_norm_mbox_loc_flat"
2304
+ type: "Flatten"
2305
+ bottom: "conv5_3_norm_mbox_loc_perm"
2306
+ top: "conv5_3_norm_mbox_loc_flat"
2307
+ flatten_param {
2308
+ axis: 1
2309
+ }
2310
+ }
2311
+ layer {
2312
+ name: "conv5_3_norm_mbox_conf_perm"
2313
+ type: "Permute"
2314
+ bottom: "conv_blob51"
2315
+ top: "conv5_3_norm_mbox_conf_perm"
2316
+ permute_param {
2317
+ order: 0
2318
+ order: 2
2319
+ order: 3
2320
+ order: 1
2321
+ }
2322
+ }
2323
+ layer {
2324
+ name: "conv5_3_norm_mbox_conf_flat"
2325
+ type: "Flatten"
2326
+ bottom: "conv5_3_norm_mbox_conf_perm"
2327
+ top: "conv5_3_norm_mbox_conf_flat"
2328
+ flatten_param {
2329
+ axis: 1
2330
+ }
2331
+ }
2332
+ layer {
2333
+ name: "conv5_3_norm_mbox_priorbox"
2334
+ type: "PriorBox"
2335
+ bottom: "relu_blob38"
2336
+ bottom: "data"
2337
+ top: "conv5_3_norm_mbox_priorbox"
2338
+ prior_box_param {
2339
+ min_size: 64.0
2340
+ min_size: 128.0
2341
+ clip: false
2342
+ variance: 0.1
2343
+ variance: 0.1
2344
+ variance: 0.2
2345
+ variance: 0.2
2346
+ step: 16.0
2347
+ offset: 0.5
2348
+ }
2349
+ }
2350
+
2351
+ layer {
2352
+ name: "conv6_3_norm_mbox_loc_perm"
2353
+ type: "Permute"
2354
+ bottom: "conv_blob52"
2355
+ top: "conv6_3_norm_mbox_loc_perm"
2356
+ permute_param {
2357
+ order: 0
2358
+ order: 2
2359
+ order: 3
2360
+ order: 1
2361
+ }
2362
+ }
2363
+ layer {
2364
+ name: "conv6_3_norm_mbox_loc_flat"
2365
+ type: "Flatten"
2366
+ bottom: "conv6_3_norm_mbox_loc_perm"
2367
+ top: "conv6_3_norm_mbox_loc_flat"
2368
+ flatten_param {
2369
+ axis: 1
2370
+ }
2371
+ }
2372
+ layer {
2373
+ name: "conv6_3_norm_mbox_conf_perm"
2374
+ type: "Permute"
2375
+ bottom: "conv_blob53"
2376
+ top: "conv6_3_norm_mbox_conf_perm"
2377
+ permute_param {
2378
+ order: 0
2379
+ order: 2
2380
+ order: 3
2381
+ order: 1
2382
+ }
2383
+ }
2384
+ layer {
2385
+ name: "conv6_3_norm_mbox_conf_flat"
2386
+ type: "Flatten"
2387
+ bottom: "conv6_3_norm_mbox_conf_perm"
2388
+ top: "conv6_3_norm_mbox_conf_flat"
2389
+ flatten_param {
2390
+ axis: 1
2391
+ }
2392
+ }
2393
+ layer {
2394
+ name: "conv6_3_norm_mbox_priorbox"
2395
+ type: "PriorBox"
2396
+ bottom: "relu_blob41"
2397
+ bottom: "data"
2398
+ top: "conv6_3_norm_mbox_priorbox"
2399
+ prior_box_param {
2400
+ min_size: 256.0
2401
+ min_size: 512.0
2402
+ clip: false
2403
+ variance: 0.1
2404
+ variance: 0.1
2405
+ variance: 0.2
2406
+ variance: 0.2
2407
+ step: 32.0
2408
+ offset: 0.5
2409
+ }
2410
+ }
2411
+
2412
+ ########################################################
2413
+ layer {
2414
+ name: "mbox_loc"
2415
+ type: "Concat"
2416
+ bottom: "conv4_3_norm_mbox_loc_flat"
2417
+ bottom: "conv5_3_norm_mbox_loc_flat"
2418
+ bottom: "conv6_3_norm_mbox_loc_flat"
2419
+ top: "mbox_loc"
2420
+ concat_param {
2421
+ axis: 1
2422
+ }
2423
+ }
2424
+ layer {
2425
+ name: "mbox_conf"
2426
+ type: "Concat"
2427
+ bottom: "conv4_3_norm_mbox_conf_flat"
2428
+ bottom: "conv5_3_norm_mbox_conf_flat"
2429
+ bottom: "conv6_3_norm_mbox_conf_flat"
2430
+ top: "mbox_conf"
2431
+ concat_param {
2432
+ axis: 1
2433
+ }
2434
+ }
2435
+ layer {
2436
+ name: "mbox_priorbox"
2437
+ type: "Concat"
2438
+ bottom: "conv4_3_norm_mbox_priorbox"
2439
+ bottom: "conv5_3_norm_mbox_priorbox"
2440
+ bottom: "conv6_3_norm_mbox_priorbox"
2441
+ top: "mbox_priorbox"
2442
+ concat_param {
2443
+ axis: 2
2444
+ }
2445
+ }
2446
+ layer {
2447
+ name: "mbox_conf_reshape"
2448
+ type: "Reshape"
2449
+ bottom: "mbox_conf"
2450
+ top: "mbox_conf_reshape"
2451
+ reshape_param {
2452
+ shape {
2453
+ dim: 0
2454
+ dim: -1
2455
+ dim: 2
2456
+ }
2457
+ }
2458
+ }
2459
+ layer {
2460
+ name: "mbox_conf_softmax"
2461
+ type: "Softmax"
2462
+ bottom: "mbox_conf_reshape"
2463
+ top: "mbox_conf_softmax"
2464
+ softmax_param {
2465
+ axis: 2
2466
+ }
2467
+ }
2468
+ layer {
2469
+ name: "mbox_conf_flatten"
2470
+ type: "Flatten"
2471
+ bottom: "mbox_conf_softmax"
2472
+ top: "mbox_conf_flatten"
2473
+ flatten_param {
2474
+ axis: 1
2475
+ }
2476
+ }
2477
+ layer {
2478
+ name: "detection_out"
2479
+ type: "DetectionOutput"
2480
+ bottom: "mbox_loc"
2481
+ bottom: "mbox_conf_flatten"
2482
+ bottom: "mbox_priorbox"
2483
+ top: "detection_out"
2484
+ include {
2485
+ phase: TEST
2486
+ }
2487
+ detection_output_param {
2488
+ num_classes: 2
2489
+ share_location: true
2490
+ background_label_id: 0
2491
+ nms_param {
2492
+ nms_threshold: 0.3
2493
+ top_k: 400
2494
+ }
2495
+ code_type: CENTER_SIZE
2496
+ keep_top_k: 200
2497
+ confidence_threshold: 0.1
2498
+ }
2499
+ }
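The prototxt above closes with the SSD-style head: the per-scale loc/conf maps are permuted, flattened and concatenated, the confidences are reshaped to two classes and softmaxed, and DetectionOutput decodes the prior boxes with NMS (threshold 0.3, keep_top_k 200, confidence_threshold 0.1). Below is a minimal sketch, not part of the commit, of driving this network through OpenCV's DNN module; it mirrors what src/anti_spoof_predict.py in this upload does, and the input image path is a hypothetical placeholder.

# Minimal sketch (not part of the commit): run the deploy.prototxt above with
# Widerface-RetinaFace.caffemodel through OpenCV DNN, as src/anti_spoof_predict.py does.
import cv2
import numpy as np

net = cv2.dnn.readNetFromCaffe(
    "resources/detection_model/deploy.prototxt",
    "resources/detection_model/Widerface-RetinaFace.caffemodel")

img = cv2.imread("face.jpg")                      # hypothetical BGR input image
h, w = img.shape[:2]
blob = cv2.dnn.blobFromImage(img, 1, mean=(104, 117, 123))
net.setInput(blob, "data")
out = net.forward("detection_out").squeeze()      # rows: [image_id, label, conf, x1, y1, x2, y2]
best = out[np.argmax(out[:, 2])]                  # highest-confidence face
x1, y1, x2, y2 = best[3] * w, best[4] * h, best[5] * w, best[6] * h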
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/saved_logs/jobs/Anti_Spoofing_1_80x80/Jul08_12-51-18/events.out.tfevents.1594183888.old01 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3809972fe501c551c4e64f0bffbafc4e13b06ee318a6e1679246cf432cab5570
3
+ size 1645
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/__pycache__/anti_spoof_predict.cpython-310.pyc ADDED
Binary file (3.57 kB)
 
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/__pycache__/anti_spoof_predict.cpython-311.pyc ADDED
Binary file (7.06 kB)
 
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/__pycache__/generate_patches.cpython-310.pyc ADDED
Binary file (1.48 kB)
 
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/__pycache__/generate_patches.cpython-311.pyc ADDED
Binary file (2.5 kB)
 
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/__pycache__/utility.cpython-310.pyc ADDED
Binary file (1.27 kB)
 
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/__pycache__/utility.cpython-311.pyc ADDED
Binary file (2.39 kB)
 
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/anti_spoof_predict.py ADDED
@@ -0,0 +1,106 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 20-6-9 10:20 AM
3
+ # @Author : zhuying
4
+ # @Company : Minivision
5
+ # @File : anti_spoof_predict.py
6
+ # @Software : PyCharm
7
+
8
+ import os
9
+ import traceback
10
+ import cv2
11
+ import math
12
+ import torch
13
+ import numpy as np
14
+ import torch.nn.functional as F
15
+
16
+
17
+ from src.model_lib.MiniFASNet import MiniFASNetV1, MiniFASNetV2,MiniFASNetV1SE,MiniFASNetV2SE
18
+ from src.data_io import transform as trans
19
+ from src.utility import get_kernel, parse_model_name
20
+
21
+ MODEL_MAPPING = {
22
+ 'MiniFASNetV1': MiniFASNetV1,
23
+ 'MiniFASNetV2': MiniFASNetV2,
24
+ 'MiniFASNetV1SE':MiniFASNetV1SE,
25
+ 'MiniFASNetV2SE':MiniFASNetV2SE
26
+ }
27
+
28
+
29
+ class Detection:
30
+ def __init__(self):
31
+ stack = traceback.extract_stack()
32
+ dirname= os.path.dirname(stack[-2].filename)
33
+ caffemodel = os.path.join(dirname,'..','resources','detection_model','Widerface-RetinaFace.caffemodel')
34
+ deploy = os.path.join(dirname,'..','resources','detection_model','deploy.prototxt')
35
+ self.detector = cv2.dnn.readNetFromCaffe(deploy, caffemodel)
36
+ self.detector_confidence = 0.6
37
+
38
+ def get_bbox(self, img):
39
+ height, width = img.shape[0], img.shape[1]
40
+ aspect_ratio = width / height
41
+ if img.shape[1] * img.shape[0] >= 192 * 192:
42
+ img = cv2.resize(img,
43
+ (int(192 * math.sqrt(aspect_ratio)),
44
+ int(192 / math.sqrt(aspect_ratio))), interpolation=cv2.INTER_LINEAR)
45
+
46
+ blob = cv2.dnn.blobFromImage(img, 1, mean=(104, 117, 123))
47
+ self.detector.setInput(blob, 'data')
48
+ out = self.detector.forward('detection_out').squeeze()
49
+ max_conf_index = np.argmax(out[:, 2])
50
+ left, top, right, bottom = out[max_conf_index, 3]*width, out[max_conf_index, 4]*height, \
51
+ out[max_conf_index, 5]*width, out[max_conf_index, 6]*height
52
+ bbox = [int(left), int(top), int(right-left+1), int(bottom-top+1)]
53
+ return bbox
54
+
55
+
56
+ class AntiSpoofPredict(Detection):
57
+ def __init__(self, device_id):
58
+ super(AntiSpoofPredict, self).__init__()
59
+ self.device = torch.device("cuda:{}".format(device_id)
60
+ if torch.cuda.is_available() else "cpu")
61
+
62
+ def _load_model(self, model_path):
63
+ # define model
64
+ model_name = os.path.basename(model_path)
65
+ h_input, w_input, model_type, _ = parse_model_name(model_name)
66
+ self.kernel_size = get_kernel(h_input, w_input,)
67
+ self.model = MODEL_MAPPING[model_type](conv6_kernel=self.kernel_size).to(self.device)
68
+
69
+ # load model weight
70
+ state_dict = torch.load(model_path, map_location=self.device)
71
+ keys = iter(state_dict)
72
+ first_layer_name = keys.__next__()
73
+ if first_layer_name.find('module.') >= 0:
74
+ from collections import OrderedDict
75
+ new_state_dict = OrderedDict()
76
+ for key, value in state_dict.items():
77
+ name_key = key[7:]
78
+ new_state_dict[name_key] = value
79
+ self.model.load_state_dict(new_state_dict)
80
+ else:
81
+ self.model.load_state_dict(state_dict)
82
+ return None
83
+
84
+ def predict(self, img, model_path):
85
+ test_transform = trans.Compose([
86
+ trans.ToTensor(),
87
+ ])
88
+ img = test_transform(img)
89
+ img = img.unsqueeze(0).to(self.device)
90
+ self._load_model(model_path)
91
+ self.model.eval()
92
+ with torch.no_grad():
93
+ result = self.model.forward(img)
94
+ result = F.softmax(result).cpu().numpy()
95
+ return result
96
+
97
+
98
+
99
+
100
+
101
+
102
+
103
+
104
+
105
+
106
+
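anti_spoof_predict.py wires the two pieces together: Detection locates the most confident face with the Caffe detector defined by the deploy.prototxt above, and AntiSpoofPredict loads a MiniFASNet checkpoint (choosing the architecture from the file name via parse_model_name) and returns a softmax score for a face crop. A rough usage sketch follows; the frame path is hypothetical, and the plain resize is a simplification of the scale-aware cropping done by the generate_patches module in the full pipeline.

# Rough usage sketch (frame path hypothetical; plain resize stands in for the
# scale-aware crop produced by src/generate_patches.py in the full pipeline).
import cv2
from src.anti_spoof_predict import AntiSpoofPredict

predictor = AntiSpoofPredict(device_id=0)
frame = cv2.imread("frame.jpg")                          # hypothetical BGR frame
x, y, w, h = predictor.get_bbox(frame)                   # best face as [x, y, w, h]
face = cv2.resize(frame[y:y + h, x:x + w], (80, 80))     # bundled models expect 80x80 input
score = predictor.predict(
    face, "resources/anti_spoof_models/2.7_80x80_MiniFASNetV2.pth")
# `score` is the softmax output of the anti-spoofing model for this crop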
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/data_io/__pycache__/functional.cpython-310.pyc ADDED
Binary file (17.9 kB)
 
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/data_io/__pycache__/functional.cpython-311.pyc ADDED
Binary file (28.7 kB)
 
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/data_io/__pycache__/transform.cpython-310.pyc ADDED
Binary file (12.7 kB)
 
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/data_io/__pycache__/transform.cpython-311.pyc ADDED
Binary file (17.7 kB)
 
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/data_io/dataset_folder.py ADDED
@@ -0,0 +1,65 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 20-6-4 4:04 PM
3
+ # @Author : zhuying
4
+ # @Company : Minivision
5
+ # @File : dataset_folder.py
6
+ # @Software : PyCharm
7
+
8
+ import cv2
9
+ import torch
10
+ from torchvision import datasets
11
+ import numpy as np
12
+
13
+
14
+ def opencv_loader(path):
15
+ img = cv2.imread(path)
16
+ return img
17
+
18
+
19
+ class DatasetFolderFT(datasets.ImageFolder):
20
+ def __init__(self, root, transform=None, target_transform=None,
21
+ ft_width=10, ft_height=10, loader=opencv_loader):
22
+ super(DatasetFolderFT, self).__init__(root, transform, target_transform, loader)
23
+ self.root = root
24
+ self.ft_width = ft_width
25
+ self.ft_height = ft_height
26
+
27
+ def __getitem__(self, index):
28
+ path, target = self.samples[index]
29
+ sample = self.loader(path)
30
+ # generate the FT picture of the sample
31
+ ft_sample = generate_FT(sample)
32
+ if sample is None:
33
+ print('image is None --> ', path)
34
+ if ft_sample is None:
35
+ print('FT image is None -->', path)
36
+ assert sample is not None
37
+
38
+ ft_sample = cv2.resize(ft_sample, (self.ft_width, self.ft_height))
39
+ ft_sample = torch.from_numpy(ft_sample).float()
40
+ ft_sample = torch.unsqueeze(ft_sample, 0)
41
+
42
+ if self.transform is not None:
43
+ try:
44
+ sample = self.transform(sample)
45
+ except Exception as err:
46
+ print('Error Occured: %s' % err, path)
47
+ if self.target_transform is not None:
48
+ target = self.target_transform(target)
49
+ return sample, ft_sample, target
50
+
51
+
52
+ def generate_FT(image):
53
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
54
+ f = np.fft.fft2(image)
55
+ fshift = np.fft.fftshift(f)
56
+ fimg = np.log(np.abs(fshift)+1)
57
+ maxx = -1
58
+ minn = 100000
59
+ for i in range(len(fimg)):
60
+ if maxx < max(fimg[i]):
61
+ maxx = max(fimg[i])
62
+ if minn > min(fimg[i]):
63
+ minn = min(fimg[i])
64
+ fimg = (fimg - minn+1) / (maxx - minn+1)
65
+ return fimg
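generate_FT above converts a BGR image into its min-max normalized log-magnitude Fourier spectrum, which DatasetFolderFT resizes to (ft_width, ft_height) and returns next to the RGB sample as an auxiliary target. A small sketch of calling it directly is shown below; the image path is a hypothetical placeholder.

# Small sketch (image path hypothetical): build the Fourier-spectrum target the same
# way DatasetFolderFT.__getitem__ does.
import cv2
import torch
from src.data_io.dataset_folder import generate_FT

img = cv2.imread("sample.jpg")                # BGR image
ft = generate_FT(img)                         # 2-D float spectrum, roughly in [0, 1]
ft = cv2.resize(ft, (10, 10))                 # DatasetFolderFT's default ft_width / ft_height
ft_tensor = torch.unsqueeze(torch.from_numpy(ft).float(), 0)   # shape (1, 10, 10)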
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/data_io/dataset_loader.py ADDED
@@ -0,0 +1,33 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 20-6-4 3:40 PM
3
+ # @Author : zhuying
4
+ # @Company : Minivision
5
+ # @File : dataset_loader.py
6
+ # @Software : PyCharm
7
+
8
+ from torch.utils.data import DataLoader
9
+ from src.data_io.dataset_folder import DatasetFolderFT
10
+ from src.data_io import transform as trans
11
+
12
+
13
+ def get_train_loader(conf):
14
+ train_transform = trans.Compose([
15
+ trans.ToPILImage(),
16
+ trans.RandomResizedCrop(size=tuple(conf.input_size),
17
+ scale=(0.9, 1.1)),
18
+ trans.ColorJitter(brightness=0.4,
19
+ contrast=0.4, saturation=0.4, hue=0.1),
20
+ trans.RandomRotation(10),
21
+ trans.RandomHorizontalFlip(),
22
+ trans.ToTensor()
23
+ ])
24
+ root_path = '{}/{}'.format(conf.train_root_path, conf.patch_info)
25
+ trainset = DatasetFolderFT(root_path, train_transform,
26
+ None, conf.ft_width, conf.ft_height)
27
+ train_loader = DataLoader(
28
+ trainset,
29
+ batch_size=conf.batch_size,
30
+ shuffle=True,
31
+ pin_memory=True,
32
+ num_workers=16)
33
+ return train_loader
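get_train_loader above reads only a handful of attributes from its conf argument, so a bare namespace is enough to exercise it. A minimal sketch follows; the paths and values are placeholders, not taken from the commit.

# Minimal sketch: all values below are placeholders; get_train_loader reads exactly
# these attributes from `conf`.
from types import SimpleNamespace
from src.data_io.dataset_loader import get_train_loader

conf = SimpleNamespace(
    train_root_path="./datasets/rgb_image",   # hypothetical dataset root
    patch_info="1_80x80",                     # sub-folder of pre-cropped patches
    input_size=[80, 80],
    ft_width=10,
    ft_height=10,
    batch_size=64,
)
train_loader = get_train_loader(conf)
# each batch is (sample, ft_sample, target), as produced by DatasetFolderFT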
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/data_io/functional.py ADDED
@@ -0,0 +1,589 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 20-6-4 6:18 PM
3
+ # @Author : zhuying
4
+ # @Company : Minivision
5
+ # @File : functional.py
6
+ # @Software : PyCharm
7
+
8
+ from __future__ import division
9
+ import torch
10
+ from PIL import Image, ImageOps, ImageEnhance
11
+ try:
12
+ import accimage
13
+ except ImportError:
14
+ accimage = None
15
+ import numpy as np
16
+ import numbers
17
+ import types
18
+ import collections
19
+ import warnings
20
+
21
+
22
+ def _is_pil_image(img):
23
+ if accimage is not None:
24
+ return isinstance(img, (Image.Image, accimage.Image))
25
+ else:
26
+ return isinstance(img, Image.Image)
27
+
28
+
29
+ def _is_tensor_image(img):
30
+ return torch.is_tensor(img) and img.ndimension() == 3
31
+
32
+
33
+ def _is_numpy_image(img):
34
+ return isinstance(img, np.ndarray) and (img.ndim in {2, 3})
35
+
36
+
37
+ def to_tensor(pic):
38
+ """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
39
+
40
+ See ``ToTensor`` for more details.
41
+
42
+ Args:
43
+ pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
44
+
45
+ Returns:
46
+ Tensor: Converted image.
47
+ """
48
+ if not(_is_pil_image(pic) or _is_numpy_image(pic)):
49
+ raise TypeError('pic should be PIL Image or ndarray. Got {}'.format(type(pic)))
50
+
51
+ if isinstance(pic, np.ndarray):
52
+ # handle numpy array
53
+ # IR image channel=1: modify by lzc --> 20190730
54
+ if pic.ndim == 2:
55
+ pic = pic.reshape((pic.shape[0], pic.shape[1], 1))
56
+
57
+ img = torch.from_numpy(pic.transpose((2, 0, 1)))
58
+ # backward compatibility
59
+ # return img.float().div(255) modify by zkx
60
+ return img.float()
61
+ if accimage is not None and isinstance(pic, accimage.Image):
62
+ nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
63
+ pic.copyto(nppic)
64
+ return torch.from_numpy(nppic)
65
+
66
+ # handle PIL Image
67
+ if pic.mode == 'I':
68
+ img = torch.from_numpy(np.array(pic, np.int32, copy=False))
69
+ elif pic.mode == 'I;16':
70
+ img = torch.from_numpy(np.array(pic, np.int16, copy=False))
71
+ else:
72
+ img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
73
+ # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
74
+ if pic.mode == 'YCbCr':
75
+ nchannel = 3
76
+ elif pic.mode == 'I;16':
77
+ nchannel = 1
78
+ else:
79
+ nchannel = len(pic.mode)
80
+ img = img.view(pic.size[1], pic.size[0], nchannel)
81
+ # put it from HWC to CHW format
82
+ # yikes, this transpose takes 80% of the loading time/CPU
83
+ img = img.transpose(0, 1).transpose(0, 2).contiguous()
84
+ if isinstance(img, torch.ByteTensor):
85
+ # return img.float().div(255) #modified by zkx
86
+ return img.float()
87
+ else:
88
+ return img
89
+
90
+
91
+ def to_pil_image(pic, mode=None):
92
+ """Convert a tensor or an ndarray to PIL Image.
93
+
94
+ See :class:`~torchvision.transforms.ToPIlImage` for more details.
95
+
96
+ Args:
97
+ pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
98
+ mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
99
+
100
+ .. _PIL.Image mode: http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#modes
101
+
102
+ Returns:
103
+ PIL Image: Image converted to PIL Image.
104
+ """
105
+ if not(_is_numpy_image(pic) or _is_tensor_image(pic)):
106
+ raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(type(pic)))
107
+
108
+ npimg = pic
109
+ if isinstance(pic, torch.FloatTensor):
110
+ pic = pic.mul(255).byte()
111
+ if torch.is_tensor(pic):
112
+ npimg = np.transpose(pic.numpy(), (1, 2, 0))
113
+
114
+ if not isinstance(npimg, np.ndarray):
115
+ raise TypeError('Input pic must be a torch.Tensor or NumPy ndarray, ' +
116
+ 'not {}'.format(type(npimg)))
117
+
118
+ if npimg.shape[2] == 1:
119
+ expected_mode = None
120
+ npimg = npimg[:, :, 0]
121
+ if npimg.dtype == np.uint8:
122
+ expected_mode = 'L'
123
+ if npimg.dtype == np.int16:
124
+ expected_mode = 'I;16'
125
+ if npimg.dtype == np.int32:
126
+ expected_mode = 'I'
127
+ elif npimg.dtype == np.float32:
128
+ expected_mode = 'F'
129
+ if mode is not None and mode != expected_mode:
130
+ raise ValueError("Incorrect mode ({}) supplied for input type {}. Should be {}"
131
+ .format(mode, npimg.dtype, expected_mode))
132
+ mode = expected_mode
133
+
134
+ elif npimg.shape[2] == 4:
135
+ permitted_4_channel_modes = ['RGBA', 'CMYK']
136
+ if mode is not None and mode not in permitted_4_channel_modes:
137
+ raise ValueError("Only modes {} are supported for 4D inputs".format(permitted_4_channel_modes))
138
+
139
+ if mode is None and npimg.dtype == np.uint8:
140
+ mode = 'RGBA'
141
+ else:
142
+ permitted_3_channel_modes = ['RGB', 'YCbCr', 'HSV']
143
+ if mode is not None and mode not in permitted_3_channel_modes:
144
+ raise ValueError("Only modes {} are supported for 3D inputs".format(permitted_3_channel_modes))
145
+ if mode is None and npimg.dtype == np.uint8:
146
+ mode = 'RGB'
147
+
148
+ if mode is None:
149
+ raise TypeError('Input type {} is not supported'.format(npimg.dtype))
150
+
151
+ return Image.fromarray(npimg, mode=mode)
152
+
153
+
154
+ def normalize(tensor, mean, std):
155
+ """Normalize a tensor image with mean and standard deviation.
156
+
157
+ See ``Normalize`` for more details.
158
+
159
+ Args:
160
+ tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
161
+ mean (sequence): Sequence of means for each channel.
162
+ std (sequence): Sequence of standard deviations for each channel.
163
+
164
+ Returns:
165
+ Tensor: Normalized Tensor image.
166
+ """
167
+ if not _is_tensor_image(tensor):
168
+ raise TypeError('tensor is not a torch image.')
169
+
170
+ for t, m, s in zip(tensor, mean, std):
171
+ t.sub_(m).div_(s)
172
+ return tensor
173
+
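
A small sketch of the per-channel arithmetic normalize performs, using made-up statistics (the project's real mean/std are not defined in this file). Note that the operation is in place, so the input tensor itself is modified:

import torch
from src.data_io import functional as F

t = torch.full((3, 2, 2), 128.0)    # 3-channel image on the 0-255 scale
out = F.normalize(t, mean=[127.5, 127.5, 127.5], std=[128.0, 128.0, 128.0])
print(out[0, 0, 0])                 # tensor(0.0039): (128 - 127.5) / 128; out and t share the same storage
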
174
+
175
+ def resize(img, size, interpolation=Image.BILINEAR):
176
+ """Resize the input PIL Image to the given size.
177
+
178
+ Args:
179
+ img (PIL Image): Image to be resized.
180
+ size (sequence or int): Desired output size. If size is a sequence like
181
+ (h, w), the output size will be matched to this. If size is an int,
182
+ the smaller edge of the image will be matched to this number maintaining
183
+ the aspect ratio, i.e. if height > width, then the image will be rescaled to
184
+ (size * height / width, size)
185
+ interpolation (int, optional): Desired interpolation. Default is
186
+ ``PIL.Image.BILINEAR``
187
+
188
+ Returns:
189
+ PIL Image: Resized image.
190
+ """
191
+ if not _is_pil_image(img):
192
+ raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
193
+ if not (isinstance(size, int) or (isinstance(size, collections.abc.Iterable) and len(size) == 2)):
194
+ raise TypeError('Got inappropriate size arg: {}'.format(size))
195
+
196
+ if isinstance(size, int):
197
+ w, h = img.size
198
+ if (w <= h and w == size) or (h <= w and h == size):
199
+ return img
200
+ if w < h:
201
+ ow = size
202
+ oh = int(size * h / w)
203
+ return img.resize((ow, oh), interpolation)
204
+ else:
205
+ oh = size
206
+ ow = int(size * w / h)
207
+ return img.resize((ow, oh), interpolation)
208
+ else:
209
+ return img.resize(size[::-1], interpolation)
210
+
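
A quick illustration of the smaller-edge semantics described above (hypothetical sizes): an int size matches the shorter side and keeps the aspect ratio, while an (h, w) sequence is reversed into PIL's (w, h) order before resizing:

from PIL import Image
from src.data_io import functional as F

img = Image.new('RGB', (400, 300))       # PIL reports size as (width, height)
print(F.resize(img, 150).size)           # (200, 150): the shorter edge (height) is matched to 150
print(F.resize(img, (100, 160)).size)    # (160, 100): the (h, w) pair is flipped to PIL's (w, h)
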
211
+
212
+ def scale(*args, **kwargs):
213
+ warnings.warn("The use of the transforms.Scale transform is deprecated, " +
214
+ "please use transforms.Resize instead.")
215
+ return resize(*args, **kwargs)
216
+
217
+
218
+ def pad(img, padding, fill=0):
219
+ """Pad the given PIL Image on all sides with the given "pad" value.
220
+
221
+ Args:
222
+ img (PIL Image): Image to be padded.
223
+ padding (int or tuple): Padding on each border. If a single int is provided this
224
+ is used to pad all borders. If tuple of length 2 is provided this is the padding
225
+ on left/right and top/bottom respectively. If a tuple of length 4 is provided
226
+ this is the padding for the left, top, right and bottom borders
227
+ respectively.
228
+ fill: Pixel fill value. Default is 0. If a tuple of
229
+ length 3, it is used to fill R, G, B channels respectively.
230
+
231
+ Returns:
232
+ PIL Image: Padded image.
233
+ """
234
+ if not _is_pil_image(img):
235
+ raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
236
+
237
+ if not isinstance(padding, (numbers.Number, tuple)):
238
+ raise TypeError('Got inappropriate padding arg')
239
+ if not isinstance(fill, (numbers.Number, str, tuple)):
240
+ raise TypeError('Got inappropriate fill arg')
241
+
242
+ if isinstance(padding, collections.abc.Sequence) and len(padding) not in [2, 4]:
243
+ raise ValueError("Padding must be an int or a 2, or 4 element tuple, not a " +
244
+ "{} element tuple".format(len(padding)))
245
+
246
+ return ImageOps.expand(img, border=padding, fill=fill)
247
+
248
+
249
+ def crop(img, i, j, h, w):
250
+ """Crop the given PIL Image.
251
+
252
+ Args:
253
+ img (PIL Image): Image to be cropped.
254
+ i: Upper pixel coordinate.
255
+ j: Left pixel coordinate.
256
+ h: Height of the cropped image.
257
+ w: Width of the cropped image.
258
+
259
+ Returns:
260
+ PIL Image: Cropped image.
261
+ """
262
+ if not _is_pil_image(img):
263
+ raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
264
+
265
+ return img.crop((j, i, j + w, i + h))
266
+
267
+
268
+ def center_crop(img, output_size):
269
+ if isinstance(output_size, numbers.Number):
270
+ output_size = (int(output_size), int(output_size))
271
+ w, h = img.size
272
+ th, tw = output_size
273
+ i = int(round((h - th) / 2.))
274
+ j = int(round((w - tw) / 2.))
275
+ return crop(img, i, j, th, tw)
276
+
277
+
278
+ def resized_crop(img, i, j, h, w, size, interpolation=Image.BILINEAR):
279
+ """Crop the given PIL Image and resize it to desired size.
280
+
281
+ Notably used in RandomResizedCrop.
282
+
283
+ Args:
284
+ img (PIL Image): Image to be cropped.
285
+ i: Upper pixel coordinate.
286
+ j: Left pixel coordinate.
287
+ h: Height of the cropped image.
288
+ w: Width of the cropped image.
289
+ size (sequence or int): Desired output size. Same semantics as ``scale``.
290
+ interpolation (int, optional): Desired interpolation. Default is
291
+ ``PIL.Image.BILINEAR``.
292
+ Returns:
293
+ PIL Image: Cropped image.
294
+ """
295
+ assert _is_pil_image(img), 'img should be PIL Image'
296
+ img = crop(img, i, j, h, w)
297
+ img = resize(img, size, interpolation)
298
+ return img
299
+
300
+
301
+ def hflip(img):
302
+ """Horizontally flip the given PIL Image.
303
+
304
+ Args:
305
+ img (PIL Image): Image to be flipped.
306
+
307
+ Returns:
308
+ PIL Image: Horizontally flipped image.
309
+ """
310
+ if not _is_pil_image(img):
311
+ raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
312
+
313
+ return img.transpose(Image.FLIP_LEFT_RIGHT)
314
+
315
+
316
+ def vflip(img):
317
+ """Vertically flip the given PIL Image.
318
+
319
+ Args:
320
+ img (PIL Image): Image to be flipped.
321
+
322
+ Returns:
323
+ PIL Image: Vertically flipped image.
324
+ """
325
+ if not _is_pil_image(img):
326
+ raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
327
+
328
+ return img.transpose(Image.FLIP_TOP_BOTTOM)
329
+
330
+
331
+ def five_crop(img, size):
332
+ """Crop the given PIL Image into four corners and the central crop.
333
+
334
+ .. Note::
335
+ This transform returns a tuple of images and there may be a
336
+ mismatch in the number of inputs and targets your ``Dataset`` returns.
337
+
338
+ Args:
339
+ size (sequence or int): Desired output size of the crop. If size is an
340
+ int instead of sequence like (h, w), a square crop (size, size) is
341
+ made.
342
+ Returns:
343
+ tuple: tuple (tl, tr, bl, br, center) corresponding top left,
344
+ top right, bottom left, bottom right and center crop.
345
+ """
346
+ if isinstance(size, numbers.Number):
347
+ size = (int(size), int(size))
348
+ else:
349
+ assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
350
+
351
+ w, h = img.size
352
+ crop_h, crop_w = size
353
+ if crop_w > w or crop_h > h:
354
+ raise ValueError("Requested crop size {} is bigger than input size {}".format(size,
355
+ (h, w)))
356
+ tl = img.crop((0, 0, crop_w, crop_h))
357
+ tr = img.crop((w - crop_w, 0, w, crop_h))
358
+ bl = img.crop((0, h - crop_h, crop_w, h))
359
+ br = img.crop((w - crop_w, h - crop_h, w, h))
360
+ center = center_crop(img, (crop_h, crop_w))
361
+ return (tl, tr, bl, br, center)
362
+
363
+
364
+ def ten_crop(img, size, vertical_flip=False):
365
+ """Crop the given PIL Image into four corners and the central crop plus the
366
+ flipped version of these (horizontal flipping is used by default).
367
+
368
+ .. Note::
369
+ This transform returns a tuple of images and there may be a
370
+ mismatch in the number of inputs and targets your ``Dataset`` returns.
371
+
372
+ Args:
373
+ size (sequence or int): Desired output size of the crop. If size is an
374
+ int instead of sequence like (h, w), a square crop (size, size) is
375
+ made.
376
+ vertical_flip (bool): Use vertical flipping instead of horizontal
377
+
378
+ Returns:
379
+ tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip,
380
+ br_flip, center_flip) corresponding top left, top right,
381
+ bottom left, bottom right and center crop and same for the
382
+ flipped image.
383
+ """
384
+ if isinstance(size, numbers.Number):
385
+ size = (int(size), int(size))
386
+ else:
387
+ assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
388
+
389
+ first_five = five_crop(img, size)
390
+
391
+ if vertical_flip:
392
+ img = vflip(img)
393
+ else:
394
+ img = hflip(img)
395
+
396
+ second_five = five_crop(img, size)
397
+ return first_five + second_five
398
+
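
A small sketch of the tuple these two crop helpers return (hypothetical sizes):

from PIL import Image
from src.data_io import functional as F

img = Image.new('RGB', (100, 120))
crops = F.ten_crop(img, 64)           # five 64x64 crops of the image plus five of its horizontal flip
print(len(crops), crops[0].size)      # 10 (64, 64)
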
399
+
400
+ def adjust_brightness(img, brightness_factor):
401
+ """Adjust brightness of an Image.
402
+
403
+ Args:
404
+ img (PIL Image): PIL Image to be adjusted.
405
+ brightness_factor (float): How much to adjust the brightness. Can be
406
+ any non negative number. 0 gives a black image, 1 gives the
407
+ original image while 2 increases the brightness by a factor of 2.
408
+
409
+ Returns:
410
+ PIL Image: Brightness adjusted image.
411
+ """
412
+ if not _is_pil_image(img):
413
+ raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
414
+
415
+ enhancer = ImageEnhance.Brightness(img)
416
+ img = enhancer.enhance(brightness_factor)
417
+ return img
418
+
419
+
420
+ def adjust_contrast(img, contrast_factor):
421
+ """Adjust contrast of an Image.
422
+
423
+ Args:
424
+ img (PIL Image): PIL Image to be adjusted.
425
+ contrast_factor (float): How much to adjust the contrast. Can be any
426
+ non negative number. 0 gives a solid gray image, 1 gives the
427
+ original image while 2 increases the contrast by a factor of 2.
428
+
429
+ Returns:
430
+ PIL Image: Contrast adjusted image.
431
+ """
432
+ if not _is_pil_image(img):
433
+ raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
434
+
435
+ enhancer = ImageEnhance.Contrast(img)
436
+ img = enhancer.enhance(contrast_factor)
437
+ return img
438
+
439
+
440
+ def adjust_saturation(img, saturation_factor):
441
+ """Adjust color saturation of an image.
442
+
443
+ Args:
444
+ img (PIL Image): PIL Image to be adjusted.
445
+ saturation_factor (float): How much to adjust the saturation. 0 will
446
+ give a black and white image, 1 will give the original image while
447
+ 2 will enhance the saturation by a factor of 2.
448
+
449
+ Returns:
450
+ PIL Image: Saturation adjusted image.
451
+ """
452
+ if not _is_pil_image(img):
453
+ raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
454
+
455
+ enhancer = ImageEnhance.Color(img)
456
+ img = enhancer.enhance(saturation_factor)
457
+ return img
458
+
459
+
460
+ def adjust_hue(img, hue_factor):
461
+ """Adjust hue of an image.
462
+
463
+ The image hue is adjusted by converting the image to HSV and
464
+ cyclically shifting the intensities in the hue channel (H).
465
+ The image is then converted back to original image mode.
466
+
467
+ `hue_factor` is the amount of shift in H channel and must be in the
468
+ interval `[-0.5, 0.5]`.
469
+
470
+ See https://en.wikipedia.org/wiki/Hue for more details on Hue.
471
+
472
+ Args:
473
+ img (PIL Image): PIL Image to be adjusted.
474
+ hue_factor (float): How much to shift the hue channel. Should be in
475
+ [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
476
+ HSV space in positive and negative direction respectively.
477
+ 0 means no shift. Therefore, both -0.5 and 0.5 will give an image
478
+ with complementary colors while 0 gives the original image.
479
+
480
+ Returns:
481
+ PIL Image: Hue adjusted image.
482
+ """
483
+ if not(-0.5 <= hue_factor <= 0.5):
484
+ raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))
485
+
486
+ if not _is_pil_image(img):
487
+ raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
488
+
489
+ input_mode = img.mode
490
+ if input_mode in {'L', '1', 'I', 'F'}:
491
+ return img
492
+
493
+ h, s, v = img.convert('HSV').split()
494
+
495
+ np_h = np.array(h, dtype=np.uint8)
496
+ # uint8 addition takes care of rotation across boundaries
497
+ with np.errstate(over='ignore'):
498
+ np_h += np.uint8(hue_factor * 255)
499
+ h = Image.fromarray(np_h, 'L')
500
+
501
+ img = Image.merge('HSV', (h, s, v)).convert(input_mode)
502
+ return img
503
+
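
The cyclic shift above relies on uint8 overflow wrapping modulo 256; a tiny sketch of that step in isolation (made-up hue value):

import numpy as np

h = np.array([200], dtype=np.uint8)   # one value from the H channel
with np.errstate(over='ignore'):
    h += np.uint8(0.5 * 255)          # hue_factor = 0.5 adds 127
print(h)                              # [71], i.e. (200 + 127) % 256: the shift wraps around the hue circle
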
504
+
505
+ def adjust_gamma(img, gamma, gain=1):
506
+ """Perform gamma correction on an image.
507
+
508
+ Also known as Power Law Transform. Intensities in RGB mode are adjusted
509
+ based on the following equation:
510
+
511
+ I_out = 255 * gain * ((I_in / 255) ** gamma)
512
+
513
+ See https://en.wikipedia.org/wiki/Gamma_correction for more details.
514
+
515
+ Args:
516
+ img (PIL Image): PIL Image to be adjusted.
517
+ gamma (float): Non-negative real number. gamma larger than 1 makes the
518
+ shadows darker, while gamma smaller than 1 makes dark regions
519
+ lighter.
520
+ gain (float): The constant multiplier.
521
+ """
522
+ if not _is_pil_image(img):
523
+ raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
524
+
525
+ if gamma < 0:
526
+ raise ValueError('Gamma should be a non-negative real number')
527
+
528
+ input_mode = img.mode
529
+ img = img.convert('RGB')
530
+
531
+ np_img = np.array(img, dtype=np.float32)
532
+ np_img = 255 * gain * ((np_img / 255) ** gamma)
533
+ np_img = np.uint8(np.clip(np_img, 0, 255))
534
+
535
+ img = Image.fromarray(np_img, 'RGB').convert(input_mode)
536
+ return img
537
+
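
A worked instance of the power-law formula above, with gain = 1 and gamma = 2 on a mid-grey pixel (illustrative values):

from PIL import Image
from src.data_io import functional as F

grey = Image.new('RGB', (1, 1), (128, 128, 128))
print(F.adjust_gamma(grey, gamma=2).getpixel((0, 0)))
# (64, 64, 64): 255 * (128 / 255) ** 2 = 64.25..., truncated by the uint8 cast, so mid-tones darken when gamma > 1
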
538
+
539
+ def rotate(img, angle, resample=False, expand=False, center=None):
540
+ """Rotate the image by angle and then (optionally) translate it by (n_columns, n_rows)
541
+
542
+
543
+ Args:
544
+ img (PIL Image): PIL Image to be rotated.
545
+ angle ({float, int}): Rotation angle in degrees, counter-clockwise.
546
+ resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
547
+ An optional resampling filter.
548
+ See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
549
+ If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
550
+ expand (bool, optional): Optional expansion flag.
551
+ If true, expands the output image to make it large enough to hold the entire rotated image.
552
+ If false or omitted, make the output image the same size as the input image.
553
+ Note that the expand flag assumes rotation around the center and no translation.
554
+ center (2-tuple, optional): Optional center of rotation.
555
+ Origin is the upper left corner.
556
+ Default is the center of the image.
557
+ """
558
+
559
+ if not _is_pil_image(img):
560
+ raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
561
+
562
+ return img.rotate(angle, resample, expand, center)
563
+
564
+
565
+ def to_grayscale(img, num_output_channels=1):
566
+ """Convert image to grayscale version of image.
567
+
568
+ Args:
569
+ img (PIL Image): Image to be converted to grayscale.
570
+
571
+ Returns:
572
+ PIL Image: Grayscale version of the image.
573
+ if num_output_channels == 1 : returned image is single channel
574
+ if num_output_channels == 3 : returned image is 3 channel with r == g == b
575
+ """
576
+ if not _is_pil_image(img):
577
+ raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
578
+
579
+ if num_output_channels == 1:
580
+ img = img.convert('L')
581
+ elif num_output_channels == 3:
582
+ img = img.convert('L')
583
+ np_img = np.array(img, dtype=np.uint8)
584
+ np_img = np.dstack([np_img, np_img, np_img])
585
+ img = Image.fromarray(np_img, 'RGB')
586
+ else:
587
+ raise ValueError('num_output_channels should be either 1 or 3')
588
+
589
+ return img
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/data_io/transform.py ADDED
@@ -0,0 +1,347 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 20-6-4 4:19 PM
3
+ # @Author : zhuying
4
+ # @Company : Minivision
5
+ # @File : transform.py
6
+ # @Software : PyCharm
7
+
8
+ from __future__ import division
9
+ import math
10
+ import random
11
+ from PIL import Image
12
+ try:
13
+ import accimage
14
+ except ImportError:
15
+ accimage = None
16
+ import numpy as np
17
+ import numbers
18
+ import types
19
+
20
+ from src.data_io import functional as F
21
+
22
+ __all__ = ["Compose", "ToTensor", "ToPILImage", "Normalize", "RandomHorizontalFlip",
23
+ "Lambda", "RandomResizedCrop", "ColorJitter", "RandomRotation"]
24
+
25
+
26
+ class Compose(object):
27
+ """Composes several transforms together.
28
+
29
+ Args:
30
+ transforms (list of ``Transform`` objects): list of transforms to compose.
31
+
32
+ Example:
33
+ >>> transforms.Compose([
34
+ >>> transforms.CenterCrop(10),
35
+ >>> transforms.ToTensor(),
36
+ >>> ])
37
+ """
38
+
39
+ def __init__(self, transforms):
40
+ self.transforms = transforms
41
+
42
+ def __call__(self, img):
43
+ for t in self.transforms:
44
+ img = t(img)
45
+ return img
46
+
47
+
48
+ class ToTensor(object):
49
+
50
+ """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
51
+
52
+ Converts a PIL Image or numpy.ndarray (H x W x C) in the range
53
+ [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].
54
+ """
55
+
56
+ def __call__(self, pic):
57
+ """
58
+ Args:
59
+ pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
60
+
61
+ Returns:
62
+ Tensor: Converted image.
63
+ """
64
+ return F.to_tensor(pic)
65
+
66
+
67
+ class Lambda(object):
68
+ """Apply a user-defined lambda as a transform.
69
+
70
+ Args:
71
+ lambd (function): Lambda/function to be used for transform.
72
+ """
73
+
74
+ def __init__(self, lambd):
75
+ assert isinstance(lambd, types.LambdaType)
76
+ self.lambd = lambd
77
+
78
+ def __call__(self, img):
79
+ return self.lambd(img)
80
+
81
+
82
+ class ToPILImage(object):
83
+ """Convert a tensor or an ndarray to PIL Image.
84
+
85
+ Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
86
+ H x W x C to a PIL Image while preserving the value range.
87
+
88
+ Args:
89
+ mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
90
+ If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
91
+ 1. If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
92
+ 2. If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
93
+ 3. If the input has 1 channel, the ``mode`` is determined by the data type (i.e.,
94
+ ``int``, ``float``, ``short``).
95
+
96
+ .. _PIL.Image mode: http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#modes
97
+ """
98
+ def __init__(self, mode=None):
99
+ self.mode = mode
100
+
101
+ def __call__(self, pic):
102
+ """
103
+ Args:
104
+ pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
105
+
106
+ Returns:
107
+ PIL Image: Image converted to PIL Image.
108
+
109
+ """
110
+ return F.to_pil_image(pic, self.mode)
111
+
112
+
113
+ class Normalize(object):
114
+ """Normalize an tensor image with mean and standard deviation.
115
+ Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this transform
116
+ will normalize each channel of the input ``torch.*Tensor`` i.e.
117
+ ``input[channel] = (input[channel] - mean[channel]) / std[channel]``
118
+
119
+ Args:
120
+ mean (sequence): Sequence of means for each channel.
121
+ std (sequence): Sequence of standard deviations for each channel.
122
+ """
123
+
124
+ def __init__(self, mean, std):
125
+ self.mean = mean
126
+ self.std = std
127
+
128
+ def __call__(self, tensor):
129
+ """
130
+ Args:
131
+ tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
132
+
133
+ Returns:
134
+ Tensor: Normalized Tensor image.
135
+ """
136
+ return F.normalize(tensor, self.mean, self.std)
137
+
138
+
139
+ class RandomHorizontalFlip(object):
140
+ """Horizontally flip the given PIL Image randomly with a probability of 0.5."""
141
+
142
+ def __call__(self, img):
143
+ """
144
+ Args:
145
+ img (PIL Image): Image to be flipped.
146
+
147
+ Returns:
148
+ PIL Image: Randomly flipped image.
149
+ """
150
+ if random.random() < 0.5:
151
+ return F.hflip(img)
152
+ return img
153
+
154
+
155
+ class RandomResizedCrop(object):
156
+ """Crop the given PIL Image to random size and aspect ratio.
157
+
158
+ A crop of random size (default: of 0.08 to 1.0) of the original size and a random
159
+ aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
160
+ is finally resized to given size.
161
+ This is popularly used to train the Inception networks.
162
+
163
+ Args:
164
+ size: expected output size of each edge
165
+ scale: range of size of the origin size cropped
166
+ ratio: range of aspect ratio of the origin aspect ratio cropped
167
+ interpolation: Default: PIL.Image.BILINEAR
168
+ """
169
+
170
+ def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=Image.BILINEAR):
171
+ if isinstance(size, tuple):
172
+ self.size = size
173
+ else:
174
+ self.size = (size, size)
175
+ self.interpolation = interpolation
176
+ self.scale = scale
177
+ self.ratio = ratio
178
+
179
+ @staticmethod
180
+ def get_params(img, scale, ratio):
181
+ """Get parameters for ``crop`` for a random sized crop.
182
+
183
+ Args:
184
+ img (PIL Image): Image to be cropped.
185
+ scale (tuple): range of size of the origin size cropped
186
+ ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
187
+
188
+ Returns:
189
+ tuple: params (i, j, h, w) to be passed to ``crop`` for a random
190
+ sized crop.
191
+ """
192
+ for attempt in range(10):
193
+ area = img.size[0] * img.size[1]
194
+ target_area = random.uniform(*scale) * area
195
+ aspect_ratio = random.uniform(*ratio)
196
+
197
+ w = int(round(math.sqrt(target_area * aspect_ratio)))
198
+ h = int(round(math.sqrt(target_area / aspect_ratio)))
199
+
200
+ if random.random() < 0.5:
201
+ w, h = h, w
202
+
203
+ if w <= img.size[0] and h <= img.size[1]:
204
+ i = random.randint(0, img.size[1] - h)
205
+ j = random.randint(0, img.size[0] - w)
206
+ return i, j, h, w
207
+
208
+ # Fallback
209
+ w = min(img.size[0], img.size[1])
210
+ i = (img.size[1] - w) // 2
211
+ j = (img.size[0] - w) // 2
212
+ return i, j, w, w
213
+
214
+ def __call__(self, img):
215
+ """
216
+ Args:
217
+ img (PIL Image): Image to be cropped and resized.
218
+
219
+ Returns:
220
+ PIL Image: Randomly cropped and resized image.
221
+ """
222
+ i, j, h, w = self.get_params(img, self.scale, self.ratio)
223
+ return F.resized_crop(img, i, j, h, w, self.size, self.interpolation)
224
+
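
A short sketch of how get_params above behaves (illustrative numbers, assuming the module imports as src.data_io.transform): it samples a target area and aspect ratio, retries up to ten times if the box does not fit, and otherwise falls back to a centred square crop:

from PIL import Image
from src.data_io import transform as trans

img = Image.new('RGB', (100, 100))
rrc = trans.RandomResizedCrop(80, scale=(0.5, 1.0))
i, j, h, w = rrc.get_params(img, rrc.scale, rrc.ratio)
# w * h covers roughly 50-100% of the 100x100 image and w / h stays near [3/4, 4/3];
# __call__ then crops that (i, j, h, w) box and resizes it to 80x80.
print(rrc(img).size)   # (80, 80)
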
225
+
226
+ class ColorJitter(object):
227
+ """Randomly change the brightness, contrast and saturation of an image.
228
+
229
+ Args:
230
+ brightness (float): How much to jitter brightness. brightness_factor
231
+ is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].
232
+ contrast (float): How much to jitter contrast. contrast_factor
233
+ is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].
234
+ saturation (float): How much to jitter saturation. saturation_factor
235
+ is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].
236
+ hue (float): How much to jitter hue. hue_factor is chosen uniformly from
237
+ [-hue, hue]. Should be >=0 and <= 0.5.
238
+ """
239
+ def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
240
+ self.brightness = brightness
241
+ self.contrast = contrast
242
+ self.saturation = saturation
243
+ self.hue = hue
244
+
245
+ @staticmethod
246
+ def get_params(brightness, contrast, saturation, hue):
247
+ """Get a randomized transform to be applied on image.
248
+
249
+ Arguments are same as that of __init__.
250
+
251
+ Returns:
252
+ Transform which randomly adjusts brightness, contrast and
253
+ saturation in a random order.
254
+ """
255
+ transforms = []
256
+ if brightness > 0:
257
+ brightness_factor = np.random.uniform(max(0, 1 - brightness), 1 + brightness)
258
+ transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))
259
+
260
+ if contrast > 0:
261
+ contrast_factor = np.random.uniform(max(0, 1 - contrast), 1 + contrast)
262
+ transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))
263
+
264
+ if saturation > 0:
265
+ saturation_factor = np.random.uniform(max(0, 1 - saturation), 1 + saturation)
266
+ transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))
267
+
268
+ if hue > 0:
269
+ hue_factor = np.random.uniform(-hue, hue)
270
+ transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))
271
+
272
+ np.random.shuffle(transforms)
273
+ transform = Compose(transforms)
274
+
275
+ return transform
276
+
277
+ def __call__(self, img):
278
+ """
279
+ Args:
280
+ img (PIL Image): Input image.
281
+
282
+ Returns:
283
+ PIL Image: Color jittered image.
284
+ """
285
+ transform = self.get_params(self.brightness, self.contrast,
286
+ self.saturation, self.hue)
287
+ return transform(img)
288
+
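
A minimal sketch of the jitter ranges the class above produces (illustrative strengths): each enabled factor is drawn per call, wrapped in a Lambda, shuffled, and composed:

from PIL import Image
from src.data_io import transform as trans

jitter = trans.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)
img = Image.new('RGB', (80, 80), (120, 60, 200))
out = jitter(img)            # factors drawn from [0.6, 1.4] (brightness/contrast/saturation) and [-0.1, 0.1] (hue)
print(out.size, out.mode)    # (80, 80) RGB: geometry is untouched, only colour statistics change
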
289
+
290
+ class RandomRotation(object):
291
+ """Rotate the image by angle.
292
+
293
+ Args:
294
+ degrees (sequence or float or int): Range of degrees to select from.
295
+ If degrees is a number instead of sequence like (min, max), the range of degrees
296
+ will be (-degrees, +degrees).
297
+ resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
298
+ An optional resampling filter.
299
+ See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
300
+ If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
301
+ expand (bool, optional): Optional expansion flag.
302
+ If true, expands the output to make it large enough to hold the entire rotated image.
303
+ If false or omitted, make the output image the same size as the input image.
304
+ Note that the expand flag assumes rotation around the center and no translation.
305
+ center (2-tuple, optional): Optional center of rotation.
306
+ Origin is the upper left corner.
307
+ Default is the center of the image.
308
+ """
309
+
310
+ def __init__(self, degrees, resample=False, expand=False, center=None):
311
+ if isinstance(degrees, numbers.Number):
312
+ if degrees < 0:
313
+ raise ValueError("If degrees is a single number, it must be positive.")
314
+ self.degrees = (-degrees, degrees)
315
+ else:
316
+ if len(degrees) != 2:
317
+ raise ValueError("If degrees is a sequence, it must be of len 2.")
318
+ self.degrees = degrees
319
+
320
+ self.resample = resample
321
+ self.expand = expand
322
+ self.center = center
323
+
324
+ @staticmethod
325
+ def get_params(degrees):
326
+ """Get parameters for ``rotate`` for a random rotation.
327
+
328
+ Returns:
329
+ sequence: params to be passed to ``rotate`` for random rotation.
330
+ """
331
+ angle = np.random.uniform(degrees[0], degrees[1])
332
+
333
+ return angle
334
+
335
+ def __call__(self, img):
336
+ """
337
+ img (PIL Image): Image to be rotated.
338
+
339
+ Returns:
340
+ PIL Image: Rotated image.
341
+ """
342
+
343
+ angle = self.get_params(self.degrees)
344
+
345
+ return F.rotate(img, angle, self.resample, self.expand, self.center)
346
+
347
+
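
A hedged sketch of how these classes chain together into a training pipeline (illustrative values, not necessarily the augmentation actually configured by this repository's training code):

from src.data_io import transform as trans

train_transform = trans.Compose([
    trans.RandomResizedCrop(80, scale=(0.9, 1.0)),
    trans.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.1),
    trans.RandomRotation(10),
    trans.RandomHorizontalFlip(),
    trans.ToTensor(),   # yields float values still on the 0-255 scale, see functional.to_tensor above
])
# tensor = train_transform(pil_image)   # apply to any PIL RGB image
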
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/default_config.py ADDED
@@ -0,0 +1,73 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 20-6-4 9:12 AM
3
+ # @Author : zhuying
4
+ # @Company : Minivision
5
+ # @File : default_config.py
6
+ # @Software : PyCharm
7
+ # --*-- coding: utf-8 --*--
8
+ """
9
+ default config for training
10
+ """
11
+
12
+ import torch
13
+ from datetime import datetime
14
+ from easydict import EasyDict
15
+ from src.utility import make_if_not_exist, get_width_height, get_kernel
16
+
17
+
18
+ def get_default_config():
19
+ conf = EasyDict()
20
+
21
+ # ----------------------training---------------
22
+ conf.lr = 1e-1
23
+ # [9, 13, 15]
24
+ conf.milestones = [10, 15, 22]  # epochs at which the learning rate is decayed
25
+ conf.gamma = 0.1
26
+ conf.epochs = 25
27
+ conf.momentum = 0.9
28
+ conf.batch_size = 1024
29
+
30
+ # model
31
+ conf.num_classes = 3
32
+ conf.input_channel = 3
33
+ conf.embedding_size = 128
34
+
35
+ # dataset
36
+ conf.train_root_path = './datasets/rgb_image'
37
+
38
+ # save file path
39
+ conf.snapshot_dir_path = './saved_logs/snapshot'
40
+
41
+ # log path
42
+ conf.log_path = './saved_logs/jobs'
43
+ # tensorboard
44
+ conf.board_loss_every = 10
45
+ # save model/iter
46
+ conf.save_every = 30
47
+
48
+ return conf
49
+
50
+
51
+ def update_config(args, conf):
52
+ conf.devices = args.devices
53
+ conf.patch_info = args.patch_info
54
+ w_input, h_input = get_width_height(args.patch_info)
55
+ conf.input_size = [h_input, w_input]
56
+ conf.kernel_size = get_kernel(h_input, w_input)
57
+ conf.device = "cuda:{}".format(conf.devices[0]) if torch.cuda.is_available() else "cpu"
58
+
59
+ # resize fourier image size
60
+ conf.ft_height = 2*conf.kernel_size[0]
61
+ conf.ft_width = 2*conf.kernel_size[1]
62
+ current_time = datetime.now().strftime('%b%d_%H-%M-%S')
63
+ job_name = 'Anti_Spoofing_{}'.format(args.patch_info)
64
+ log_path = '{}/{}/{} '.format(conf.log_path, job_name, current_time)
65
+ snapshot_dir = '{}/{}'.format(conf.snapshot_dir_path, job_name)
66
+
67
+ make_if_not_exist(snapshot_dir)
68
+ make_if_not_exist(log_path)
69
+
70
+ conf.model_path = snapshot_dir
71
+ conf.log_path = log_path
72
+ conf.job_name = job_name
73
+ return conf
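
A hedged usage sketch of the two functions above. The args object normally comes from the training script's argument parser; only the attributes read here (devices, patch_info) matter, and the patch_info string below is purely illustrative, since get_width_height and get_kernel live in src/utility.py, which is not shown in this diff. Note that update_config creates the snapshot and log directories as a side effect:

from types import SimpleNamespace
from src.default_config import get_default_config, update_config

conf = get_default_config()
args = SimpleNamespace(devices=[0], patch_info="1_80x80")   # hypothetical patch_info value
conf = update_config(args, conf)
# conf.input_size and conf.kernel_size are now derived from patch_info,
# and directories named Anti_Spoofing_1_80x80 exist under ./saved_logs/.
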
face-main/face-attendance-system-master/Silent-Face-Anti-Spoofing-master/src/generate_patches.py ADDED
@@ -0,0 +1,65 @@
1
+ # -*- coding: utf-8 -*-
2
+ # @Time : 20-6-9 3:06 PM
3
+ # @Author : zhuying
4
+ # @Company : Minivision
5
+ # @File : generate_patches.py
6
+ # @Software : PyCharm
7
+ """
8
+ Create patch from original input image by using bbox coordinate
9
+ """
10
+
11
+ import cv2
12
+ import numpy as np
13
+
14
+
15
+ class CropImage:
16
+ @staticmethod
17
+ def _get_new_box(src_w, src_h, bbox, scale):
18
+ x = bbox[0]
19
+ y = bbox[1]
20
+ box_w = bbox[2]
21
+ box_h = bbox[3]
22
+
23
+ scale = min((src_h-1)/box_h, min((src_w-1)/box_w, scale))
24
+
25
+ new_width = box_w * scale
26
+ new_height = box_h * scale
27
+ center_x, center_y = box_w/2+x, box_h/2+y
28
+
29
+ left_top_x = center_x-new_width/2
30
+ left_top_y = center_y-new_height/2
31
+ right_bottom_x = center_x+new_width/2
32
+ right_bottom_y = center_y+new_height/2
33
+
34
+ if left_top_x < 0:
35
+ right_bottom_x -= left_top_x
36
+ left_top_x = 0
37
+
38
+ if left_top_y < 0:
39
+ right_bottom_y -= left_top_y
40
+ left_top_y = 0
41
+
42
+ if right_bottom_x > src_w-1:
43
+ left_top_x -= right_bottom_x-src_w+1
44
+ right_bottom_x = src_w-1
45
+
46
+ if right_bottom_y > src_h-1:
47
+ left_top_y -= right_bottom_y-src_h+1
48
+ right_bottom_y = src_h-1
49
+
50
+ return int(left_top_x), int(left_top_y),\
51
+ int(right_bottom_x), int(right_bottom_y)
52
+
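
A worked instance of the scaling and clamping done by _get_new_box (made-up numbers; assumes the repository root is on the path so this file imports as src.generate_patches):

from src.generate_patches import CropImage

# For a 640x480 frame and bbox = [300, 200, 100, 120] (x, y, box_w, box_h) with scale 2.7:
#   scale stays 2.7 because (480 - 1) / 120 = 3.99 and (640 - 1) / 100 = 6.39 are both larger,
#   the box grows to 270 x 324 around its centre (350, 260),
#   giving corners (215, 98) and (485, 422), which already fit inside the frame, so no clamping is applied.
print(CropImage._get_new_box(640, 480, [300, 200, 100, 120], 2.7))   # (215, 98, 485, 422)
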
53
+ def crop(self, org_img, bbox, scale, out_w, out_h, crop=True):
54
+
55
+ if not crop:
56
+ dst_img = cv2.resize(org_img, (out_w, out_h))
57
+ else:
58
+ src_h, src_w, _ = np.shape(org_img)
59
+ left_top_x, left_top_y, \
60
+ right_bottom_x, right_bottom_y = self._get_new_box(src_w, src_h, bbox, scale)
61
+
62
+ img = org_img[left_top_y: right_bottom_y+1,
63
+ left_top_x: right_bottom_x+1]
64
+ dst_img = cv2.resize(img, (out_w, out_h))
65
+ return dst_img
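
Finally, a hedged end-to-end sketch of CropImage.crop. The bbox and the 80x80 output size are made-up illustrative values; in the real pipeline the bbox would come from a face detector:

import numpy as np
from src.generate_patches import CropImage

frame = np.zeros((480, 640, 3), dtype=np.uint8)   # H x W x C frame, as OpenCV would deliver it
patch = CropImage().crop(frame, bbox=[300, 200, 100, 120], scale=2.7, out_w=80, out_h=80, crop=True)
print(patch.shape)                                # (80, 80, 3): the enlarged face box resized to the requested size
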