ProgramComputer committed
Commit 4a613d6 · 1 Parent(s): eb54add

Create VGGFace2.py

Files changed (1)
  1. VGGFace2.py +185 -0
VGGFace2.py ADDED
@@ -0,0 +1,185 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and ProgramComputer.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """VGGFace2 large-scale face recognition dataset."""
+
+ import re
+
+ import pandas as pd
+
+ import datasets
+
+ _DESCRIPTION = "VGGFace2 is a large-scale face recognition dataset. Images are downloaded from Google Image Search and have large variations in pose, age, illumination, ethnicity and profession."
+
+ _CITATION = """\
+ @article{DBLP:journals/corr/abs-1710-08092,
+   author     = {Qiong Cao and
+                 Li Shen and
+                 Weidi Xie and
+                 Omkar M. Parkhi and
+                 Andrew Zisserman},
+   title      = {VGGFace2: {A} dataset for recognising faces across pose and age},
+   journal    = {CoRR},
+   volume     = {abs/1710.08092},
+   year       = {2017},
+   url        = {http://arxiv.org/abs/1710.08092},
+   eprinttype = {arXiv},
+   eprint     = {1710.08092},
+   timestamp  = {Wed, 04 Aug 2021 07:50:14 +0200},
+   biburl     = {https://dblp.org/rec/journals/corr/abs-1710-08092.bib},
+   bibsource  = {dblp computer science bibliography, https://dblp.org}
+ }
+ """
+
+ # Homepage of the original VGGFace2 release (referenced by _info below).
+ _URL = "https://github.com/ox-vgg/vgg_face2"
+
+ _URLS = {
+     "default": {
+         "train": "https://huggingface.co/datasets/ProgramComputer/VGGFace2/resolve/main/data/vggface2_train.tar.gz",
+         "test": "https://huggingface.co/datasets/ProgramComputer/VGGFace2/resolve/main/data/vggface2_test.tar.gz",
+     }
+ }
+
+
+ class VGGFace2(datasets.GeneratorBasedBuilder):
+     """VGGFace2 is a large-scale dataset of faces collected from Google Image Search."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(version=VERSION)
+     ]
+
+     def _info(self):
+         features = {
+             "file": datasets.Image(),
+             "path": datasets.Value("string"),
+             "identity": datasets.Value("string"),
+             "male": datasets.Value("string"),
+             "black_hair": datasets.Value("string"),
+             "brown_hair": datasets.Value("string"),
+             "gray_hair": datasets.Value("string"),
+             "blond_hair": datasets.Value("string"),
+             "long_hair": datasets.Value("string"),
+             "mustache_or_beard": datasets.Value("string"),
+             "wearing_hat": datasets.Value("string"),
+             "eyeglasses": datasets.Value("string"),
+             "sunglasses": datasets.Value("string"),
+             "mouth_open": datasets.Value("string"),
+         }
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             homepage=_URL,
+             supervised_keys=datasets.info.SupervisedKeysData("file", "identity"),
+             features=datasets.Features(features),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         targets = [
+             "01-Male.txt", "02-Black_Hair.txt", "03-Brown_Hair.txt", "04-Gray_Hair.txt",
+             "05-Blond_Hair.txt", "06-Long_Hair.txt", "07-Mustache_or_Beard.txt",
+             "08-Wearing_Hat.txt", "09-Eyeglasses.txt", "10-Sunglasses.txt", "11-Mouth_Open.txt",
+         ]
+         # Map e.g. "01-Male.txt" -> "Male" and point each name at its
+         # attribute file in the upstream ox-vgg/vgg_face2 repository.
+         target_dict = {
+             re.sub(r"^\d+-|\.txt$", "", target): f"https://raw.githubusercontent.com/ox-vgg/vgg_face2/master/attributes/{target}"
+             for target in targets
+         }
+         target_dict["identity"] = "https://huggingface.co/datasets/ProgramComputer/VGGFace2/raw/main/meta/identity_meta.csv"
+         metadata = dl_manager.download(target_dict)
+
+         # Download the train/test image archives and iterate over their
+         # members lazily rather than extracting them.
+         archive_paths = dl_manager.download(_URLS["default"])
+
+         return [
+             datasets.SplitGenerator(
+                 name="train",
+                 gen_kwargs={
+                     "paths": dl_manager.iter_archive(archive_paths["train"]),
+                     "meta_paths": metadata,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name="test",
+                 gen_kwargs={
+                     "paths": dl_manager.iter_archive(archive_paths["test"]),
+                     "meta_paths": metadata,
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, paths, meta_paths):
+         key = 0
+         # Per-identity metadata (Class_ID -> name, sample count, ...).
+         meta = pd.read_csv(
+             meta_paths["identity"],
+             sep=",",
+             index_col=0,
+             engine="python",
+         )
+         # Gather every per-image attribute file into one frame indexed by
+         # "Class_ID/Image_Name", so each image needs a single lookup.
+         attribute_frames = []
+         for name, conf in meta_paths.items():
+             if name == "identity":
+                 continue
+             temp = pd.read_csv(conf, sep="\t", header=None, names=["Image_Path", name.lower()])
+             attribute_frames.append(temp.set_index("Image_Path"))
+         attributes = pd.concat(attribute_frames, axis=1)
+
+         for file_path, file_obj in paths:
+             if not file_path.endswith(".jpg"):
+                 continue
+             # Archive members are laid out as "<root>/<Class_ID>/<Image_Name>".
+             parts = file_path.split("/")
+             identity = parts[-2]
+             image_key = "/".join(parts[-2:])
+             row = attributes.loc[image_key] if image_key in attributes.index else None
+             example = {
+                 "file": {"path": file_path, "bytes": file_obj.read()},
+                 "path": file_path,
+                 "identity": identity,
+             }
+             for column in attributes.columns:
+                 example[column] = "" if row is None else str(row[column])
+             yield key, example
+             key += 1
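
Once committed, the loader can be exercised through the datasets library. The snippet below is a minimal usage sketch, not part of the commit: it assumes the script sits at the root of the ProgramComputer/VGGFace2 repository (the repo the _URLS entries point at) and uses streaming so the tar archives are read lazily instead of being extracted up front.

import datasets

# Stream the "train" split produced by the SplitGenerator above.
ds = datasets.load_dataset("ProgramComputer/VGGFace2", split="train", streaming=True)

# Each example carries the image bytes plus the merged attribute columns.
sample = next(iter(ds))
print(sample["path"], sample["identity"], sample["male"])

With streaming enabled, dl_manager.iter_archive stays lazy, so examples are yielded as tar members are read.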