# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and ProgramComputer.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""VGGFace2 audio-visual human speech dataset.""" | |
import re

import pandas as pd

import datasets
_DESCRIPTION = "VGGFace2 is a large-scale face recognition dataset. Images are downloaded from Google Image Search and have large variations in pose, age, illumination, ethnicity and profession." | |
_CITATION = """\ | |
@article{DBLP:journals/corr/abs-1710-08092, | |
author = {Qiong Cao and | |
Li Shen and | |
Weidi Xie and | |
Omkar M. Parkhi and | |
Andrew Zisserman}, | |
title = {VGGFace2: {A} dataset for recognising faces across pose and age}, | |
journal = {CoRR}, | |
volume = {abs/1710.08092}, | |
year = {2017}, | |
url = {http://arxiv.org/abs/1710.08092}, | |
eprinttype = {arXiv}, | |
eprint = {1710.08092}, | |
timestamp = {Wed, 04 Aug 2021 07:50:14 +0200}, | |
biburl = {https://dblp.org/rec/journals/corr/abs-1710-08092.bib}, | |
bibsource = {dblp computer science bibliography, https://dblp.org} | |
} | |
""" | |
_URLS = {
    "default": {
        "train": "https://huggingface.co/datasets/ProgramComputer/VGGFace2/resolve/main/data/vggface2_train.tar.gz",
        "test": "https://huggingface.co/datasets/ProgramComputer/VGGFace2/resolve/main/data/vggface2_test.tar.gz",
    }
}
class VGGFace2(datasets.GeneratorBasedBuilder):
    """VGGFace2 is a dataset of faces collected from Google Image Search."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="default", version=VERSION),
    ]
    def _info(self):
        features = {
            "file": datasets.Image(),
            "image_id": datasets.Value("string"),
            "class_id": datasets.Value("string"),
            "identity": datasets.Value("string"),
            "male": datasets.Value("string"),
            "black_hair": datasets.Value("string"),
            "brown_hair": datasets.Value("string"),
            "gray_hair": datasets.Value("string"),
            "blond_hair": datasets.Value("string"),
            "long_hair": datasets.Value("string"),
            "mustache_or_beard": datasets.Value("string"),
            "wearing_hat": datasets.Value("string"),
            "eyeglasses": datasets.Value("string"),
            "sunglasses": datasets.Value("string"),
            "mouth_open": datasets.Value("string"),
        }
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            supervised_keys=datasets.info.SupervisedKeysData("file", "class_id"),
            features=datasets.Features(features),
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        targets = [
            "01-Male.txt",
            "02-Black_Hair.txt",
            "03-Brown_Hair.txt",
            "04-Gray_Hair.txt",
            "05-Blond_Hair.txt",
            "06-Long_Hair.txt",
            "07-Mustache_or_Beard.txt",
            "08-Wearing_Hat.txt",
            "09-Eyeglasses.txt",
            "10-Sunglasses.txt",
            "11-Mouth_Open.txt",
        ]
        # Map e.g. "01-Male.txt" -> "Male", pointing at the upstream attribute file.
        target_dict = {
            re.sub(r"^\d+-|\.txt$", "", target): f"https://raw.githubusercontent.com/ox-vgg/vgg_face2/master/attributes/{target}"
            for target in targets
        }
        target_dict["identity"] = "https://huggingface.co/datasets/ProgramComputer/VGGFace2/raw/main/meta/identity_meta.csv"
        metadata = dl_manager.download(target_dict)
        # Download the image archives, then iterate over their members lazily.
        archive_paths = dl_manager.download(_URLS["default"])
        mapped_paths_train = dl_manager.iter_archive(archive_paths["train"])
        mapped_paths_test = dl_manager.iter_archive(archive_paths["test"])
        return [
            datasets.SplitGenerator(
                name="train",
                gen_kwargs={
                    "paths": mapped_paths_train,
                    "meta_paths": metadata,
                },
            ),
            datasets.SplitGenerator(
                name="test",
                gen_kwargs={
                    "paths": mapped_paths_test,
                    "meta_paths": metadata,
                },
            ),
        ]
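    # Notes on the plumbing above (assumptions about the standard `datasets` download
    # manager, not anything specific to this dataset): `dl_manager.download` called with
    # a dict returns a dict of local file paths under the same keys, and
    # `dl_manager.iter_archive` yields (path-inside-archive, file-object) pairs lazily,
    # without extracting the tarball to disk. `_generate_examples` below consumes
    # exactly those two structures.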
    def _generate_examples(self, paths, meta_paths):
        # Identity metadata (Class_ID, Name, Sample_Num, Flag, Gender), indexed by Class_ID.
        meta = pd.read_csv(
            meta_paths["identity"],
            sep=",",
            index_col=0,
            engine="python",
        )
        meta.columns = [str(col).strip() for col in meta.columns]
        # Each attribute file lists one "<Class_ID>/<Image_Name>\t<value>" pair per line.
        attributes = {}
        for name, path in meta_paths.items():
            if name == "identity":
                continue
            temp = pd.read_csv(path, sep="\t", header=None, names=["Image_Path", "Value"])
            attributes[name.lower()] = dict(zip(temp["Image_Path"], temp["Value"]))
        key = 0
        for file_path, file_obj in paths:
            if not file_path.endswith(".jpg"):
                continue
            # Archive members end in <Class_ID>/<Image_Name>.jpg.
            class_id, image_name = file_path.split("/")[-2:]
            image_path = f"{class_id}/{image_name}"
            example = {
                "file": {"path": file_path, "bytes": file_obj.read()},
                "image_id": image_name[: -len(".jpg")],
                "class_id": class_id,
                "identity": str(meta.loc[class_id, "Name"]).strip() if class_id in meta.index else "",
            }
            # Attribute values are kept as strings; images absent from a file default to "".
            for name, values in attributes.items():
                example[name] = str(values.get(image_path, ""))
            yield key, example
            key += 1
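

# A minimal usage sketch (assumes this script is the dataset's loading script on the
# Hugging Face Hub and that the image archives are reachable); not part of the builder:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("ProgramComputer/VGGFace2", split="train", streaming=True)
#     example = next(iter(ds))
#     example["file"]      # PIL image decoded by the datasets.Image() feature
#     example["class_id"]  # e.g. "n000002"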