"""Human Activity Recognition.""" |
|
|
|
|
|
import os |
|
|
|
import numpy as np |
|
import datasets |
|
|
|
|
|
_CITATION = """\
@misc{misc_smartphone-based_recognition_of_human_activities_and_postural_transitions_341,
    author       = {Reyes-Ortiz, Jorge and Anguita, Davide and Oneto, Luca and Parra, Xavier},
    title        = {{Smartphone-Based Recognition of Human Activities and Postural Transitions}},
    year         = {2015},
    howpublished = {UCI Machine Learning Repository},
    note         = {{DOI}: https://doi.org/10.24432/C54G7M}
}
"""
|
_DESCRIPTION = """\
The Smartphone-Based Recognition of Human Activities and Postural Transitions (HAPT) dataset.
Each example is a 561-dimensional feature vector computed from smartphone accelerometer and
gyroscope recordings, labelled with one of 12 activity classes (six basic activities and six
postural transitions) and the id of the subject who performed the activity.
"""
|
_HOMEPAGE = "http://archive.ics.uci.edu/dataset/341/smartphone+based+recognition+of+human+activities+and+postural+transitions" |
|
_LICENSE = "Creative Commons Attribution 4.0 International (CC BY 4.0) license" |
|
_URL = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00341/HAPT%20Data%20Set.zip' |
|
_LABEL_NAMES = [
    "WALKING",
    "WALKING_UPSTAIRS",
    "WALKING_DOWNSTAIRS",
    "SITTING",
    "STANDING",
    "LAYING",
    "STAND_TO_SIT",
    "SIT_TO_STAND",
    "SIT_TO_LIE",
    "LIE_TO_SIT",
    "STAND_TO_LIE",
    "LIE_TO_STAND",
]
|
|
class HARDataset(datasets.GeneratorBasedBuilder):
    """Human Activity Recognition."""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "features": datasets.Sequence(feature=datasets.Value("float32"), length=561),
                "labels": datasets.ClassLabel(num_classes=12, names=_LABEL_NAMES),
                "subject id": datasets.Value("uint8"),
            }),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "Train"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "Test"),
                    "split": "test",
                },
            ),
        ]
|
    def _generate_examples(self, filepath, split):
        # Feature vectors, activity labels and subject ids are stored in parallel text files.
        X = np.genfromtxt(f"{filepath}/X_{split}.txt")
        # Labels in the raw files are 1-based; shift them to 0-based indices for ClassLabel.
        Y = np.genfromtxt(f"{filepath}/y_{split}.txt").astype(int) - 1
        subject_ids = np.genfromtxt(f"{filepath}/subject_id_{split}.txt").astype(int)
        for key, (x, y, subject_id) in enumerate(zip(X, Y, subject_ids)):
            yield key, {
                "features": x,
                "labels": y,
                "subject id": subject_id,
            }