# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Human Activity Recognition."""
import os
import numpy as np
import datasets
_CITATION = """\
@misc{misc_smartphone-based_recognition_of_human_activities_and_postural_transitions_341,
author = {Reyes-Ortiz, Jorge and Anguita, Davide and Oneto, Luca and Parra, Xavier},
title = {{Smartphone-Based Recognition of Human Activities and Postural Transitions}},
year = {2015},
howpublished = {UCI Machine Learning Repository},
note = {{DOI}: https://doi.org/10.24432/C54G7M}
}
"""

_DESCRIPTION = """\
The Human Activity Recognition (HAPT) dataset: 561-dimensional feature vectors computed from
smartphone accelerometer and gyroscope signals, each labelled with one of 12 activities or
postural transitions and the id of the subject who performed it.
"""

_HOMEPAGE = "http://archive.ics.uci.edu/dataset/341/smartphone+based+recognition+of+human+activities+and+postural+transitions"

_LICENSE = "Creative Commons Attribution 4.0 International (CC BY 4.0) license"

_URL = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00341/HAPT%20Data%20Set.zip'
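# The archive is expected to extract into Train/ and Test/ directories, each holding
# X_<split>.txt (feature vectors), y_<split>.txt (labels), and subject_id_<split>.txt,
# which are read in `_generate_examples` below.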

_LABEL_NAMES = [
    "WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS", "SITTING", "STANDING", "LAYING",
    "STAND_TO_SIT", "SIT_TO_STAND", "SIT_TO_LIE", "LIE_TO_SIT", "STAND_TO_LIE", "LIE_TO_STAND",
]


class HARDataset(datasets.GeneratorBasedBuilder):
"""Human Activity Recognition."""
VERSION = datasets.Version("1.1.0")
def _info(self):
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
features=datasets.Features({
"features": datasets.Sequence(feature=datasets.Value("float32"), length=561),
"labels": datasets.ClassLabel(12, names=_LABEL_NAMES),
"subject id": datasets.Value("uint8"),
}),
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
# Citation for the dataset
citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
data_dir = dl_manager.download_and_extract(_URL)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(data_dir, "Train"),
"split": "train",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": os.path.join(data_dir, "Test"),
"split": "test"
},
),
        ]

    # The method parameters are unpacked from the `gen_kwargs` given in `_split_generators`.
    def _generate_examples(self, filepath, split):
        # Each split directory holds whitespace-separated text files: X_<split>.txt with the
        # 561 feature values per row, y_<split>.txt with the activity labels, and
        # subject_id_<split>.txt with the id of the subject who performed each activity.
        X = np.genfromtxt(os.path.join(filepath, f"X_{split}.txt"))
        # The raw labels are 1-indexed, so shift them to the 0-indexed ClassLabel encoding.
        Y = np.genfromtxt(os.path.join(filepath, f"y_{split}.txt")).astype(int) - 1
        subject_ids = np.genfromtxt(os.path.join(filepath, f"subject_id_{split}.txt")).astype(int)
        for key, (x, y, subject_id) in enumerate(zip(X, Y, subject_ids)):
            yield key, {
                "features": x,
                "labels": y,
                "subject id": subject_id,
            }
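

# A minimal usage sketch, not part of the original script: it assumes a `datasets` release
# that still supports loading community scripts from a local path (pre-3.0).
if __name__ == "__main__":
    har = datasets.load_dataset(__file__)
    print(har)              # DatasetDict with "train" and "test" splits
    print(har["train"][0])  # a 561-float "features" list, a "labels" class id, and a "subject id"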