# coding=utf-8
# Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" VinDataVLSP Dataset"""


import re

import datasets
import pandas as pd


_DATA_URL = "https://dutudn-my.sharepoint.com/:u:/g/personal/122180028_sv1_dut_udn_vn/ESeeV5dFDtVKmnvwJA3jUd4BLLJ7DhpOwsyb8QwpldKHwQ?download=1"
_PROMPTS_URLS = {
    "train": "https://drive.google.com/uc?export=download&id=1eOOvCDz0uOBBRzsHK7NALcGA70-XbQrd",
    "test": "https://drive.google.com/uc?export=download&id=1r2wy5K0VL7wL_iMdtzMhGEy-_k3M2Gdv",
    "validation": "https://drive.google.com/uc?export=download&id=1c0YsA4x1Up9qjDpsj1VKH_86m85cTi79"
}
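
# Each prompts file is a tab-separated table; `_generate_examples` below
# expects (at least) two columns: `file_path` (clip path relative to the
# archive root) and `script` (the raw transcript). Illustrative row
# (assumed values, tabs shown as <TAB>):
#
#     file_path<TAB>script
#     clip_0001.wav<TAB>Xin chào, Việt Nam!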

_DESCRIPTION = """\
"""

_LANGUAGES = {
    "vi": {
        "Language": "Vietnamese",
        "Date": "2021-12-11",
        "Size": "11 GB",
        "Version": "vi_100h_2021-12-11",
    },
}


class VinDataVLSPConfig(datasets.BuilderConfig):
    """BuilderConfig for CommonVoice."""

    def __init__(self, name, sub_version, **kwargs):
        """
        Args:
          data_dir: `string`, the path to the folder containing the files in the
            downloaded .tar
          citation: `string`, citation for the data set
          url: `string`, url for information about the data set
          **kwargs: keyword arguments forwarded to super.
        """
        self.sub_version = sub_version
        self.language = kwargs.pop("language", None)
        self.date_of_snapshot = kwargs.pop("date", None)
        self.size = kwargs.pop("size", None)
        self.validated_hr_total = kwargs.pop("val_hrs", None)
        self.total_hr_total = kwargs.pop("total_hrs", None)
        self.num_of_voice = kwargs.pop("num_of_voice", None)
        description = ""
        super(VinDataVLSPConfig, self).__init__(
            name=name, version=datasets.Version("0.1.0", ""), description=description, **kwargs
        )


class VinDataVLSP(datasets.GeneratorBasedBuilder):
    """Speech-recognition dataset builder for VinDataVLSP (Vietnamese)."""

    DEFAULT_WRITER_BATCH_SIZE = 1000
    BUILDER_CONFIGS = [
        VinDataVLSPConfig(
            name=lang_id,
            language=_LANGUAGES[lang_id]["Language"],
            sub_version=_LANGUAGES[lang_id]["Version"],
            # date=_LANGUAGES[lang_id]["Date"],
            # size=_LANGUAGES[lang_id]["Size"],
            # val_hrs=_LANGUAGES[lang_id]["Validated_Hr_Total"],
            # total_hrs=_LANGUAGES[lang_id]["Overall_Hr_Total"],
            # num_of_voice=_LANGUAGES[lang_id]["Number_Of_Voice"],
        )
        for lang_id in _LANGUAGES.keys()
    ]

    def _info(self):
        features = datasets.Features(
            {
                "file_path": datasets.Value("string"),
                "script": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=16_000),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Download the per-split prompt TSVs and the single audio archive;
        # the archive is streamed, not extracted (see `_generate_examples`).
        tsv_files = dl_manager.download(_PROMPTS_URLS)
        archive = dl_manager.download(_DATA_URL)
        # Top-level folder of the clips inside the archive, as stored in the
        # tar member names (including the leading "./").
        path_to_clips = "./VinDataVLSP"

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "tsv_files": tsv_files["train"],
                    "audio_files": dl_manager.iter_archive(archive),
                    "path_to_clips": path_to_clips,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "tsv_files": tsv_files["test"],
                    "audio_files": dl_manager.iter_archive(archive),
                    "path_to_clips": path_to_clips,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "tsv_files": tsv_files["validation"],
                    "audio_files": dl_manager.iter_archive(archive),
                    "path_to_clips": path_to_clips,
                },
            ),
        ]

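    # `dl_manager.iter_archive(archive)` streams the tar members in archive
    # order as (path, file_object) pairs without extracting to disk; each
    # split re-iterates the same downloaded archive file.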
    def _generate_examples(self, tsv_files, audio_files, path_to_clips):
        """Yields examples keyed by the in-archive audio path."""
        # The "audio" feature is filled from the archive stream; only
        # "file_path" and "script" come from the TSV columns.
        examples = {}

        df = pd.read_csv(tsv_files, sep="\t", header=0)
        df = df.dropna()
        # Punctuation and stray characters to strip from transcripts.
        chars_to_ignore_regex = r'[,?.!\-;:"“%\'�]'

        for file_path, script in zip(df["file_path"], df["script"]):
            # set full path for mp3 audio file
            audio_path = path_to_clips + "/" + file_path
            # Preprocessing script
            if ":" in script:
                two_dot_index = script.index(":")
                script = script[two_dot_index + 1:]
            script = script.replace("\n", " ")
            script = re.sub(chars_to_ignore_regex, '', script).lower()
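            # Illustrative effect of the normalization above:
            #   "Xin chào, Việt Nam!"  ->  "xin chào việt nam"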

            examples[audio_path] = {
                "file_path": audio_path,
                "script": script,
            }

        for path, f in audio_files:
            if path.startswith(path_to_clips) and path in examples:
                # `f` is the open archive member; read the raw bytes so the
                # Audio feature can decode them on access.
                audio = {"path": path, "bytes": f.read()}
                yield path, {**examples[path], "audio": audio}
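

if __name__ == "__main__":
    # Minimal loading sketch, not part of the builder. Assumptions: this
    # script is run directly (so `__file__` points at it) and the download
    # URLs above are still live; note this fetches the full ~11 GB archive.
    from datasets import load_dataset

    ds = load_dataset(__file__, "vi", split="train")
    sample = ds[0]
    print(sample["file_path"])
    print(sample["script"])
    print(sample["audio"]["sampling_rate"])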