import datasets
from datasets import GeneratorBasedBuilder, BuilderConfig, Sequence, Value

from .parsing import parse_incr


class LanguageSpecificConfig(BuilderConfig):
    def __init__(self, language, data_file, **kwargs):
        super().__init__(**kwargs)
        self.language = language
        self.data_file = data_file


class EnhancedCobaldDataset(GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        LanguageSpecificConfig(
            name="en",
            language="en",
            data_file="https://raw.githubusercontent.com/CobaldAnnotation/CobaldEng/refs/heads/main/enhanced/train.conllu",
            description="English dataset."
        ),
        LanguageSpecificConfig(
            name="ru",
            language="ru",
            data_file="https://raw.githubusercontent.com/CobaldAnnotation/CobaldRus/refs/heads/main/enhanced/train.conllu",
            description="Russian dataset."
        ),
        # Other languages here
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description="A CoBaLD dataset in CoNLL-U plus format.",
            features=datasets.Features({
                "ids": Sequence(Value("string")),
                "words": Sequence(Value("string")),
                "lemmas": Sequence(Value("string")),
                "upos": Sequence(Value("string")),
                "xpos": Sequence(Value("string")),
                # huggingface datasets can't handle dicts with dynamic keys, so represent feats as string
                "feats": Sequence(Value("string")),
                "heads": Sequence(Value("int32")),
                "deprels": Sequence(Value("string")),
                "deps": Sequence(Value("string")),
                "miscs": Sequence(Value("string")),
                "deepslots": Sequence(Value("string")),
                "semclasses": Sequence(Value("string")),
                "sent_id": Value("string"),
                "text": Value("string"),
            })
        )

    def _split_generators(self, dl_manager):
        data_path = dl_manager.download_and_extract(self.config.data_file)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_path}
            )
        ]

    def _generate_examples(self, filepath: str):
        """
        Generator function that reads a CoNLL-U file and yields one sentence at a time.
        Each sentence is represented as a dictionary where each field is a list.
        """
        yield from enumerate(parse_incr(filepath))
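

# --- Usage sketch ---
# A minimal sketch of how this builder might be loaded, assuming the file is used
# as a dataset loading script placed alongside the local `parsing` module; the
# script path below is hypothetical, not part of this repository.
#
#   from datasets import load_dataset
#
#   # name="en" selects the English BuilderConfig defined above
#   dataset = load_dataset("path/to/this_script.py", name="en", trust_remote_code=True)
#   example = dataset["train"][0]
#   print(example["words"], example["heads"])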