# Turing-Machine-Bench / dataset.py
import json
import datasets
_CITATION = """\
@misc{TMBench2025,
  title={TMBench Dataset},
  author={Your Name},
  year={2025}
}
"""
_DESCRIPTION = """\
TMBench is a symbolic rewriting benchmark dataset with four variants:
- TMBench: the base version, with Latin letters in symbol sets
- TMBenchGreek: includes Greek characters in symbol sets
- TMBenchNumber: includes numeric characters in symbol sets
- TMBenchSpecial: includes special characters in symbol sets

Each sample includes:
- An initial string
- A set of rewriting rules (symbol → string)
- Step-wise rewriting results
- The number of deleted characters per step
"""
_HOMEPAGE = "https://gitee.pjlab.org.cn/L1/wuhaitao/DeepCADParser"
class TMBenchConfig(datasets.BuilderConfig):
    """BuilderConfig for a TMBench variant."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

class TMBench(datasets.GeneratorBasedBuilder):
    """Loader for the TMBench symbolic rewriting benchmark."""

    BUILDER_CONFIGS = [
        TMBenchConfig(name="latin", version=datasets.Version("1.0.0"), description="TMBench with Latin letters"),
        TMBenchConfig(name="greek", version=datasets.Version("1.0.0"), description="TMBench with Greek letters"),
        TMBenchConfig(name="number", version=datasets.Version("1.0.0"), description="TMBench with numbers"),
        TMBenchConfig(name="special", version=datasets.Version("1.0.0"), description="TMBench with special characters"),
    ]
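
    # Assumption: treat the Latin variant as the default config, so
    # load_dataset(...) works without an explicit config name.
    DEFAULT_CONFIG_NAME = "latin"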
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "id": datasets.Value("string"),
                "init_str": datasets.Value("string"),
                # Rules map arbitrary symbols to replacement strings, so the
                # mapping is stored as a JSON-encoded string: a fixed-key
                # Features dict cannot express per-sample keys.
                "rule": datasets.Value("string"),
                "delete_count": datasets.Value("int32"),
                "step_results": datasets.Sequence(datasets.Value("string")),
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        # Map each config to its JSON data file inside the repository.
        name_map = {
            "latin": "TMBench.json",
            "greek": "TMBenchGreek.json",
            "number": "TMBenchNumber.json",
            "special": "TMBenchSpecial.json",
        }
        file_path = dl_manager.download_and_extract(name_map[self.config.name])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": file_path},
            )
        ]
    def _generate_examples(self, filepath):
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
        for idx, sample in enumerate(data["samples"]):
            yield idx, {
                "id": sample.get("id", str(idx)),
                "init_str": sample["init_str"],
                # Serialize the rule dict to JSON to match the string
                # feature declared in _info.
                "rule": json.dumps(sample["rule"], ensure_ascii=False),
                "delete_count": sample["delete_count"],
                "step_results": sample["step_results"],
            }
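
# Minimal usage sketch (assumption: this script is the loading script of a
# Hugging Face dataset repository; the repo id below is a placeholder, not
# confirmed by this file):
#
#     from datasets import load_dataset
#     import json
#
#     ds = load_dataset("your-org/Turing-Machine-Bench", "latin", split="train")
#     sample = ds[0]
#     rules = json.loads(sample["rule"])  # recover the symbol → string map
#     print(sample["init_str"], rules, sample["step_results"])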