Tasks: Token Classification (named-entity-recognition)
Modalities: Text
Formats: parquet
Languages: Tagalog
Size: 1K - 10K
title: "TLUnified-NER Corpus" | |
description: | | |
This dataset contains the annotated TLUnified corpora from Cruz and Cheng | |
(2021). It consists of a curated sample of around 7,000 documents for the | |
named entity recognition (NER) task. The majority of the corpus are news | |
reports in Tagalog, resembling the domain of the original ConLL 2003. There | |
are three entity types: Person (PER), Organization (ORG), and Location (LOC). | |
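  For illustration, here is how a short, entirely hypothetical Tagalog
  sentence ("Pumunta si Juan dela Cruz sa Maynila." / "Juan dela Cruz went to
  Manila.") would look in IOB encoding: one token per line, with `B-` marking
  the beginning of an entity, `I-` its continuation, and `O` marking tokens
  outside any entity.

  ```
  Pumunta   O
  si        O
  Juan      B-PER
  dela      I-PER
  Cruz      I-PER
  sa        O
  Maynila   B-LOC
  .         O
  ```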
  ### About this repository

  This repository is a [spaCy project](https://spacy.io/usage/projects) for
  converting the annotated spaCy files into IOB. The process goes like this:
  we download the raw corpus from Google Cloud Storage (GCS), convert the
  spaCy files into a readable IOB format, and parse that using our loading
  script (i.e., `tlunified-ner.py`). We also ship the IOB files directly so
  that they are easier to access.
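  Because the repository includes a `datasets` loading script, the corpus can
  be pulled straight from the Hub. A minimal sketch, assuming the dataset is
  published under a Hub id like `ljvmiranda921/tlunified-ner` (substitute the
  actual repository id):

  ```python
  from datasets import load_dataset

  # Hypothetical Hub id; replace with this repository's actual id. Recent
  # versions of `datasets` may also require trust_remote_code=True for
  # script-based datasets such as this one.
  ds = load_dataset("ljvmiranda921/tlunified-ner")

  # Inspect one training example (tokens plus their NER tags).
  print(ds["train"][0])
  ```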
directories: ["assets", "corpus/spacy", "corpus/iob"] | |
vars: | |
version: 1.0 | |
assets: | |
- dest: assets/corpus.tar.gz | |
description: "Annotated TLUnified corpora in spaCy format with train, dev, and test splits." | |
url: "https://storage.googleapis.com/ljvmiranda/calamanCy/tl_tlunified_gold/v${vars.version}/corpus.tar.gz" | |
workflows: | |
all: | |
- "setup-data" | |
- "upload-to-hf" | |
commands: | |
- name: "setup-data" | |
help: "Prepare the Tagalog corpora used for training various spaCy components" | |
script: | |
- mkdir -p corpus/spacy | |
- tar -xzvf assets/corpus.tar.gz -C corpus/spacy | |
- python -m spacy_to_iob corpus/spacy/ corpus/iob/ | |
outputs: | |
- corpus/iob/train.iob | |
- corpus/iob/dev.iob | |
- corpus/iob/test.iob | |
- name: "upload-to-hf" | |
help: "Upload dataset to HuggingFace Hub" | |
script: | |
- git push | |
deps: | |
- corpus/iob/train.iob | |
- corpus/iob/dev.iob | |
- corpus/iob/test.iob | |
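For transparency, below is a minimal sketch of what a spaCy-to-IOB conversion
step like the `spacy_to_iob` call above might do; the function name, paths,
and entry point are illustrative assumptions, not the project's actual script.

```python
from pathlib import Path

import spacy
from spacy.tokens import DocBin


def convert_spacy_to_iob(spacy_path: Path, iob_path: Path) -> None:
    """Convert a serialized DocBin of annotated docs into a one-token-per-line IOB file."""
    # A blank Tagalog pipeline provides the vocab needed to deserialize the docs.
    nlp = spacy.blank("tl")
    doc_bin = DocBin().from_disk(spacy_path)

    lines = []
    for doc in doc_bin.get_docs(nlp.vocab):
        for token in doc:
            # token.ent_iob_ is "B", "I", or "O"; ent_type_ holds PER/ORG/LOC.
            tag = token.ent_iob_ or "O"
            if token.ent_type_:
                tag = f"{tag}-{token.ent_type_}"
            lines.append(f"{token.text}\t{tag}")
        lines.append("")  # blank line separates documents

    iob_path.write_text("\n".join(lines), encoding="utf-8")


if __name__ == "__main__":
    # Hypothetical invocation mirroring the project command above.
    convert_spacy_to_iob(Path("corpus/spacy/train.spacy"), Path("corpus/iob/train.iob"))
```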