# Author: Roman Janík
# Script for creating a CoNLL format of my dataset from text files and Label Studio annotations.
#
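# Example invocation (script and file names are illustrative):
#   python create_conll.py -s annotations.json -o ./conll_out
#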

import argparse
import json
import os

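# sent_tokenize/word_tokenize rely on the NLTK "punkt" tokenizer models (which
# include a Czech model); download them once with nltk.download("punkt").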
from nltk.tokenize import word_tokenize, sent_tokenize


def get_args():
    parser = argparse.ArgumentParser(description="Script for creating a CoNLL format of my dataset from text files and Label Studio annotations.")
    parser.add_argument("-s", "--source_file", required=True, help="Path to source Label Studio json annotations file.")
    parser.add_argument("-o", "--output_dir", required=True, help="Output dir for CoNLL dataset splits.")
    args = parser.parse_args()
    return args


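# word_tokenize follows the Penn Treebank convention of rewriting double quotes
# as `` and ''; map them back to plain " so token lengths match the source text.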
def fix_quotes(sentence):
    return [word.replace("``", '"').replace("''", '"') for word in sentence]


def process_text_annotations(text, annotations):
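    # Tokenizes the page text, computes each token's character span in the
    # original text, and projects the Label Studio character-offset entity
    # annotations onto the tokens as BIO tags.
    # The map below translates Label Studio label names to the dataset's
    # one-letter entity types.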
    entity_types_map = {
        "Personal names": "p",
        "Institutions": "i",
        "Geographical names": "g",
        "Time expressions": "t",
        "Artifact names/Objects": "o"
    }

    sentences = sent_tokenize(text, language="czech")
    sentences_t = [fix_quotes(word_tokenize(x, language="czech")) for x in sentences]

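    # Align tokens with character offsets: find the first non-whitespace
    # character, then record a (start, end) span for every token, skipping
    # whitespace between tokens. Each token starts with tag "O" (outside).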
    sentences_idx = []
    start = 0
    for i, c in enumerate(text):
        if not c.isspace():
            start = i
            break

    for sentence in sentences_t:
        sentence_idx = []
        for word in sentence:
            end = start + len(word)
            sentence_idx.append({"word": word, "start": start, "end": end, "entity_type": "O"})
            # Advance start to the next non-whitespace character after this token.
            start = end
            while start < len(text) and text[start].isspace():
                start += 1
        sentences_idx.append(sentence_idx)

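    # Project each annotated character span onto the token spans: the first
    # overlapping token gets a "B-" tag, later tokens inside the span get "I-"
    # tags, and the search stops once a token ends past the span. The extra
    # punctuation checks tolerate small offset mismatches between the
    # tokenizer's spans and the Label Studio annotation boundaries.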
    for n_entity in annotations:
        begin = True
        done = False
        for sentence_idx in sentences_idx:
            for word_idx in sentence_idx:
                if word_idx["start"] >= n_entity["start"]\
                        and (word_idx["end"] <= n_entity["end"]
                             or (not text[word_idx["end"]-1].isalnum()) and len(word_idx["word"]) > 1) and begin:
                    word_idx["entity_type"] = "B-" + entity_types_map[n_entity["labels"][0]]
                    begin = False
                    if word_idx["end"] >= n_entity["end"]:
                        done = True
                        break
                elif word_idx["start"] > n_entity["start"] and (word_idx["end"] <= n_entity["end"]
                    or (not text[word_idx["end"]-1].isalnum() and text[word_idx["start"]].isalnum())):
                    word_idx["entity_type"] = "I-" + entity_types_map[n_entity["labels"][0]]
                    if word_idx["end"] >= n_entity["end"]:
                        done = True
                        break
                elif word_idx["end"] > n_entity["end"]:
                    done = True
                    break
            if done:
                break

    # Emit CoNLL lines, e.g. "Praha B-g": one token per line, sentences
    # separated by a blank line.
    conll_sentences = "\n\n".join(
        "\n".join(w["word"] + " " + w["entity_type"] for w in sentence_idx)
        for sentence_idx in sentences_idx
    )

    return conll_sentences


def main():
    args = get_args()

    print("Script for creating a CoNLL format of my dataset from text files and Label Studio annotations. "
          "Script goes through page text files and their annotations json record. "
          "Output CoNLL dataset file is saved to the output directory.")

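    # The Label Studio export is a JSON list of tasks; each task carries a
    # reference to the page text ("text") and its entity annotations ("ner").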
    with open(args.source_file, encoding="utf-8") as f:
        annotations = json.load(f)

    print("Starting documents processing...")

    with open(os.path.join(args.output_dir, "poner.conll"), "w", encoding="utf-8") as f:
        for page in annotations:
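            # Label Studio serves source files over a local HTTP endpoint;
            # rewrite the URL to the corresponding local dataset path.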
            page_text_path = page["text"].replace("http://localhost:8081", "../../../datasets/poner1.0/data")
            with open(page_text_path, encoding="utf-8") as p_f:
                page_text = p_f.read()
                processed_page = process_text_annotations(page_text, page["ner"])
                f.write(processed_page + "\n\n")

    print("Annotations are processed.")


if __name__ == '__main__':
    main()