taiga75 committed
Commit 8d7a9f4 · verified · 1 Parent(s): 4f80b6c

Upload folder using huggingface_hub

README.md CHANGED
@@ -1,3 +1,65 @@
- ---
- license: mit
- ---
+ ---
+ language: ru
+ tags:
+ - historical-text
+ - russian
+ - ocr
+ - trocr
+ license: mit
+ metrics:
+ - cer
+ - wer
+ ---
+
+ # Historical Russian TrOCR Model for Civil Script (ru-trocr-1700s)
+
+ ## Model Description
+ This model is trained specifically to recognize Russian Civil Script (гражданский шрифт) from the 18th century. It handles:
+ - Historical letters: ѣ, і, ѳ, ѵ, ъ
+ - Civil script variations of standard Cyrillic characters
+ - Both uppercase and lowercase variants
+ - Special typographic features of 18th-century printing
+
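+ To sanity-check that decoded output actually preserves these pre-reform code points (rather than silently normalizing them to modern Cyrillic), a quick check along the following lines can help. This is only an illustrative sketch; the character list simply mirrors the letters named above.
+
+ ```python
+ # Illustrative helper: report which pre-reform letters occur in recognized text.
+ HISTORICAL_CHARS = set("ѣіѳѵъѢІѲѴЪ")  # letters listed in the model description
+
+ def historical_letters(text: str) -> set:
+     """Return the pre-reform letters that appear in `text`."""
+     return {ch for ch in text if ch in HISTORICAL_CHARS}
+
+ print(historical_letters("Въ лѣто отъ сотворенія міра"))  # e.g. {'ъ', 'ѣ', 'і'}
+ ```
+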
+ ## Model Performance Metrics
+ - Character Error Rate (CER): 1.69%
+ - Word Error Rate (WER): 5.75%
+ - Sequence Accuracy: 80.21%
+ - Training Loss: 0.0403
+ - Evaluation Loss: 0.0351
+
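+ The CER and WER above can be recomputed from paired line-level transcriptions. The sketch below assumes the third-party `jiwer` package (`pip install jiwer`), which is not a dependency of this model — any CER/WER implementation works equally well.
+
+ ```python
+ # Minimal sketch: recompute CER/WER from reference/prediction pairs (jiwer assumed).
+ import jiwer
+
+ references = ["примѣръ первой строки", "вторая строка"]   # ground-truth line transcriptions
+ predictions = ["примѣръ первой строки", "вторая стрка"]   # model output for the same lines
+
+ print(f"CER: {jiwer.cer(references, predictions):.4f}")
+ print(f"WER: {jiwer.wer(references, predictions):.4f}")
+ ```
+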
+ ## Training Details
+ - Base Model: TrOCR
+ - Training Duration: ~25.5 hours
+ - Epochs: 3
+ - Steps: 1227
+ - Training Samples per Second: 0.428
+ - Special Focus: Civil script character recognition including historical letters and their variants
+ - Training Data: 18th-century Russian books from the National Library of Russia
+
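+ For readers who want to set up a comparable fine-tuning run, the reported values map onto a standard `Seq2SeqTrainingArguments` configuration as sketched below. Only the epoch count comes from this card; the batch size, output directory and other settings are placeholders, not the actual recipe used for this checkpoint.
+
+ ```python
+ # Sketch only: expressing the reported settings with Hugging Face Transformers.
+ # Values not stated on the card (batch size, output_dir, logging) are assumptions.
+ from transformers import Seq2SeqTrainingArguments
+
+ training_args = Seq2SeqTrainingArguments(
+     output_dir="ru-trocr-1700s-finetune",  # placeholder
+     num_train_epochs=3,                    # as reported above
+     per_device_train_batch_size=8,         # assumption
+     predict_with_generate=True,            # generate text during eval for CER/WER
+     logging_steps=100,
+     save_strategy="epoch",
+ )
+ ```
+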
+ ## Historical Context
+ The model is trained on texts printed in Civil Script (гражданский шрифт), introduced by Peter the Great's reform of 1708. The script marked a significant transition in Russian typography from Church Slavonic toward a more modern form of writing. The Civil Script remained the standard for Russian publishing houses and typographers until the 1830s, making it the primary typeface for Russian printed books throughout the 18th and early 19th centuries.
+
+ ![Russian Civil Script Alphabet](XVIII_century_Russian_font.png)
+
+ ## Limitations and Recommendations
+ - Optimized for line-level recognition of historical Russian texts in Civil Script
+ - Best performance on well-segmented lines
+ - May require pre-processing for damaged or low-quality images (see the sketch below)
+ - Specifically tuned for 18th-century Russian printing conventions
+
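+ As a starting point for the pre-processing mentioned above, light contrast normalization with Pillow is often enough for faded or low-quality scans. This is an example, not a prescribed pipeline:
+
+ ```python
+ # Example pre-processing for a faded or low-quality scan of a single text line.
+ from PIL import Image, ImageOps
+
+ image = Image.open("path_to_image")
+ image = ImageOps.grayscale(image)     # drop colour noise from paper and ink
+ image = ImageOps.autocontrast(image)  # stretch contrast on faded print
+ image = image.convert("RGB")          # the processor in the usage example expects 3 channels
+ ```
+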
+ ## Usage Example
+ ```python
+ from transformers import TrOCRProcessor, VisionEncoderDecoderModel
+ from PIL import Image
+
+ processor = TrOCRProcessor.from_pretrained("taiga75/ru-trocr-1700s")
+ model = VisionEncoderDecoderModel.from_pretrained("taiga75/ru-trocr-1700s")
+
+ # Process image
+ image = Image.open("path_to_image").convert("RGB")
+ pixel_values = processor(image, return_tensors="pt").pixel_values
+
+ # Generate text
+ generated_ids = model.generate(pixel_values)
+ generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+ ```
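+
+ Decoding defaults come from this repository's `generation_config.json` (beam search with `num_beams=4`, `max_length=64`). Continuing from the snippet above, they can be overridden per call if needed:
+
+ ```python
+ # Optional: override the defaults shipped in generation_config.json for this call.
+ generated_ids = model.generate(pixel_values, num_beams=4, max_length=64)
+ generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+ print(generated_text)
+ ```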
XVIII_century_Russian_font.png ADDED
config.json ADDED
@@ -0,0 +1,174 @@
+ {
+   "_name_or_path": "model_clean",
+   "architectures": [
+     "VisionEncoderDecoderModel"
+   ],
+   "decoder": {
+     "_name_or_path": "",
+     "activation_dropout": 0.0,
+     "activation_function": "gelu",
+     "add_cross_attention": true,
+     "architectures": null,
+     "attention_dropout": 0.0,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": 0,
+     "chunk_size_feed_forward": 0,
+     "classifier_dropout": 0.0,
+     "cross_attention_hidden_size": 768,
+     "d_model": 1024,
+     "decoder_attention_heads": 16,
+     "decoder_ffn_dim": 4096,
+     "decoder_layerdrop": 0.0,
+     "decoder_layers": 12,
+     "decoder_start_token_id": 2,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "dropout": 0.1,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": 2,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "init_std": 0.02,
+     "is_decoder": true,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layernorm_embedding": true,
+     "length_penalty": 1.0,
+     "max_length": 64,
+     "max_position_embeddings": 512,
+     "min_length": 0,
+     "model_type": "trocr",
+     "no_repeat_ngram_size": 0,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 1,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "scale_embedding": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "typical_p": 1.0,
+     "use_bfloat16": false,
+     "use_cache": false,
+     "use_learned_position_embeddings": true,
+     "vocab_size": 50265
+   },
+   "decoder_start_token_id": 0,
+   "encoder": {
+     "_name_or_path": "",
+     "add_cross_attention": false,
+     "architectures": null,
+     "attention_probs_dropout_prob": 0.0,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "encoder_stride": 16,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "gelu",
+     "hidden_dropout_prob": 0.0,
+     "hidden_size": 768,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "image_size": 384,
+     "initializer_range": 0.02,
+     "intermediate_size": 3072,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-12,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "min_length": 0,
+     "model_type": "vit",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 12,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_channels": 3,
+     "num_hidden_layers": 12,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": null,
+     "patch_size": 16,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "qkv_bias": false,
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "typical_p": 1.0,
+     "use_bfloat16": false
+   },
+   "eos_token_id": 2,
+   "is_encoder_decoder": true,
+   "max_length": 64,
+   "model_type": "vision-encoder-decoder",
+   "pad_token_id": 1,
+   "processor_class": "TrOCRProcessor",
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.44.1",
+   "vocab_size": 50265
+ }
generation_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "decoder_start_token_id": 0,
+   "early_stopping": true,
+   "eos_token_id": 2,
+   "max_length": 64,
+   "num_beams": 4,
+   "pad_token_id": 1,
+   "transformers_version": "4.44.1"
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:52176226bbe849befe48a03f5111332fd6d12de50186084c02796d7dbe1bd0bd
+ size 1335747032
preprocessor_config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "processor_class": "TrOCRProcessor",
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 384,
+     "width": 384
+   }
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "cls_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50264": {
+       "content": "<mask>",
+       "lstrip": true,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "errors": "replace",
+   "mask_token": "<mask>",
+   "model_max_length": 512,
+   "pad_token": "<pad>",
+   "processor_class": "TrOCRProcessor",
+   "sep_token": "</s>",
+   "tokenizer_class": "RobertaTokenizer",
+   "trim_offsets": true,
+   "unk_token": "<unk>"
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff