dianecy committed (verified)
Commit 0b32e3c · 1 Parent(s): 0974c47

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes; see the raw diff for the full set.
Files changed (50)
  1. .gitattributes +1 -0
  2. RIS-DMMI/.gitignore +17 -0
  3. RIS-DMMI/README.md +79 -0
  4. RIS-DMMI/__pycache__/args.cpython-39.pyc +0 -0
  5. RIS-DMMI/__pycache__/transforms.cpython-39.pyc +0 -0
  6. RIS-DMMI/__pycache__/utils.cpython-39.pyc +0 -0
  7. RIS-DMMI/args.py +65 -0
  8. RIS-DMMI/bert/CONTRIBUTING.md +31 -0
  9. RIS-DMMI/bert/LICENSE +202 -0
  10. RIS-DMMI/bert/README.md +1117 -0
  11. RIS-DMMI/bert/__init__.py +15 -0
  12. RIS-DMMI/bert/__pycache__/__init__.cpython-39.pyc +0 -0
  13. RIS-DMMI/bert/__pycache__/activations.cpython-39.pyc +0 -0
  14. RIS-DMMI/bert/__pycache__/configuration_bert.cpython-39.pyc +0 -0
  15. RIS-DMMI/bert/__pycache__/configuration_utils.cpython-39.pyc +0 -0
  16. RIS-DMMI/bert/__pycache__/file_utils.cpython-39.pyc +0 -0
  17. RIS-DMMI/bert/__pycache__/generation_utils.cpython-39.pyc +0 -0
  18. RIS-DMMI/bert/__pycache__/modeling_bert.cpython-39.pyc +0 -0
  19. RIS-DMMI/bert/__pycache__/modeling_utils.cpython-39.pyc +0 -0
  20. RIS-DMMI/bert/__pycache__/tokenization_bert.cpython-39.pyc +0 -0
  21. RIS-DMMI/bert/__pycache__/tokenization_utils.cpython-39.pyc +0 -0
  22. RIS-DMMI/bert/__pycache__/tokenization_utils_base.cpython-39.pyc +0 -0
  23. RIS-DMMI/bert/activations.py +56 -0
  24. RIS-DMMI/bert/bert-base-uncased-vocab.txt +0 -0
  25. RIS-DMMI/bert/configuration_bert.py +144 -0
  26. RIS-DMMI/bert/configuration_utils.py +414 -0
  27. RIS-DMMI/bert/create_pretraining_data.py +469 -0
  28. RIS-DMMI/bert/extract_features.py +419 -0
  29. RIS-DMMI/bert/file_utils.py +816 -0
  30. RIS-DMMI/bert/generation_utils.py +993 -0
  31. RIS-DMMI/bert/modeling.py +986 -0
  32. RIS-DMMI/bert/modeling_bert.py +1569 -0
  33. RIS-DMMI/bert/modeling_test.py +277 -0
  34. RIS-DMMI/bert/modeling_utils.py +1268 -0
  35. RIS-DMMI/bert/multilingual.md +303 -0
  36. RIS-DMMI/bert/optimization.py +174 -0
  37. RIS-DMMI/bert/optimization_test.py +48 -0
  38. RIS-DMMI/bert/predicting_movie_reviews_with_bert_on_tf_hub.ipynb +1231 -0
  39. RIS-DMMI/bert/requirements.txt +2 -0
  40. RIS-DMMI/bert/run_classifier.py +981 -0
  41. RIS-DMMI/bert/run_classifier_with_tfhub.py +314 -0
  42. RIS-DMMI/bert/run_pretraining.py +493 -0
  43. RIS-DMMI/bert/run_squad.py +1283 -0
  44. RIS-DMMI/bert/sample_text.txt +33 -0
  45. RIS-DMMI/bert/tokenization.py +399 -0
  46. RIS-DMMI/bert/tokenization_bert.py +546 -0
  47. RIS-DMMI/bert/tokenization_test.py +137 -0
  48. RIS-DMMI/bert/tokenization_utils.py +723 -0
  49. RIS-DMMI/bert/tokenization_utils_base.py +0 -0
  50. RIS-DMMI/bert/vocab.txt +0 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ RIS-DMMI/refer/evaluation/tokenizer/stanford-corenlp-3.4.1.jar filter=lfs diff=lfs merge=lfs -text
RIS-DMMI/.gitignore ADDED
@@ -0,0 +1,17 @@
+ .vscode
+ .idea
+ lib/__pycache__/*
+ *.pkl
+ *.json
+ models/*
+ bert/pytorch_model.bin
+ *.jpg
+ *.pth
+ *.png
+ vis*
+ *.zip
+ *.csv
+ *.pyc
+ *.so
+ *.json
+ *.p
RIS-DMMI/README.md ADDED
@@ -0,0 +1,79 @@
+ # RIS-DMMI
+ This repository provides the PyTorch implementation of DMMI in the following paper:<br />
+ __Beyond One-to-One: Rethinking the Referring Image Segmentation (ICCV 2023)__ <br />
+
+ # News
+ * 2023.10.03 - The final version of our dataset has been released. Please remember to download the latest version.
+ * 2023.10.03 - We have released our code.
+
+ # Dataset
+ We collect a new comprehensive dataset, Ref-ZOM (**Z**ero/**O**ne/**M**any), which contains image-text pairs in one-to-zero, one-to-one and one-to-many conditions. Similar to RefCOCO, RefCOCO+ and G-Ref, all the images in Ref-ZOM are selected from the COCO dataset. Here, we provide the text, image and annotation information of Ref-ZOM, which should be used together with COCO_trainval2014. <br />
+ Our dataset can be downloaded from:<br />
+ [[Baidu Cloud](https://pan.baidu.com/s/1CxPYGWEadHhcViTH2iI7jw?pwd=g7uu)] [[Google Drive](https://drive.google.com/drive/folders/1FaH6U5pywSf0Ufnn4lYIVaykYxqU2vrA?usp=sharing)] <br />
+ Remember to download the original COCO dataset from:<br />
+ [[COCO Download](https://cocodataset.org/#download)]<br />
+
+ # Code
+
+ **Prepare**<br />
+ * Download COCO_train2014 and COCO_val2014, and merge the two datasets into a new folder "trainval2014" (see the merge sketch after this file). Then, at Line 52 of `/refer/refer.py`, assign the path of this folder to `self.Image_DIR`.<br />
+ * Download "Ref-ZOM(final).p" and rename it to "refs(final).p". Then put refs(final).p and instances.json into `/refer/data/ref-zom/*`. <br />
+ * Prepare BERT as in [LAVT](https://github.com/yz93/LAVT-RIS).
+ * Prepare RefCOCO, RefCOCO+ and RefCOCOg as in [LAVT](https://github.com/yz93/LAVT-RIS).
+
+ **Train**<br />
+ * Remember to change `--output_dir` and `--pretrained_backbone` to your own paths.<br />
+ * Use `--model` to select the backbone: 'dmmi-swin' for Swin-Base and 'dmmi_res' for ResNet-50.<br />
+ * Use `--dataset`, `--splitBy` and `--split` to select the dataset as follows:<br />
+ ```
+ # RefCOCO
+ --dataset refcoco, --splitBy unc, --split val
+ # RefCOCO+
+ --dataset refcoco+, --splitBy unc, --split val
+ # RefCOCOg (umd)
+ --dataset refcocog, --splitBy umd, --split val
+ # RefCOCOg (google)
+ --dataset refcocog, --splitBy google, --split val
+ # Ref-ZOM
+ --dataset ref-zom, --splitBy final, --split test
+ ```
+ * Begin training!<br />
+ ```
+ sh train.sh
+ ```
+
+ **Test**
+ * Remember to change `--test_parameter` to your own path. Meanwhile, set `--model`, `--dataset`, `--splitBy` and `--split` properly. <br />
+ * Begin testing!<br />
+ ```
+ sh test.sh
+ ```
+
+ # Parameters
+ **RefCOCOg (umd)**<br />
+ | Backbone | oIoU | mIoU | Google Drive | Baidu Cloud |
+ | ------------- | ------------- | ------------- | ------------- | ------------- |
+ | ResNet-101 | 59.02 | 62.59 | [Link](https://drive.google.com/file/d/1ziDIeioglD08QQyL-_yGFFlao3PtcJJS/view?usp=drive_link) | [Link](https://pan.baidu.com/s/1uKJ-Wu5TtJhphXNOXo3mIA?pwd=6cgb) |
+ | Swin-Base | 63.46 | 66.48 | [Link](https://drive.google.com/file/d/1uuGWSYLGYa_qMxTlnZxH6p9FMxQLOQfZ/view?usp=drive_link) | [Link](https://pan.baidu.com/s/1eAT0NgkID4qXpoXMf2bjEg?pwd=bq7w) |
+
+ **Ref-ZOM**<br />
+ | Backbone | oIoU | mIoU | Google Drive | Baidu Cloud |
+ | ------------- | ------------- | ------------- | ------------- | ------------- |
+ | Swin-Base | 68.77 | 68.25 | [Link](https://drive.google.com/file/d/1Ut_E-Fru0bCmjtaC2YhgOLZ7eJorOOpi/view?usp=drive_link) | [Link](https://pan.baidu.com/s/1T-u55rpbc4_CNEXmsA-OJg?pwd=hc6e) |
+
+ # Acknowledgements
+
+ We strongly appreciate the wonderful work of [LAVT](https://github.com/yz93/LAVT-RIS). Our code is partially built on this code-base. If you find our work helpful, we suggest you refer to [LAVT](https://github.com/yz93/LAVT-RIS) and cite it as well.<br />
+
+ # Citation
+ If you find our work helpful and want to cite it, please use the following citation info.<br />
+ ```
+ @InProceedings{Hu_2023_ICCV,
+     author    = {Hu, Yutao and Wang, Qixiong and Shao, Wenqi and Xie, Enze and Li, Zhenguo and Han, Jungong and Luo, Ping},
+     title     = {Beyond One-to-One: Rethinking the Referring Image Segmentation},
+     booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)},
+     month     = {October},
+     year      = {2023},
+     pages     = {4067-4077}
+ }
+ ```
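The "Prepare" step above needs a merged `trainval2014` image folder. A minimal sketch of one way to build it with symlinks (avoiding a full copy of the images), assuming a hypothetical local COCO root at `/data/coco`:

```python
import os

coco_root = '/data/coco'  # hypothetical local COCO root -- adjust to your setup
dst = os.path.join(coco_root, 'trainval2014')
os.makedirs(dst, exist_ok=True)

for split in ('train2014', 'val2014'):
    src = os.path.join(coco_root, split)
    for name in os.listdir(src):
        link = os.path.join(dst, name)
        if not os.path.exists(link):
            # symlink rather than copy, so the merged folder costs no extra disk
            os.symlink(os.path.join(src, name), link)
```

Afterwards, `self.Image_DIR` at Line 52 of `refer/refer.py` should point at `/data/coco/trainval2014` (or wherever you placed the merged folder).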
RIS-DMMI/__pycache__/args.cpython-39.pyc ADDED
Binary file (3.25 kB).
 
RIS-DMMI/__pycache__/transforms.cpython-39.pyc ADDED
Binary file (5.48 kB).
 
RIS-DMMI/__pycache__/utils.cpython-39.pyc ADDED
Binary file (7.13 kB).
 
RIS-DMMI/args.py ADDED
@@ -0,0 +1,65 @@
+ import argparse
+ import os
+
+
+ def get_parser():
+     parser = argparse.ArgumentParser(description='dmmi training and testing')
+     parser.add_argument('--amsgrad', action='store_true',
+                         help='if true, set amsgrad to True in an Adam or AdamW optimizer.')
+     parser.add_argument('-b', '--batch-size', default=8, type=int)
+     parser.add_argument('--bert_tokenizer', default='bert-base-uncased', help='BERT tokenizer')
+     parser.add_argument('--ck_bert', default='bert-base-uncased', help='pre-trained BERT weights')
+     parser.add_argument('--dataset', default='refcoco', help='refcoco, refcoco+, or refcocog')
+     parser.add_argument('--ddp_trained_weights', action='store_true',
+                         help='Only needs specifying when testing: '
+                              'whether the weights to be loaded are from a DDP-trained model')
+     parser.add_argument('--device', default='cuda:0', help='device')  # only used when testing on a single machine
+     parser.add_argument('--epochs', default=40, type=int, metavar='N', help='number of total epochs to run')
+     parser.add_argument('--fusion_drop', default=0.0, type=float, help='dropout rate for PWAMs')
+     parser.add_argument('--img_size', default=480, type=int, help='input image size')
+     # parser.add_argument("--local_rank", default=int(os.getenv("LOCAL_RANK", 0)), type=int, help='local rank for DistributedDataParallel')
+     parser.add_argument('--lr', default=0.00005, type=float, help='the initial learning rate')
+     parser.add_argument('--mha', default='', help='If specified, should be in the format of a-b-c-d, e.g., 4-4-4-4, '
+                                                   'where a, b, c, and d refer to the numbers of heads in stage-1, '
+                                                   'stage-2, stage-3, and stage-4 PWAMs')
+     parser.add_argument('--model', default='dmmi', help='model: dmmi')
+     parser.add_argument('--model_id', default='dmmi', help='name to identify the model')
+     parser.add_argument('--output_dir', default='./', help='path to save the parameters')
+     parser.add_argument('--pin_mem', action='store_true',
+                         help='If true, pin memory when using the data loader.')
+     parser.add_argument('--pretrained_backbone', default='',
+                         help='path to pre-trained Swin backbone weights')
+     parser.add_argument('--print-freq', default=10, type=int, help='print frequency')
+     parser.add_argument('--refer_data_root', default='/data2/dataset/COCO2014/', help='REFER dataset root directory')
+     parser.add_argument('--resume', default='', help='resume from checkpoint')
+     parser.add_argument('--test_parameter', default='', help='test from this parameter')
+     parser.add_argument('--split', default='val', help='only used when testing')
+     parser.add_argument('--splitBy', default='unc', help='change to umd or google when the dataset is G-Ref (RefCOCOg)')
+     parser.add_argument('--swin_type', default='base',
+                         help='tiny, small, base, or large variants of the Swin Transformer')
+     parser.add_argument('--wd', '--weight-decay', default=1e-2, type=float, metavar='W', help='weight decay',
+                         dest='weight_decay')
+     parser.add_argument('--window12', action='store_true',
+                         help='only needs specifying when testing; '
+                              'when training, window size is inferred from the pre-trained weights file name '
+                              '(containing \'window12\'). Initialize Swin with window size 12 instead of the default 7.')
+     parser.add_argument('-j', '--workers', default=8, type=int, metavar='N', help='number of data loading workers')
+
+
+     # metric-loss-related options
+     parser.add_argument('--metric_learning', action='store_true', help='whether to use metric learning')
+     parser.add_argument('--metric_loss_weight', default=0.1, type=float, help='weight for metric loss')
+     parser.add_argument('--metric_mode', default='hardpos_rev3', help='test options..')
+     parser.add_argument('--exclude_multiobj', action='store_true', help='exclude multi-object images')
+     parser.add_argument('--hn_prob', default=0.0, type=float, help='hard negative probability')
+     parser.add_argument('--hp_selection', default='strict', help='test options..')
+     parser.add_argument('--margin_value', default=10, type=float, help='margin for metric loss')
+     parser.add_argument('--temperature', default=0.05, type=float, help='test options..')
+     # parser.add_argument('--addzero', action='store_true', help='test options..')
+
+     return parser
+
+
+ if __name__ == "__main__":
+     parser = get_parser()
+     args_dict = parser.parse_args()
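A minimal usage sketch for `get_parser()`, assuming it is run from the `RIS-DMMI` directory so the file imports as `args`; the flag values mirror the README examples, and both paths are hypothetical placeholders:

```python
from args import get_parser

parser = get_parser()
args = parser.parse_args([
    '--model', 'dmmi-swin',          # Swin-Base backbone, per the README
    '--dataset', 'refcoco',
    '--splitBy', 'unc',
    '--split', 'val',
    '--output_dir', './checkpoints',                 # hypothetical path
    '--pretrained_backbone', './swin_base_w12.pth',  # hypothetical path
])
print(args.dataset, args.splitBy, args.lr, args.epochs)  # refcoco unc 5e-05 40
```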
RIS-DMMI/bert/CONTRIBUTING.md ADDED
@@ -0,0 +1,31 @@
+ # How to Contribute
+
+ BERT needs to maintain permanent compatibility with the pre-trained model files,
+ so we do not plan to make any major changes to this library (other than what was
+ promised in the README). However, we can accept small patches related to
+ re-factoring and documentation. To submit contributions, there are just a few
+ small guidelines you need to follow.
+
+ ## Contributor License Agreement
+
+ Contributions to this project must be accompanied by a Contributor License
+ Agreement. You (or your employer) retain the copyright to your contribution;
+ this simply gives us permission to use and redistribute your contributions as
+ part of the project. Head over to <https://cla.developers.google.com/> to see
+ your current agreements on file or to sign a new one.
+
+ You generally only need to submit a CLA once, so if you've already submitted one
+ (even if it was for a different project), you probably don't need to do it
+ again.
+
+ ## Code reviews
+
+ All submissions, including submissions by project members, require review. We
+ use GitHub pull requests for this purpose. Consult
+ [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
+ information on using pull requests.
+
+ ## Community Guidelines
+
+ This project follows
+ [Google's Open Source Community Guidelines](https://opensource.google.com/conduct/).
RIS-DMMI/bert/LICENSE ADDED
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
RIS-DMMI/bert/README.md ADDED
@@ -0,0 +1,1117 @@
+ # BERT
+
+ **\*\*\*\*\* New March 11th, 2020: Smaller BERT Models \*\*\*\*\***
+
+ This is a release of 24 smaller BERT models (English only, uncased, trained with WordPiece masking) referenced in [Well-Read Students Learn Better: On the Importance of Pre-training Compact Models](https://arxiv.org/abs/1908.08962).
+
+ We have shown that the standard BERT recipe (including model architecture and training objective) is effective on a wide range of model sizes, beyond BERT-Base and BERT-Large. The smaller BERT models are intended for environments with restricted computational resources. They can be fine-tuned in the same manner as the original BERT models. However, they are most effective in the context of knowledge distillation, where the fine-tuning labels are produced by a larger and more accurate teacher.
+
+ Our goal is to enable research in institutions with fewer computational resources and encourage the community to seek directions of innovation alternative to increasing model capacity.
+
+ You can download all 24 from [here][all], or individually from the table below:
+
+ | |H=128|H=256|H=512|H=768|
+ |---|:---:|:---:|:---:|:---:|
+ | **L=2** |[**2/128 (BERT-Tiny)**][2_128]|[2/256][2_256]|[2/512][2_512]|[2/768][2_768]|
+ | **L=4** |[4/128][4_128]|[**4/256 (BERT-Mini)**][4_256]|[**4/512 (BERT-Small)**][4_512]|[4/768][4_768]|
+ | **L=6** |[6/128][6_128]|[6/256][6_256]|[6/512][6_512]|[6/768][6_768]|
+ | **L=8** |[8/128][8_128]|[8/256][8_256]|[**8/512 (BERT-Medium)**][8_512]|[8/768][8_768]|
+ | **L=10** |[10/128][10_128]|[10/256][10_256]|[10/512][10_512]|[10/768][10_768]|
+ | **L=12** |[12/128][12_128]|[12/256][12_256]|[12/512][12_512]|[**12/768 (BERT-Base)**][12_768]|
+
+ Note that the BERT-Base model in this release is included for completeness only; it was re-trained under the same regime as the original model.
+
+ Here are the corresponding GLUE scores on the test set:
+
+ |Model|Score|CoLA|SST-2|MRPC|STS-B|QQP|MNLI-m|MNLI-mm|QNLI(v2)|RTE|WNLI|AX|
+ |---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
+ |BERT-Tiny|64.2|0.0|83.2|81.1/71.1|74.3/73.6|62.2/83.4|70.2|70.3|81.5|57.2|62.3|21.0|
+ |BERT-Mini|65.8|0.0|85.9|81.1/71.8|75.4/73.3|66.4/86.2|74.8|74.3|84.1|57.9|62.3|26.1|
+ |BERT-Small|71.2|27.8|89.7|83.4/76.2|78.8/77.0|68.1/87.0|77.6|77.0|86.4|61.8|62.3|28.6|
+ |BERT-Medium|73.5|38.0|89.6|86.6/81.6|80.4/78.4|69.6/87.9|80.0|79.1|87.7|62.2|62.3|30.5|
+
+ For each task, we selected the best fine-tuning hyperparameters from the lists below, and trained for 4 epochs (see the grid sketch after this list):
+ - batch sizes: 8, 16, 32, 64, 128
+ - learning rates: 3e-4, 1e-4, 5e-5, 3e-5
+
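The selection protocol above amounts to a 20-point grid search per task. A minimal, purely illustrative sketch in Python (`fine_tune_and_eval` is a hypothetical stand-in for a 4-epoch fine-tuning run plus dev-set evaluation, not part of this repository):

```python
import random
from itertools import product

def fine_tune_and_eval(batch_size, learning_rate, epochs=4):
    """Hypothetical stand-in: fine-tune a compact BERT, return a dev score."""
    return random.random()  # replace with a real fine-tuning call

best = max((fine_tune_and_eval(bs, lr), bs, lr)
           for bs, lr in product([8, 16, 32, 64, 128],
                                 [3e-4, 1e-4, 5e-5, 3e-5]))
print('best dev score %.3f at batch_size=%d, lr=%g' % best)
```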
+ If you use these models, please cite the following paper:
+
+ ```
+ @article{turc2019,
+   title={Well-Read Students Learn Better: On the Importance of Pre-training Compact Models},
+   author={Turc, Iulia and Chang, Ming-Wei and Lee, Kenton and Toutanova, Kristina},
+   journal={arXiv preprint arXiv:1908.08962v2},
+   year={2019}
+ }
+ ```
+
+ [2_128]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-2_H-128_A-2.zip
+ [2_256]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-2_H-256_A-4.zip
+ [2_512]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-2_H-512_A-8.zip
+ [2_768]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-2_H-768_A-12.zip
+ [4_128]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-4_H-128_A-2.zip
+ [4_256]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-4_H-256_A-4.zip
+ [4_512]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-4_H-512_A-8.zip
+ [4_768]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-4_H-768_A-12.zip
+ [6_128]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-6_H-128_A-2.zip
+ [6_256]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-6_H-256_A-4.zip
+ [6_512]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-6_H-512_A-8.zip
+ [6_768]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-6_H-768_A-12.zip
+ [8_128]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-8_H-128_A-2.zip
+ [8_256]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-8_H-256_A-4.zip
+ [8_512]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-8_H-512_A-8.zip
+ [8_768]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-8_H-768_A-12.zip
+ [10_128]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-10_H-128_A-2.zip
+ [10_256]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-10_H-256_A-4.zip
+ [10_512]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-10_H-512_A-8.zip
+ [10_768]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-10_H-768_A-12.zip
+ [12_128]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-12_H-128_A-2.zip
+ [12_256]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-12_H-256_A-4.zip
+ [12_512]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-12_H-512_A-8.zip
+ [12_768]: https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-12_H-768_A-12.zip
+ [all]: https://storage.googleapis.com/bert_models/2020_02_20/all_bert_models.zip
+
+ **\*\*\*\*\* New May 31st, 2019: Whole Word Masking Models \*\*\*\*\***
+
+ This is a release of several new models which were the result of an improvement
+ to the pre-processing code.
+
+ In the original pre-processing code, we randomly select WordPiece tokens to
+ mask. For example:
+
+ `Input Text: the man jumped up , put his basket on phil ##am ##mon ' s head`
+ `Original Masked Input: [MASK] man [MASK] up , put his [MASK] on phil
+ [MASK] ##mon ' s head`
+
+ The new technique is called Whole Word Masking. In this case, we always mask
+ *all* of the tokens corresponding to a word at once. The overall masking
+ rate remains the same.
+
+ `Whole Word Masked Input: the man [MASK] up , put his basket on [MASK] [MASK]
+ [MASK] ' s head`
+
+ The training is identical -- we still predict each masked WordPiece token
+ independently. The improvement comes from the fact that the original prediction
+ task was too 'easy' for words that had been split into multiple WordPieces.
+
+ This can be enabled during data generation by passing the flag
+ `--do_whole_word_mask=True` to `create_pretraining_data.py`.
+
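The grouping logic behind Whole Word Masking is easy to sketch: WordPiece continuation tokens start with `##`, so a word is a token together with the `##`-prefixed pieces that follow it. A minimal illustration in Python; the real implementation in `create_pretraining_data.py` additionally caps the number of predictions per sequence and sometimes substitutes random or unchanged tokens instead of `[MASK]`:

```python
import random

def whole_word_mask(tokens, mask_rate=0.15):
    """Sketch of Whole Word Masking: sample words, mask every piece of each."""
    # Group piece indices into words ('##'-prefixed pieces continue a word).
    words = []
    for i, tok in enumerate(tokens):
        if tok.startswith('##') and words:
            words[-1].append(i)
        else:
            words.append([i])

    budget = max(1, round(len(tokens) * mask_rate))  # same overall rate
    masked, covered = list(tokens), 0
    random.shuffle(words)
    for word in words:
        if covered >= budget:
            break
        for i in word:
            masked[i] = '[MASK]'
        covered += len(word)
    return masked

tokens = "the man jumped up , put his basket on phil ##am ##mon ' s head".split()
print(' '.join(whole_word_mask(tokens)))
```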
+ Pre-trained models with Whole Word Masking are linked below. The data and
+ training were otherwise identical, and the models have identical structure and
+ vocab to the original models. We only include BERT-Large models. When using
+ these models, please make it clear in the paper that you are using the Whole
+ Word Masking variant of BERT-Large.
+
+ * **[`BERT-Large, Uncased (Whole Word Masking)`](https://storage.googleapis.com/bert_models/2019_05_30/wwm_uncased_L-24_H-1024_A-16.zip)**:
+   24-layer, 1024-hidden, 16-heads, 340M parameters
+
+ * **[`BERT-Large, Cased (Whole Word Masking)`](https://storage.googleapis.com/bert_models/2019_05_30/wwm_cased_L-24_H-1024_A-16.zip)**:
+   24-layer, 1024-hidden, 16-heads, 340M parameters
+
+ Model | SQUAD 1.1 F1/EM | Multi NLI Accuracy
+ ---------------------------------------- | :-------------: | :----------------:
+ BERT-Large, Uncased (Original) | 91.0/84.3 | 86.05
+ BERT-Large, Uncased (Whole Word Masking) | 92.8/86.7 | 87.07
+ BERT-Large, Cased (Original) | 91.5/84.8 | 86.09
+ BERT-Large, Cased (Whole Word Masking) | 92.9/86.7 | 86.46
+
+ **\*\*\*\*\* New February 7th, 2019: TfHub Module \*\*\*\*\***
+
+ BERT has been uploaded to [TensorFlow Hub](https://tfhub.dev). See
+ `run_classifier_with_tfhub.py` for an example of how to use the TF Hub module,
+ or run an example in the browser on
+ [Colab](https://colab.sandbox.google.com/github/google-research/bert/blob/master/predicting_movie_reviews_with_bert_on_tf_hub.ipynb).
+
+ **\*\*\*\*\* New November 23rd, 2018: Un-normalized multilingual model + Thai +
+ Mongolian \*\*\*\*\***
+
+ We uploaded a new multilingual model which does *not* perform any normalization
+ on the input (no lower casing, accent stripping, or Unicode normalization), and
+ additionally includes Thai and Mongolian.
+
+ **It is recommended to use this version for developing multilingual models,
+ especially on languages with non-Latin alphabets.**
+
+ This does not require any code changes, and can be downloaded here:
+
+ * **[`BERT-Base, Multilingual Cased`](https://storage.googleapis.com/bert_models/2018_11_23/multi_cased_L-12_H-768_A-12.zip)**:
+   104 languages, 12-layer, 768-hidden, 12-heads, 110M parameters
+
+ **\*\*\*\*\* New November 15th, 2018: SOTA SQuAD 2.0 System \*\*\*\*\***
+
+ We released code changes to reproduce our 83% F1 SQuAD 2.0 system, which is
+ currently 1st place on the leaderboard by 3%. See the SQuAD 2.0 section of the
+ README for details.
+
+ **\*\*\*\*\* New November 5th, 2018: Third-party PyTorch and Chainer versions of
+ BERT available \*\*\*\*\***
+
+ NLP researchers from HuggingFace made a
+ [PyTorch version of BERT available](https://github.com/huggingface/pytorch-pretrained-BERT)
+ which is compatible with our pre-trained checkpoints and is able to reproduce
+ our results. Sosuke Kobayashi also made a
+ [Chainer version of BERT available](https://github.com/soskek/bert-chainer)
+ (Thanks!). We were not involved in the creation or maintenance of the PyTorch
+ implementation so please direct any questions towards the authors of that
+ repository.
+
+ **\*\*\*\*\* New November 3rd, 2018: Multilingual and Chinese models available
+ \*\*\*\*\***
+
+ We have made two new BERT models available:
+
+ * **[`BERT-Base, Multilingual`](https://storage.googleapis.com/bert_models/2018_11_03/multilingual_L-12_H-768_A-12.zip)
+   (Not recommended, use `Multilingual Cased` instead)**: 102 languages,
+   12-layer, 768-hidden, 12-heads, 110M parameters
+ * **[`BERT-Base, Chinese`](https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip)**:
+   Chinese Simplified and Traditional, 12-layer, 768-hidden, 12-heads, 110M
+   parameters
+
+ We use character-based tokenization for Chinese, and WordPiece tokenization for
+ all other languages. Both models should work out-of-the-box without any code
+ changes. We did update the implementation of `BasicTokenizer` in
+ `tokenization.py` to support Chinese character tokenization, so please update if
+ you forked it. However, we did not change the tokenization API.
+
+ For more, see the
+ [Multilingual README](https://github.com/google-research/bert/blob/master/multilingual.md).
+
+ **\*\*\*\*\* End new information \*\*\*\*\***
+
+ ## Introduction
+
+ **BERT**, or **B**idirectional **E**ncoder **R**epresentations from
+ **T**ransformers, is a new method of pre-training language representations which
+ obtains state-of-the-art results on a wide array of Natural Language Processing
+ (NLP) tasks.
+
+ Our academic paper which describes BERT in detail and provides full results on a
+ number of tasks can be found here:
+ [https://arxiv.org/abs/1810.04805](https://arxiv.org/abs/1810.04805).
+
+ To give a few numbers, here are the results on the
+ [SQuAD v1.1](https://rajpurkar.github.io/SQuAD-explorer/) question answering
+ task:
+
+ SQuAD v1.1 Leaderboard (Oct 8th 2018) | Test EM | Test F1
+ ------------------------------------- | :------: | :------:
+ 1st Place Ensemble - BERT | **87.4** | **93.2**
+ 2nd Place Ensemble - nlnet | 86.0 | 91.7
+ 1st Place Single Model - BERT | **85.1** | **91.8**
+ 2nd Place Single Model - nlnet | 83.5 | 90.1
+
+ And several natural language inference tasks:
+
+ System | MultiNLI | Question NLI | SWAG
+ ----------------------- | :------: | :----------: | :------:
+ BERT | **86.7** | **91.1** | **86.3**
+ OpenAI GPT (Prev. SOTA) | 82.2 | 88.1 | 75.0
+
+ Plus many other tasks.
+
+ Moreover, these results were all obtained with almost no task-specific neural
+ network architecture design.
+
+ If you already know what BERT is and you just want to get started, you can
+ [download the pre-trained models](#pre-trained-models) and
+ [run a state-of-the-art fine-tuning](#fine-tuning-with-bert) in only a few
+ minutes.
+
+ ## What is BERT?
+
+ BERT is a method of pre-training language representations, meaning that we train
+ a general-purpose "language understanding" model on a large text corpus (like
+ Wikipedia), and then use that model for downstream NLP tasks that we care about
+ (like question answering). BERT outperforms previous methods because it is the
+ first *unsupervised*, *deeply bidirectional* system for pre-training NLP.
+
+ *Unsupervised* means that BERT was trained using only a plain text corpus, which
+ is important because an enormous amount of plain text data is publicly available
+ on the web in many languages.
+
+ Pre-trained representations can also either be *context-free* or *contextual*,
+ and contextual representations can further be *unidirectional* or
+ *bidirectional*. Context-free models such as
+ [word2vec](https://www.tensorflow.org/tutorials/representation/word2vec) or
+ [GloVe](https://nlp.stanford.edu/projects/glove/) generate a single "word
+ embedding" representation for each word in the vocabulary, so `bank` would have
+ the same representation in `bank deposit` and `river bank`. Contextual models
+ instead generate a representation of each word that is based on the other words
+ in the sentence.
+
+ BERT was built upon recent work in pre-training contextual representations,
+ including [Semi-supervised Sequence Learning](https://arxiv.org/abs/1511.01432),
+ [Generative Pre-Training](https://blog.openai.com/language-unsupervised/),
+ [ELMo](https://allennlp.org/elmo), and
+ [ULMFit](http://nlp.fast.ai/classification/2018/05/15/introducting-ulmfit.html),
+ but crucially these models are all *unidirectional* or *shallowly
+ bidirectional*. This means that each word is only contextualized using the words
+ to its left (or right). For example, in the sentence `I made a bank deposit` the
+ unidirectional representation of `bank` is only based on `I made a` but not
+ `deposit`. Some previous work does combine the representations from separate
+ left-context and right-context models, but only in a "shallow" manner. BERT
+ represents "bank" using both its left and right context (`I made a ... deposit`)
+ starting from the very bottom of a deep neural network, so it is *deeply
+ bidirectional*.
+
+ BERT uses a simple approach for this: We mask out 15% of the words in the input,
+ run the entire sequence through a deep bidirectional
+ [Transformer](https://arxiv.org/abs/1706.03762) encoder, and then predict only
+ the masked words. For example:
+
+ ```
+ Input: the man went to the [MASK1] . he bought a [MASK2] of milk.
+ Labels: [MASK1] = store; [MASK2] = gallon
+ ```
+
+ In order to learn relationships between sentences, we also train on a simple
+ task which can be generated from any monolingual corpus: Given two sentences `A`
+ and `B`, is `B` the actual next sentence that comes after `A`, or just a random
+ sentence from the corpus?
+
+ ```
+ Sentence A: the man went to the store .
+ Sentence B: he bought a gallon of milk .
+ Label: IsNextSentence
+ ```
+
+ ```
+ Sentence A: the man went to the store .
+ Sentence B: penguins are flightless .
+ Label: NotNextSentence
+ ```
+
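A minimal sketch of how such training pairs might be assembled from a monolingual corpus, assuming whitespace-tokenized sentences; the real pipeline in `create_pretraining_data.py` additionally handles WordPiece tokenization, sequence truncation, and sampling negatives from other documents:

```python
import random

def make_nsp_example(corpus, idx):
    """Build one next-sentence-prediction pair from a list of sentences."""
    sent_a = corpus[idx].split()
    if random.random() < 0.5 and idx + 1 < len(corpus):
        sent_b, label = corpus[idx + 1].split(), 'IsNextSentence'
    else:
        sent_b, label = random.choice(corpus).split(), 'NotNextSentence'
    tokens = ['[CLS]'] + sent_a + ['[SEP]'] + sent_b + ['[SEP]']
    # Segment ids distinguish sentence A (0) from sentence B (1).
    segment_ids = [0] * (len(sent_a) + 2) + [1] * (len(sent_b) + 1)
    return tokens, segment_ids, label

corpus = ['the man went to the store .',
          'he bought a gallon of milk .',
          'penguins are flightless .']
print(make_nsp_example(corpus, 0))
```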
+ We then train a large model (12-layer to 24-layer Transformer) on a large corpus
+ (Wikipedia + [BookCorpus](http://yknzhu.wixsite.com/mbweb)) for a long time (1M
+ update steps), and that's BERT.
+
+ Using BERT has two stages: *Pre-training* and *fine-tuning*.
+
+ **Pre-training** is fairly expensive (four days on 4 to 16 Cloud TPUs), but is a
+ one-time procedure for each language (current models are English-only, but
+ multilingual models will be released in the near future). We are releasing a
+ number of pre-trained models from the paper which were pre-trained at Google.
+ Most NLP researchers will never need to pre-train their own model from scratch.
+
+ **Fine-tuning** is inexpensive. All of the results in the paper can be
+ replicated in at most 1 hour on a single Cloud TPU, or a few hours on a GPU,
+ starting from the exact same pre-trained model. SQuAD, for example, can be
+ trained in around 30 minutes on a single Cloud TPU to achieve a Dev F1 score of
+ 91.0%, which is the single system state-of-the-art.
+
+ The other important aspect of BERT is that it can be adapted to many types of
+ NLP tasks very easily. In the paper, we demonstrate state-of-the-art results on
+ sentence-level (e.g., SST-2), sentence-pair-level (e.g., MultiNLI), word-level
+ (e.g., NER), and span-level (e.g., SQuAD) tasks with almost no task-specific
+ modifications.
+
+ ## What has been released in this repository?
+
+ We are releasing the following:
+
+ * TensorFlow code for the BERT model architecture (which is mostly a standard
+   [Transformer](https://arxiv.org/abs/1706.03762) architecture).
+ * Pre-trained checkpoints for both the lowercase and cased version of
+   `BERT-Base` and `BERT-Large` from the paper.
+ * TensorFlow code for push-button replication of the most important
+   fine-tuning experiments from the paper, including SQuAD, MultiNLI, and MRPC.
+
+ All of the code in this repository works out-of-the-box with CPU, GPU, and Cloud
+ TPU.
+
+ ## Pre-trained models
+
+ We are releasing the `BERT-Base` and `BERT-Large` models from the paper.
+ `Uncased` means that the text has been lowercased before WordPiece tokenization,
+ e.g., `John Smith` becomes `john smith`. The `Uncased` model also strips out any
+ accent markers. `Cased` means that the true case and accent markers are
+ preserved. Typically, the `Uncased` model is better unless you know that case
+ information is important for your task (e.g., Named Entity Recognition or
+ Part-of-Speech tagging).
+
+ These models are all released under the same license as the source code (Apache
+ 2.0).
+
+ For information about the Multilingual and Chinese model, see the
+ [Multilingual README](https://github.com/google-research/bert/blob/master/multilingual.md).
+
+ **When using a cased model, make sure to pass `--do_lower_case=False` to the
+ training scripts. (Or pass `do_lower_case=False` directly to `FullTokenizer` if
+ you're using your own script.)**
+
+ The links to the models are here (right-click, 'Save link as...' on the name):
+
+ * **[`BERT-Large, Uncased (Whole Word Masking)`](https://storage.googleapis.com/bert_models/2019_05_30/wwm_uncased_L-24_H-1024_A-16.zip)**:
+   24-layer, 1024-hidden, 16-heads, 340M parameters
+ * **[`BERT-Large, Cased (Whole Word Masking)`](https://storage.googleapis.com/bert_models/2019_05_30/wwm_cased_L-24_H-1024_A-16.zip)**:
+   24-layer, 1024-hidden, 16-heads, 340M parameters
+ * **[`BERT-Base, Uncased`](https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip)**:
+   12-layer, 768-hidden, 12-heads, 110M parameters
+ * **[`BERT-Large, Uncased`](https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-24_H-1024_A-16.zip)**:
+   24-layer, 1024-hidden, 16-heads, 340M parameters
+ * **[`BERT-Base, Cased`](https://storage.googleapis.com/bert_models/2018_10_18/cased_L-12_H-768_A-12.zip)**:
+   12-layer, 768-hidden, 12-heads, 110M parameters
+ * **[`BERT-Large, Cased`](https://storage.googleapis.com/bert_models/2018_10_18/cased_L-24_H-1024_A-16.zip)**:
+   24-layer, 1024-hidden, 16-heads, 340M parameters
+ * **[`BERT-Base, Multilingual Cased (New, recommended)`](https://storage.googleapis.com/bert_models/2018_11_23/multi_cased_L-12_H-768_A-12.zip)**:
+   104 languages, 12-layer, 768-hidden, 12-heads, 110M parameters
+ * **[`BERT-Base, Multilingual Uncased (Orig, not recommended)`](https://storage.googleapis.com/bert_models/2018_11_03/multilingual_L-12_H-768_A-12.zip)
+   (Not recommended, use `Multilingual Cased` instead)**: 102 languages,
+   12-layer, 768-hidden, 12-heads, 110M parameters
+ * **[`BERT-Base, Chinese`](https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip)**:
+   Chinese Simplified and Traditional, 12-layer, 768-hidden, 12-heads, 110M
+   parameters
+
+ Each .zip file contains three items:
+
+ * A TensorFlow checkpoint (`bert_model.ckpt`) containing the pre-trained
+   weights (which is actually 3 files).
+ * A vocab file (`vocab.txt`) to map WordPiece to word id.
+ * A config file (`bert_config.json`) which specifies the hyperparameters of
+   the model.
+
+ ## Fine-tuning with BERT
+
+ **Important**: All results in the paper were fine-tuned on a single Cloud TPU,
+ which has 64GB of RAM. It is currently not possible to reproduce most of the
+ `BERT-Large` results in the paper using a GPU with 12GB - 16GB of RAM, because
+ the maximum batch size that can fit in memory is too small. We are working on
+ adding code to this repository which allows for much larger effective batch size
+ on the GPU. See the section on [out-of-memory issues](#out-of-memory-issues) for
+ more details.
+
+ This code was tested with TensorFlow 1.11.0. It was tested with Python2 and
+ Python3 (but more thoroughly with Python2, since this is what's used internally
+ in Google).
+
+ The fine-tuning examples which use `BERT-Base` should be able to run on a GPU
+ that has at least 12GB of RAM using the hyperparameters given.
+
+ ### Fine-tuning with Cloud TPUs
+
+ Most of the examples below assume that you will be running training/evaluation
+ on your local machine, using a GPU like a Titan X or GTX 1080.
+
+ However, if you have access to a Cloud TPU that you want to train on, just add
+ the following flags to `run_classifier.py` or `run_squad.py`:
+
+ ```
+ --use_tpu=True \
+ --tpu_name=$TPU_NAME
+ ```
+
+ Please see the
+ [Google Cloud TPU tutorial](https://cloud.google.com/tpu/docs/tutorials/mnist)
+ for how to use Cloud TPUs. Alternatively, you can use the Google Colab notebook
+ "[BERT FineTuning with Cloud TPUs](https://colab.research.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb)".
+
+ On Cloud TPUs, the pretrained model and the output directory will need to be on
+ Google Cloud Storage. For example, if you have a bucket named `some_bucket`, you
+ might use the following flags instead:
+
+ ```
+ --output_dir=gs://some_bucket/my_output_dir/
+ ```
+
+ The unzipped pre-trained model files can also be found in the Google Cloud
+ Storage folder `gs://bert_models/2018_10_18`. For example:
+
+ ```
+ export BERT_BASE_DIR=gs://bert_models/2018_10_18/uncased_L-12_H-768_A-12
+ ```
+
+ ### Sentence (and sentence-pair) classification tasks
+
+ Before running this example you must download the
+ [GLUE data](https://gluebenchmark.com/tasks) by running
+ [this script](https://gist.github.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e)
+ and unpack it to some directory `$GLUE_DIR`. Next, download the `BERT-Base`
+ checkpoint and unzip it to some directory `$BERT_BASE_DIR`.
+
+ This example code fine-tunes `BERT-Base` on the Microsoft Research Paraphrase
+ Corpus (MRPC), which contains only 3,600 examples and can fine-tune in a
+ few minutes on most GPUs.
+
+ ```shell
+ export BERT_BASE_DIR=/path/to/bert/uncased_L-12_H-768_A-12
+ export GLUE_DIR=/path/to/glue
+
+ python run_classifier.py \
+   --task_name=MRPC \
+   --do_train=true \
+   --do_eval=true \
+   --data_dir=$GLUE_DIR/MRPC \
+   --vocab_file=$BERT_BASE_DIR/vocab.txt \
+   --bert_config_file=$BERT_BASE_DIR/bert_config.json \
+   --init_checkpoint=$BERT_BASE_DIR/bert_model.ckpt \
+   --max_seq_length=128 \
+   --train_batch_size=32 \
+   --learning_rate=2e-5 \
+   --num_train_epochs=3.0 \
+   --output_dir=/tmp/mrpc_output/
+ ```
+
+ You should see output like this:
+
+ ```
+ ***** Eval results *****
+   eval_accuracy = 0.845588
+   eval_loss = 0.505248
+   global_step = 343
+   loss = 0.505248
+ ```
+
+ This means that the Dev set accuracy was 84.55%. Small sets like MRPC have a
+ high variance in the Dev set accuracy, even when starting from the same
+ pre-training checkpoint. If you re-run multiple times (making sure to point to
+ a different `output_dir` each time), you should see results between 84% and 88%.
+
+ A few other pre-trained models are implemented off-the-shelf in
+ `run_classifier.py`, so it should be straightforward to follow those examples to
+ use BERT for any single-sentence or sentence-pair classification task.
+
+ Note: You might see a message `Running train on CPU`. This really just means
+ that it's running on something other than a Cloud TPU, which includes a GPU.
+
+ #### Prediction from classifier
+
+ Once you have trained your classifier, you can use it in inference mode by
+ passing `--do_predict=true`. You need a file named `test.tsv` in the input
+ folder. Output will be created in a file called `test_results.tsv` in the
+ output folder. Each line contains the output for one sample; the columns are
+ the class probabilities (see the parsing sketch after this example).
+
+ ```shell
+ export BERT_BASE_DIR=/path/to/bert/uncased_L-12_H-768_A-12
+ export GLUE_DIR=/path/to/glue
+ export TRAINED_CLASSIFIER=/path/to/fine/tuned/classifier
+
+ python run_classifier.py \
+   --task_name=MRPC \
+   --do_predict=true \
+   --data_dir=$GLUE_DIR/MRPC \
+   --vocab_file=$BERT_BASE_DIR/vocab.txt \
+   --bert_config_file=$BERT_BASE_DIR/bert_config.json \
+   --init_checkpoint=$TRAINED_CLASSIFIER \
+   --max_seq_length=128 \
+   --output_dir=/tmp/mrpc_output/
+ ```
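A minimal sketch for consuming `test_results.tsv`, assuming the MRPC run above; each row holds one probability per label, in the order given by the task processor's `get_labels()` (`['0', '1']` for MRPC):

```python
import csv

with open('/tmp/mrpc_output/test_results.tsv') as f:
    for i, row in enumerate(csv.reader(f, delimiter='\t')):
        probs = [float(p) for p in row]
        pred = max(range(len(probs)), key=probs.__getitem__)  # argmax class
        print('example %d -> class %d (p=%.4f)' % (i, pred, probs[pred]))
```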
500
+
501
+ ### SQuAD 1.1
502
+
503
+ The Stanford Question Answering Dataset (SQuAD) is a popular question answering
504
+ benchmark dataset. BERT (at the time of the release) obtains state-of-the-art
505
+ results on SQuAD with almost no task-specific network architecture modifications
506
+ or data augmentation. However, it does require semi-complex data pre-processing
507
+ and post-processing to deal with (a) the variable-length nature of SQuAD context
508
+ paragraphs, and (b) the character-level answer annotations which are used for
509
+ SQuAD training. This processing is implemented and documented in `run_squad.py`.
510
+
511
+ To run on SQuAD, you will first need to download the dataset. The
512
+ [SQuAD website](https://rajpurkar.github.io/SQuAD-explorer/) does not seem to
513
+ link to the v1.1 datasets any longer, but the necessary files can be found here:
514
+
515
+ * [train-v1.1.json](https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json)
516
+ * [dev-v1.1.json](https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json)
517
+ * [evaluate-v1.1.py](https://github.com/allenai/bi-att-flow/blob/master/squad/evaluate-v1.1.py)
518
+
519
+ Download these to some directory `$SQUAD_DIR`.
520
+
521
+ The state-of-the-art SQuAD results from the paper currently cannot be reproduced
522
+ on a 12GB-16GB GPU due to memory constraints (in fact, even batch size 1 does
523
+ not seem to fit on a 12GB GPU using `BERT-Large`). However, a reasonably strong
524
+ `BERT-Base` model can be trained on the GPU with these hyperparameters:
525
+
526
+ ```shell
527
+ python run_squad.py \
528
+ --vocab_file=$BERT_BASE_DIR/vocab.txt \
529
+ --bert_config_file=$BERT_BASE_DIR/bert_config.json \
530
+ --init_checkpoint=$BERT_BASE_DIR/bert_model.ckpt \
531
+ --do_train=True \
532
+ --train_file=$SQUAD_DIR/train-v1.1.json \
533
+ --do_predict=True \
534
+ --predict_file=$SQUAD_DIR/dev-v1.1.json \
535
+ --train_batch_size=12 \
536
+ --learning_rate=3e-5 \
537
+ --num_train_epochs=2.0 \
538
+ --max_seq_length=384 \
539
+ --doc_stride=128 \
540
+ --output_dir=/tmp/squad_base/
541
+ ```
542
+
543
+ The dev set predictions will be saved into a file called `predictions.json` in
544
+ the `output_dir`:
545
+
546
+ ```shell
547
+ python $SQUAD_DIR/evaluate-v1.1.py $SQUAD_DIR/dev-v1.1.json ./squad/predictions.json
548
+ ```
549
+
550
+ This should produce an output like this:
551
+
552
+ ```shell
553
+ {"f1": 88.41249612335034, "exact_match": 81.2488174077578}
554
+ ```
555
+
556
+ You should see a result similar to the 88.5% reported in the paper for
557
+ `BERT-Base`.
558
+
559
+ If you have access to a Cloud TPU, you can train with `BERT-Large`. Here is a
560
+ set of hyperparameters (slightly different than the paper) which consistently
561
+ obtain around 90.5%-91.0% F1 single-system trained only on SQuAD:
562
+
563
+ ```shell
564
+ python run_squad.py \
565
+ --vocab_file=$BERT_LARGE_DIR/vocab.txt \
566
+ --bert_config_file=$BERT_LARGE_DIR/bert_config.json \
567
+ --init_checkpoint=$BERT_LARGE_DIR/bert_model.ckpt \
568
+ --do_train=True \
569
+ --train_file=$SQUAD_DIR/train-v1.1.json \
570
+ --do_predict=True \
571
+ --predict_file=$SQUAD_DIR/dev-v1.1.json \
572
+ --train_batch_size=24 \
573
+ --learning_rate=3e-5 \
574
+ --num_train_epochs=2.0 \
575
+ --max_seq_length=384 \
576
+ --doc_stride=128 \
577
+ --output_dir=gs://some_bucket/squad_large/ \
578
+ --use_tpu=True \
579
+ --tpu_name=$TPU_NAME
580
+ ```
581
+
582
+ For example, one random run with these parameters produces the following Dev
583
+ scores:
584
+
585
+ ```shell
586
+ {"f1": 90.87081895814865, "exact_match": 84.38978240302744}
587
+ ```
588
+
589
+ If you fine-tune for one epoch on
+ [TriviaQA](http://nlp.cs.washington.edu/triviaqa/) before this, the results
+ will be even better, but you will need to convert TriviaQA into the SQuAD json
+ format.
593
+
594
+ ### SQuAD 2.0
595
+
596
+ This model is also implemented and documented in `run_squad.py`.
597
+
598
+ To run on SQuAD 2.0, you will first need to download the dataset. The necessary
599
+ files can be found here:
600
+
601
+ * [train-v2.0.json](https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json)
602
+ * [dev-v2.0.json](https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json)
603
+ * [evaluate-v2.0.py](https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/)
604
+
605
+ Download these to some directory `$SQUAD_DIR`.
606
+
607
+ On Cloud TPU you can run with BERT-Large as follows:
608
+
609
+ ```shell
610
+ python run_squad.py \
611
+ --vocab_file=$BERT_LARGE_DIR/vocab.txt \
612
+ --bert_config_file=$BERT_LARGE_DIR/bert_config.json \
613
+ --init_checkpoint=$BERT_LARGE_DIR/bert_model.ckpt \
614
+ --do_train=True \
615
+ --train_file=$SQUAD_DIR/train-v2.0.json \
616
+ --do_predict=True \
617
+ --predict_file=$SQUAD_DIR/dev-v2.0.json \
618
+ --train_batch_size=24 \
619
+ --learning_rate=3e-5 \
620
+ --num_train_epochs=2.0 \
621
+ --max_seq_length=384 \
622
+ --doc_stride=128 \
623
+ --output_dir=gs://some_bucket/squad_large/ \
624
+ --use_tpu=True \
625
+ --tpu_name=$TPU_NAME \
626
+ --version_2_with_negative=True
627
+ ```
628
+
629
+ We assume you have copied everything from the output directory to a local
+ directory called `./squad/`. The initial dev set predictions will be at
+ `./squad/predictions.json`, and the differences between the score of no answer
+ ("") and the best non-null answer for each question will be in the file
+ `./squad/null_odds.json`.
634
+
635
+ Run this script to tune a threshold for predicting null versus non-null answers:
+
+ ```shell
+ python $SQUAD_DIR/evaluate-v2.0.py $SQUAD_DIR/dev-v2.0.json \
+ ./squad/predictions.json --na-prob-file ./squad/null_odds.json
+ ```
+
640
+ Assume the script outputs "best_f1_thresh" THRESH. (Typical values are between
+ -1.0 and -5.0.) You can now re-run the model to generate predictions with the
+ derived threshold, or you can extract the appropriate answers from
+ `./squad/nbest_predictions.json`.
644
+
645
+ ```shell
646
+ python run_squad.py \
647
+ --vocab_file=$BERT_LARGE_DIR/vocab.txt \
648
+ --bert_config_file=$BERT_LARGE_DIR/bert_config.json \
649
+ --init_checkpoint=$BERT_LARGE_DIR/bert_model.ckpt \
650
+ --do_train=False \
651
+ --train_file=$SQUAD_DIR/train-v2.0.json \
652
+ --do_predict=True \
653
+ --predict_file=$SQUAD_DIR/dev-v2.0.json \
654
+ --train_batch_size=24 \
655
+ --learning_rate=3e-5 \
656
+ --num_train_epochs=2.0 \
657
+ --max_seq_length=384 \
658
+ --doc_stride=128 \
659
+ --output_dir=gs://some_bucket/squad_large/ \
660
+ --use_tpu=True \
661
+ --tpu_name=$TPU_NAME \
662
+ --version_2_with_negative=True \
663
+ --null_score_diff_threshold=$THRESH
664
+ ```
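+
+ Alternatively, here is a minimal sketch (not part of the repository) of applying
+ the threshold offline. It assumes, as described above, that
+ `./squad/predictions.json` maps each question id to its best non-null answer and
+ `./squad/null_odds.json` maps each question id to the difference between the
+ null score and the best non-null score:
+
+ ```python
+ import json
+
+ THRESH = -2.0  # substitute the "best_f1_thresh" value reported by evaluate-v2.0.py
+
+ with open("./squad/predictions.json") as f:
+     predictions = json.load(f)
+ with open("./squad/null_odds.json") as f:
+     null_odds = json.load(f)
+
+ # Predict "no answer" whenever the null-vs-best score difference exceeds THRESH.
+ final = {qid: "" if null_odds[qid] > THRESH else answer
+          for qid, answer in predictions.items()}
+
+ with open("./squad/predictions_thresholded.json", "w") as f:
+     json.dump(final, f)
+ ```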
665
+
666
+ ### Out-of-memory issues
667
+
668
+ All experiments in the paper were fine-tuned on a Cloud TPU, which has 64GB of
669
+ device RAM. Therefore, when using a GPU with 12GB - 16GB of RAM, you are likely
670
+ to encounter out-of-memory issues if you use the same hyperparameters described
671
+ in the paper.
672
+
673
+ The factors that affect memory usage are:
674
+
675
+ * **`max_seq_length`**: The released models were trained with sequence lengths
676
+ up to 512, but you can fine-tune with a shorter max sequence length to save
677
+ substantial memory. This is controlled by the `max_seq_length` flag in our
678
+ example code.
679
+
680
+ * **`train_batch_size`**: The memory usage is also directly proportional to
681
+ the batch size.
682
+
683
+ * **Model type, `BERT-Base` vs. `BERT-Large`**: The `BERT-Large` model
684
+ requires significantly more memory than `BERT-Base`.
685
+
686
+ * **Optimizer**: The default optimizer for BERT is Adam, which requires a lot
687
+ of extra memory to store the `m` and `v` vectors. Switching to a more memory
688
+ efficient optimizer can reduce memory usage, but can also affect the
689
+ results. We have not experimented with other optimizers for fine-tuning.
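+
+ To get a feel for the optimizer overhead, here is a back-of-the-envelope sketch
+ for `BERT-Base` (roughly 110M parameters, stored in fp32):
+
+ ```python
+ params = 110_000_000                         # approximate BERT-Base parameter count
+ bytes_per_value = 4                          # fp32
+ adam_state = 2 * params * bytes_per_value    # the `m` and `v` vectors
+ print(adam_state / 2**30)                    # ~0.82 GiB of optimizer state alone
+ ```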
690
+
691
+ Using the default training scripts (`run_classifier.py` and `run_squad.py`), we
692
+ benchmarked the maximum batch size on single Titan X GPU (12GB RAM) with
693
+ TensorFlow 1.11.0:
694
+
695
+ System | Seq Length | Max Batch Size
696
+ ------------ | ---------- | --------------
697
+ `BERT-Base` | 64 | 64
698
+ ... | 128 | 32
699
+ ... | 256 | 16
700
+ ... | 320 | 14
701
+ ... | 384 | 12
702
+ ... | 512 | 6
703
+ `BERT-Large` | 64 | 12
704
+ ... | 128 | 6
705
+ ... | 256 | 2
706
+ ... | 320 | 1
707
+ ... | 384 | 0
708
+ ... | 512 | 0
709
+
710
+ Unfortunately, these max batch sizes for `BERT-Large` are so small that they
711
+ will actually harm the model accuracy, regardless of the learning rate used. We
712
+ are working on adding code to this repository which will allow much larger
713
+ effective batch sizes to be used on the GPU. The code will be based on one (or
714
+ both) of the following techniques:
715
+
716
+ * **Gradient accumulation**: The samples in a minibatch are typically
+ independent with respect to gradient computation (excluding batch
+ normalization, which is not used here). This means that the gradients of
+ multiple smaller minibatches can be accumulated before performing the weight
+ update, and this will be exactly equivalent to a single larger update (see
+ the sketch below).
721
+
722
+ * [**Gradient checkpointing**](https://github.com/openai/gradient-checkpointing):
723
+ The major use of GPU/TPU memory during DNN training is caching the
724
+ intermediate activations in the forward pass that are necessary for
725
+ efficient computation in the backward pass. "Gradient checkpointing" trades
726
+ memory for compute time by re-computing the activations in an intelligent
727
+ way.
728
+
729
+ **However, this is not implemented in the current release.**
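+
+ As a rough sketch of the first technique (hypothetical code, written against
+ generic PyTorch rather than this repository's TensorFlow scripts):
+
+ ```python
+ import torch
+ from torch import nn
+
+ model = nn.Linear(10, 2)                      # stand-in for a real model
+ optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
+ loss_fn = nn.CrossEntropyLoss()
+ accumulation_steps = 4                        # effective batch = 4 x minibatch
+
+ optimizer.zero_grad()
+ for step in range(16):
+     x = torch.randn(8, 10)                    # minibatch of 8 synthetic examples
+     y = torch.randint(0, 2, (8,))
+     loss = loss_fn(model(x), y)
+     (loss / accumulation_steps).backward()    # gradients accumulate in .grad
+     if (step + 1) % accumulation_steps == 0:
+         optimizer.step()                      # one update per 4 minibatches
+         optimizer.zero_grad()
+ ```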
730
+
731
+ ## Using BERT to extract fixed feature vectors (like ELMo)
732
+
733
+ In certain cases, rather than fine-tuning the entire pre-trained model
734
+ end-to-end, it can be beneficial to obtain *pre-trained contextual
735
+ embeddings*, which are fixed contextual representations of each input token
736
+ generated from the hidden layers of the pre-trained model. This should also
737
+ mitigate most of the out-of-memory issues.
738
+
739
+ As an example, we include the script `extract_features.py` which can be used
740
+ like this:
741
+
742
+ ```shell
743
+ # Sentence A and Sentence B are separated by the ||| delimiter for sentence
744
+ # pair tasks like question answering and entailment.
745
+ # For single sentence inputs, put one sentence per line and DON'T use the
746
+ # delimiter.
747
+ echo 'Who was Jim Henson ? ||| Jim Henson was a puppeteer' > /tmp/input.txt
748
+
749
+ python extract_features.py \
750
+ --input_file=/tmp/input.txt \
751
+ --output_file=/tmp/output.jsonl \
752
+ --vocab_file=$BERT_BASE_DIR/vocab.txt \
753
+ --bert_config_file=$BERT_BASE_DIR/bert_config.json \
754
+ --init_checkpoint=$BERT_BASE_DIR/bert_model.ckpt \
755
+ --layers=-1,-2,-3,-4 \
756
+ --max_seq_length=128 \
757
+ --batch_size=8
758
+ ```
759
+
760
+ This will create a JSON file (one line per line of input) containing the BERT
761
+ activations from each Transformer layer specified by `layers` (-1 is the final
762
+ hidden layer of the Transformer, etc.)
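+
+ A minimal sketch for consuming this output (the key names `features`, `token`,
+ `layers`, and `values` follow the JSON written by `extract_features.py`; treat
+ them as assumptions and check the script if in doubt):
+
+ ```python
+ import json
+
+ with open("/tmp/output.jsonl") as f:
+     for line in f:                      # one JSON object per input line
+         example = json.loads(line)
+         for feature in example["features"]:
+             top = feature["layers"][0]  # first requested layer, i.e. -1
+             print(feature["token"], len(top["values"]))  # token, hidden size
+ ```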
763
+
764
+ Note that this script will produce very large output files (by default, around
765
+ 15kb for every input token).
766
+
767
+ If you need to maintain alignment between the original and tokenized words (for
768
+ projecting training labels), see the [Tokenization](#tokenization) section
769
+ below.
770
+
771
+ **Note:** You may see a message like `Could not find trained model in model_dir:
772
+ /tmp/tmpuB5g5c, running initialization to predict.` This message is expected; it
773
+ just means that we are using the `init_from_checkpoint()` API rather than the
774
+ saved model API. If you don't specify a checkpoint or specify an invalid
775
+ checkpoint, this script will complain.
776
+
777
+ ## Tokenization
778
+
779
+ For sentence-level (or sentence-pair) tasks, tokenization is very simple.
780
+ Just follow the example code in `run_classifier.py` and `extract_features.py`.
781
+ The basic procedure for sentence-level tasks is:
782
+
783
+ 1. Create an instance of the tokenizer: `tokenizer = tokenization.FullTokenizer`
784
+
785
+ 2. Tokenize the raw text with `tokens = tokenizer.tokenize(raw_text)`.
786
+
787
+ 3. Truncate to the maximum sequence length. (You can use up to 512, but you
788
+ probably want to use shorter if possible for memory and speed reasons.)
789
+
790
+ 4. Add the `[CLS]` and `[SEP]` tokens in the right place.
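+
+ A minimal sketch of these four steps using this repository's `tokenization.py`
+ (the vocab path and input text are placeholders):
+
+ ```python
+ import tokenization
+
+ # Step 1: instantiate the tokenizer.
+ tokenizer = tokenization.FullTokenizer(
+     vocab_file="vocab.txt", do_lower_case=True)
+
+ # Step 2: tokenize the raw text.
+ tokens = tokenizer.tokenize("Who was Jim Henson?")
+
+ # Step 3: truncate, leaving room for the two special tokens.
+ max_seq_length = 128
+ tokens = tokens[: max_seq_length - 2]
+
+ # Step 4: add [CLS] and [SEP], then map to vocabulary ids.
+ tokens = ["[CLS]"] + tokens + ["[SEP]"]
+ input_ids = tokenizer.convert_tokens_to_ids(tokens)
+ ```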
791
+
792
+ Word-level and span-level tasks (e.g., SQuAD and NER) are more complex, since
793
+ you need to maintain alignment between your input text and output text so that
794
+ you can project your training labels. SQuAD is a particularly complex example
795
+ because the input labels are *character*-based, and SQuAD paragraphs are often
796
+ longer than our maximum sequence length. See the code in `run_squad.py` to show
797
+ how we handle this.
798
+
799
+ Before we describe the general recipe for handling word-level tasks, it's
800
+ important to understand what exactly our tokenizer is doing. It has three main
801
+ steps:
802
+
803
+ 1. **Text normalization**: Convert all whitespace characters to spaces, and
804
+ (for the `Uncased` model) lowercase the input and strip out accent markers.
805
+ E.g., `John Johanson's, → john johanson's,`.
806
+
807
+ 2. **Punctuation splitting**: Split *all* punctuation characters on both sides
808
+ (i.e., add whitespace around all punctuation characters). Punctuation
809
+ characters are defined as (a) Anything with a `P*` Unicode class, (b) any
810
+ non-letter/number/space ASCII character (e.g., characters like `$` which are
811
+ technically not punctuation). E.g., `john johanson's, → john johanson ' s ,`
812
+
813
+ 3. **WordPiece tokenization**: Apply whitespace tokenization to the output of
814
+ the above procedure, and apply
815
+ [WordPiece](https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/data_generators/text_encoder.py)
816
+ tokenization to each token separately. (Our implementation is directly based
817
+ on the one from `tensor2tensor`, which is linked). E.g., `john johanson ' s
818
+ , → john johan ##son ' s ,`
819
+
820
+ The advantage of this scheme is that it is "compatible" with most existing
821
+ English tokenizers. For example, imagine that you have a part-of-speech tagging
822
+ task which looks like this:
823
+
824
+ ```
825
+ Input: John Johanson 's house
826
+ Labels: NNP NNP POS NN
827
+ ```
828
+
829
+ The tokenized output will look like this:
830
+
831
+ ```
832
+ Tokens: john johan ##son ' s house
833
+ ```
834
+
835
+ Crucially, this would be the same output as if the raw text were `John
836
+ Johanson's house` (with no space before the `'s`).
837
+
838
+ If you have a pre-tokenized representation with word-level annotations, you can
839
+ simply tokenize each input word independently, and deterministically maintain an
840
+ original-to-tokenized alignment:
841
+
842
+ ```python
843
+ ### Input
844
+ orig_tokens = ["John", "Johanson", "'s", "house"]
845
+ labels = ["NNP", "NNP", "POS", "NN"]
846
+
847
+ ### Output
848
+ bert_tokens = []
849
+
850
+ # Token map will be an int -> int mapping between the `orig_tokens` index and
851
+ # the `bert_tokens` index.
852
+ orig_to_tok_map = []
853
+
854
+ tokenizer = tokenization.FullTokenizer(
855
+ vocab_file=vocab_file, do_lower_case=True)
856
+
857
+ bert_tokens.append("[CLS]")
858
+ for orig_token in orig_tokens:
859
+ orig_to_tok_map.append(len(bert_tokens))
860
+ bert_tokens.extend(tokenizer.tokenize(orig_token))
861
+ bert_tokens.append("[SEP]")
862
+
863
+ # bert_tokens == ["[CLS]", "john", "johan", "##son", "'", "s", "house", "[SEP]"]
864
+ # orig_to_tok_map == [1, 2, 4, 6]
865
+ ```
866
+
867
+ Now `orig_to_tok_map` can be used to project `labels` to the tokenized
868
+ representation.
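+
+ For example, a minimal sketch of that projection (continuing the snippet above;
+ the "X" placeholder for non-first sub-tokens is one common convention, not
+ something this repository mandates):
+
+ ```python
+ # Assign each original label to the first sub-token of its word; mark the
+ # remaining positions (including [CLS]/[SEP] and "##" continuations) with "X".
+ bert_labels = ["X"] * len(bert_tokens)
+ for label, tok_index in zip(labels, orig_to_tok_map):
+     bert_labels[tok_index] = label
+
+ # bert_labels == ["X", "NNP", "NNP", "X", "POS", "X", "NN", "X"]
+ ```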
869
+
870
+ There are common English tokenization schemes which will cause a slight mismatch
+ with how BERT was pre-trained. For example, if your input tokenization splits
+ off contractions like `do n't`, this will cause a mismatch. If it is possible to
+ do so, you should pre-process your data to convert these back to raw-looking
+ text, but if it's not possible, this mismatch is likely not a big deal.
875
+
876
+ ## Pre-training with BERT
877
+
878
+ We are releasing code to do "masked LM" and "next sentence prediction" on an
879
+ arbitrary text corpus. Note that this is *not* the exact code that was used for
880
+ the paper (the original code was written in C++, and had some additional
881
+ complexity), but this code does generate pre-training data as described in the
882
+ paper.
883
+
884
+ Here's how to run the data generation. The input is a plain text file, with one
885
+ sentence per line. (It is important that these be actual sentences for the "next
886
+ sentence prediction" task). Documents are delimited by empty lines. The output
887
+ is a set of `tf.train.Example`s serialized into `TFRecord` file format.
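+
+ For example, a minimal input file with two documents might look like this:
+
+ ```
+ This is the first sentence of the first document.
+ This is its second sentence.
+
+ The second document starts after the blank line.
+ It also contains more than one sentence.
+ ```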
888
+
889
+ You can perform sentence segmentation with an off-the-shelf NLP toolkit such as
890
+ [spaCy](https://spacy.io/). The `create_pretraining_data.py` script will
891
+ concatenate segments until they reach the maximum sequence length to minimize
892
+ computational waste from padding (see the script for more details). However, you
893
+ may want to intentionally add a slight amount of noise to your input data (e.g.,
894
+ randomly truncate 2% of input segments) to make it more robust to non-sentential
895
+ input during fine-tuning.
896
+
897
+ This script stores all of the examples for the entire input file in memory, so
898
+ for large data files you should shard the input file and call the script
899
+ multiple times. (You can pass in a file glob to `run_pretraining.py`, e.g.,
900
+ `tf_examples.tf_record*`.)
901
+
902
+ The `max_predictions_per_seq` is the maximum number of masked LM predictions per
+ sequence. You should set this to around `max_seq_length` * `masked_lm_prob` (the
+ script doesn't do that automatically because the exact value needs to be passed
+ to both scripts). For example, with `max_seq_length=128` and
+ `masked_lm_prob=0.15`, 128 * 0.15 = 19.2, so the example commands below use
+ `--max_predictions_per_seq=20`.
906
+
907
+ ```shell
908
+ python create_pretraining_data.py \
909
+ --input_file=./sample_text.txt \
910
+ --output_file=/tmp/tf_examples.tfrecord \
911
+ --vocab_file=$BERT_BASE_DIR/vocab.txt \
912
+ --do_lower_case=True \
913
+ --max_seq_length=128 \
914
+ --max_predictions_per_seq=20 \
915
+ --masked_lm_prob=0.15 \
916
+ --random_seed=12345 \
917
+ --dupe_factor=5
918
+ ```
919
+
920
+ Here's how to run the pre-training. Do not include `init_checkpoint` if you are
921
+ pre-training from scratch. The model configuration (including vocab size) is
922
+ specified in `bert_config_file`. This demo code only pre-trains for a small
923
+ number of steps (20), but in practice you will probably want to set
924
+ `num_train_steps` to 10000 steps or more. The `max_seq_length` and
925
+ `max_predictions_per_seq` parameters passed to `run_pretraining.py` must be the
926
+ same as those passed to `create_pretraining_data.py`.
927
+
928
+ ```shell
929
+ python run_pretraining.py \
930
+ --input_file=/tmp/tf_examples.tfrecord \
931
+ --output_dir=/tmp/pretraining_output \
932
+ --do_train=True \
933
+ --do_eval=True \
934
+ --bert_config_file=$BERT_BASE_DIR/bert_config.json \
935
+ --init_checkpoint=$BERT_BASE_DIR/bert_model.ckpt \
936
+ --train_batch_size=32 \
937
+ --max_seq_length=128 \
938
+ --max_predictions_per_seq=20 \
939
+ --num_train_steps=20 \
940
+ --num_warmup_steps=10 \
941
+ --learning_rate=2e-5
942
+ ```
943
+
944
+ This will produce an output like this:
945
+
946
+ ```
947
+ ***** Eval results *****
948
+ global_step = 20
949
+ loss = 0.0979674
950
+ masked_lm_accuracy = 0.985479
951
+ masked_lm_loss = 0.0979328
952
+ next_sentence_accuracy = 1.0
953
+ next_sentence_loss = 3.45724e-05
954
+ ```
955
+
956
+ Note that since our `sample_text.txt` file is very small, this example training
957
+ will overfit that data in only a few steps and produce unrealistically high
958
+ accuracy numbers.
959
+
960
+ ### Pre-training tips and caveats
961
+
962
+ * **If using your own vocabulary, make sure to change `vocab_size` in
963
+ `bert_config.json`. If you use a larger vocabulary without changing this,
964
+ you will likely get NaNs when training on GPU or TPU due to unchecked
965
+ out-of-bounds access.**
966
+ * If your task has a large domain-specific corpus available (e.g., "movie
967
+ reviews" or "scientific papers"), it will likely be beneficial to run
968
+ additional steps of pre-training on your corpus, starting from the BERT
969
+ checkpoint.
970
+ * The learning rate we used in the paper was 1e-4. However, if you are doing
971
+ additional steps of pre-training starting from an existing BERT checkpoint,
972
+ you should use a smaller learning rate (e.g., 2e-5).
973
+ * Current BERT models are English-only, but we do plan to release a
974
+ multilingual model which has been pre-trained on a lot of languages in the
975
+ near future (hopefully by the end of November 2018).
976
+ * Longer sequences are disproportionately expensive because attention is
+ quadratic in the sequence length. In other words, a batch of 64 sequences of
+ length 512 is much more expensive than a batch of 256 sequences of
+ length 128: both contain 64 * 512 = 256 * 128 = 32,768 tokens and the
+ fully-connected/convolutional cost is the same, but the per-token attention
+ cost is 4x higher for the 512-length sequences. Therefore, one
981
+ good recipe is to pre-train for, say, 90,000 steps with a sequence length of
982
+ 128 and then for 10,000 additional steps with a sequence length of 512. The
983
+ very long sequences are mostly needed to learn positional embeddings, which
984
+ can be learned fairly quickly. Note that this does require generating the
985
+ data twice with different values of `max_seq_length`.
986
+ * If you are pre-training from scratch, be prepared that pre-training is
987
+ computationally expensive, especially on GPUs. If you are pre-training from
988
+ scratch, our recommended recipe is to pre-train a `BERT-Base` on a single
989
+ [preemptible Cloud TPU v2](https://cloud.google.com/tpu/docs/pricing), which
990
+ takes about 2 weeks at a cost of about $500 USD (based on the pricing in
991
+ October 2018). You will have to scale down the batch size when only training
992
+ on a single Cloud TPU, compared to what was used in the paper. It is
993
+ recommended to use the largest batch size that fits into TPU memory.
994
+
995
+ ### Pre-training data
996
+
997
+ We will **not** be able to release the pre-processed datasets used in the paper.
998
+ For Wikipedia, the recommended pre-processing is to download
999
+ [the latest dump](https://dumps.wikimedia.org/enwiki/latest/enwiki-latest-pages-articles.xml.bz2),
1000
+ extract the text with
1001
+ [`WikiExtractor.py`](https://github.com/attardi/wikiextractor), and then apply
1002
+ any necessary cleanup to convert it into plain text.
1003
+
1004
+ Unfortunately the researchers who collected the
1005
+ [BookCorpus](http://yknzhu.wixsite.com/mbweb) no longer have it available for
1006
+ public download. The
1007
+ [Project Gutenberg Dataset](https://web.eecs.umich.edu/~lahiri/gutenberg_dataset.html)
1008
+ is a somewhat smaller (200M word) collection of older books that are public
1009
+ domain.
1010
+
1011
+ [Common Crawl](http://commoncrawl.org/) is another very large collection of
1012
+ text, but you will likely have to do substantial pre-processing and cleanup to
1013
+ extract a usable corpus for pre-training BERT.
1014
+
1015
+ ### Learning a new WordPiece vocabulary
1016
+
1017
+ This repository does not include code for *learning* a new WordPiece vocabulary.
1018
+ The reason is that the code used in the paper was implemented in C++ with
1019
+ dependencies on Google's internal libraries. For English, it is almost always
1020
+ better to just start with our vocabulary and pre-trained models. For learning
1021
+ vocabularies of other languages, there are a number of open source options
1022
+ available. However, keep in mind that these are not compatible with our
1023
+ `tokenization.py` library:
1024
+
1025
+ * [Google's SentencePiece library](https://github.com/google/sentencepiece)
1026
+
1027
+ * [tensor2tensor's WordPiece generation script](https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/data_generators/text_encoder_build_subword.py)
1028
+
1029
+ * [Rico Sennrich's Byte Pair Encoding library](https://github.com/rsennrich/subword-nmt)
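+
+ For illustration only, training a new vocabulary with SentencePiece might look
+ like the sketch below (hypothetical file names; as noted above, the resulting
+ vocabulary is *not* directly compatible with `tokenization.py`):
+
+ ```python
+ import sentencepiece as spm
+
+ # Train a 32k BPE vocabulary from a plain-text corpus, one sentence per line.
+ spm.SentencePieceTrainer.Train(
+     "--input=corpus.txt --model_prefix=my_vocab "
+     "--vocab_size=32000 --model_type=bpe")
+ ```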
1030
+
1031
+ ## Using BERT in Colab
1032
+
1033
+ If you want to use BERT with [Colab](https://colab.research.google.com), you can
1034
+ get started with the notebook
1035
+ "[BERT FineTuning with Cloud TPUs](https://colab.research.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb)".
1036
+ **At the time of this writing (October 31st, 2018), Colab users can access a
+ Cloud TPU completely for free.** Note: it is one per user, availability is
+ limited, it requires a Google Cloud Platform account with storage (although
+ storage may be purchased with free credit for signing up with GCP), and this
+ capability may no longer be available in the future. Click on the BERT Colab
+ that was just linked for more information.
1042
+
1043
+ ## FAQ
1044
+
1045
+ #### Is this code compatible with Cloud TPUs? What about GPUs?
1046
+
1047
+ Yes, all of the code in this repository works out-of-the-box with CPU, GPU, and
1048
+ Cloud TPU. However, GPU training is single-GPU only.
1049
+
1050
+ #### I am getting out-of-memory errors, what is wrong?
1051
+
1052
+ See the section on [out-of-memory issues](#out-of-memory-issues) for more
1053
+ information.
1054
+
1055
+ #### Is there a PyTorch version available?
1056
+
1057
+ There is no official PyTorch implementation. However, NLP researchers from
1058
+ HuggingFace made a
1059
+ [PyTorch version of BERT available](https://github.com/huggingface/pytorch-pretrained-BERT)
1060
+ which is compatible with our pre-trained checkpoints and is able to reproduce
1061
+ our results. We were not involved in the creation or maintenance of the PyTorch
1062
+ implementation so please direct any questions towards the authors of that
1063
+ repository.
1064
+
1065
+ #### Is there a Chainer version available?
1066
+
1067
+ There is no official Chainer implementation. However, Sosuke Kobayashi made a
1068
+ [Chainer version of BERT available](https://github.com/soskek/bert-chainer)
1069
+ which is compatible with our pre-trained checkpoints and is able to reproduce
1070
+ our results. We were not involved in the creation or maintenance of the Chainer
1071
+ implementation so please direct any questions towards the authors of that
1072
+ repository.
1073
+
1074
+ #### Will models in other languages be released?
1075
+
1076
+ Yes, we plan to release a multi-lingual BERT model in the near future. We cannot
1077
+ make promises about exactly which languages will be included, but it will likely
1078
+ be a single model which includes *most* of the languages which have a
1079
+ significantly-sized Wikipedia.
1080
+
1081
+ #### Will models larger than `BERT-Large` be released?
1082
+
1083
+ So far we have not attempted to train anything larger than `BERT-Large`. It is
1084
+ possible that we will release larger models if we are able to obtain significant
1085
+ improvements.
1086
+
1087
+ #### What license is this library released under?
1088
+
1089
+ All code *and* models are released under the Apache 2.0 license. See the
1090
+ `LICENSE` file for more information.
1091
+
1092
+ #### How do I cite BERT?
1093
+
1094
+ For now, cite [the arXiv paper](https://arxiv.org/abs/1810.04805):
1095
+
1096
+ ```
1097
+ @article{devlin2018bert,
1098
+ title={BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding},
1099
+ author={Devlin, Jacob and Chang, Ming-Wei and Lee, Kenton and Toutanova, Kristina},
1100
+ journal={arXiv preprint arXiv:1810.04805},
1101
+ year={2018}
1102
+ }
1103
+ ```
1104
+
1105
+ If we submit the paper to a conference or journal, we will update the BibTeX.
1106
+
1107
+ ## Disclaimer
1108
+
1109
+ This is not an official Google product.
1110
+
1111
+ ## Contact information
1112
+
1113
+ For help or issues using BERT, please submit a GitHub issue.
1114
+
1115
+ For personal communication related to BERT, please contact Jacob Devlin
1116
+ (`[email protected]`), Ming-Wei Chang (`[email protected]`), or
1117
+ Kenton Lee (`[email protected]`).
RIS-DMMI/bert/__init__.py ADDED
@@ -0,0 +1,15 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
RIS-DMMI/bert/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (140 Bytes).

RIS-DMMI/bert/__pycache__/activations.cpython-39.pyc ADDED
Binary file (1.94 kB).

RIS-DMMI/bert/__pycache__/configuration_bert.cpython-39.pyc ADDED
Binary file (7.88 kB).

RIS-DMMI/bert/__pycache__/configuration_utils.cpython-39.pyc ADDED
Binary file (16.4 kB).

RIS-DMMI/bert/__pycache__/file_utils.cpython-39.pyc ADDED
Binary file (24.8 kB).

RIS-DMMI/bert/__pycache__/generation_utils.cpython-39.pyc ADDED
Binary file (28 kB).

RIS-DMMI/bert/__pycache__/modeling_bert.cpython-39.pyc ADDED
Binary file (55.2 kB).

RIS-DMMI/bert/__pycache__/modeling_utils.cpython-39.pyc ADDED
Binary file (48 kB).

RIS-DMMI/bert/__pycache__/tokenization_bert.cpython-39.pyc ADDED
Binary file (19.3 kB).

RIS-DMMI/bert/__pycache__/tokenization_utils.cpython-39.pyc ADDED
Binary file (24.9 kB).

RIS-DMMI/bert/__pycache__/tokenization_utils_base.cpython-39.pyc ADDED
Binary file (82.4 kB).

RIS-DMMI/bert/activations.py ADDED
@@ -0,0 +1,56 @@
1
+ import logging
2
+ import math
3
+
4
+ import torch
5
+ import torch.nn.functional as F
6
+
7
+
8
+ logger = logging.getLogger(__name__)
9
+
10
+
11
+ def swish(x):
12
+ return x * torch.sigmoid(x)
13
+
14
+
15
+ def _gelu_python(x):
16
+ """ Original Implementation of the gelu activation function in Google Bert repo when initially created.
17
+ For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
18
+ 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
19
+ This is now written in C in torch.nn.functional
20
+ Also see https://arxiv.org/abs/1606.08415
21
+ """
22
+ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
23
+
24
+
25
+ def gelu_new(x):
26
+ """ Implementation of the gelu activation function currently in Google Bert repo (identical to OpenAI GPT).
27
+ Also see https://arxiv.org/abs/1606.08415
28
+ """
29
+ return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))
30
+
31
+
32
+ # Note: compare parsed version components; a raw string comparison such as
+ # `torch.__version__ < "1.4.0"` misorders two-digit minors like "1.10.0".
+ if tuple(int(p) for p in torch.__version__.split("+")[0].split(".")[:2]) < (1, 4):
+ gelu = _gelu_python
+ else:
+ gelu = F.gelu
36
+
37
+
38
+ def gelu_fast(x):
39
+ return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x)))
40
+
41
+
42
+ ACT2FN = {
43
+ "relu": F.relu,
44
+ "swish": swish,
45
+ "gelu": gelu,
46
+ "tanh": torch.tanh,
47
+ "gelu_new": gelu_new,
48
+ "gelu_fast": gelu_fast,
49
+ }
50
+
51
+
52
+ def get_activation(activation_string):
53
+ if activation_string in ACT2FN:
54
+ return ACT2FN[activation_string]
55
+ else:
56
+ raise KeyError("function {} not found in ACT2FN mapping {}".format(activation_string, list(ACT2FN.keys())))
RIS-DMMI/bert/bert-base-uncased-vocab.txt ADDED
The diff for this file is too large to render.
 
RIS-DMMI/bert/configuration_bert.py ADDED
@@ -0,0 +1,144 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ BERT model configuration """
17
+
18
+
19
+ import logging
20
+
21
+ from .configuration_utils import PretrainedConfig
22
+
23
+
24
+ logger = logging.getLogger(__name__)
25
+
26
+ BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
27
+ # "bert-base-uncased": "/mnt/petrelfs/huyutao.vendor/code/lavit/bert/config.json",
28
+ "bert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json",
29
+ "bert-large-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-config.json",
30
+ "bert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json",
31
+ "bert-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-config.json",
32
+ "bert-base-multilingual-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-config.json",
33
+ "bert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-config.json",
34
+ "bert-base-chinese": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-config.json",
35
+ "bert-base-german-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-config.json",
36
+ "bert-large-uncased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-config.json",
37
+ "bert-large-cased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-config.json",
38
+ "bert-large-uncased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-config.json",
39
+ "bert-large-cased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-config.json",
40
+ "bert-base-cased-finetuned-mrpc": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-config.json",
41
+ "bert-base-german-dbmdz-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-config.json",
42
+ "bert-base-german-dbmdz-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-uncased-config.json",
43
+ "cl-tohoku/bert-base-japanese": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese/config.json",
44
+ "cl-tohoku/bert-base-japanese-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-whole-word-masking/config.json",
45
+ "cl-tohoku/bert-base-japanese-char": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char/config.json",
46
+ "cl-tohoku/bert-base-japanese-char-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-whole-word-masking/config.json",
47
+ "TurkuNLP/bert-base-finnish-cased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-cased-v1/config.json",
48
+ "TurkuNLP/bert-base-finnish-uncased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-uncased-v1/config.json",
49
+ "wietsedv/bert-base-dutch-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/wietsedv/bert-base-dutch-cased/config.json",
50
+ # See all BERT models at https://huggingface.co/models?filter=bert
51
+ }
52
+
53
+
54
+ class BertConfig(PretrainedConfig):
55
+ r"""
56
+ This is the configuration class to store the configuration of a :class:`~transformers.BertModel`.
57
+ It is used to instantiate a BERT model according to the specified arguments, defining the model
58
+ architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
59
+ the BERT `bert-base-uncased <https://huggingface.co/bert-base-uncased>`__ architecture.
60
+
61
+ Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used
62
+ to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig`
63
+ for more information.
64
+
65
+
66
+ Args:
67
+ vocab_size (:obj:`int`, optional, defaults to 30522):
68
+ Vocabulary size of the BERT model. Defines the number of different tokens that
69
+ can be represented by the `inputs_ids` passed to the forward method of :class:`~transformers.BertModel`.
70
+ hidden_size (:obj:`int`, optional, defaults to 768):
71
+ Dimensionality of the encoder layers and the pooler layer.
72
+ num_hidden_layers (:obj:`int`, optional, defaults to 12):
73
+ Number of hidden layers in the Transformer encoder.
74
+ num_attention_heads (:obj:`int`, optional, defaults to 12):
75
+ Number of attention heads for each attention layer in the Transformer encoder.
76
+ intermediate_size (:obj:`int`, optional, defaults to 3072):
77
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
78
+ hidden_act (:obj:`str` or :obj:`function`, optional, defaults to "gelu"):
79
+ The non-linear activation function (function or string) in the encoder and pooler.
80
+ If string, "gelu", "relu", "swish" and "gelu_new" are supported.
81
+ hidden_dropout_prob (:obj:`float`, optional, defaults to 0.1):
82
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
83
+ attention_probs_dropout_prob (:obj:`float`, optional, defaults to 0.1):
84
+ The dropout ratio for the attention probabilities.
85
+ max_position_embeddings (:obj:`int`, optional, defaults to 512):
86
+ The maximum sequence length that this model might ever be used with.
87
+ Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
88
+ type_vocab_size (:obj:`int`, optional, defaults to 2):
89
+ The vocabulary size of the `token_type_ids` passed into :class:`~transformers.BertModel`.
90
+ initializer_range (:obj:`float`, optional, defaults to 0.02):
91
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
92
+ layer_norm_eps (:obj:`float`, optional, defaults to 1e-12):
93
+ The epsilon used by the layer normalization layers.
94
+ gradient_checkpointing (:obj:`bool`, optional, defaults to False):
95
+ If True, use gradient checkpointing to save memory at the expense of slower backward pass.
96
+
97
+ Example::
98
+
99
+ >>> from transformers import BertModel, BertConfig
100
+
101
+ >>> # Initializing a BERT bert-base-uncased style configuration
102
+ >>> configuration = BertConfig()
103
+
104
+ >>> # Initializing a model from the bert-base-uncased style configuration
105
+ >>> model = BertModel(configuration)
106
+
107
+ >>> # Accessing the model configuration
108
+ >>> configuration = model.config
109
+ """
110
+ model_type = "bert"
111
+
112
+ def __init__(
113
+ self,
114
+ vocab_size=30522,
115
+ hidden_size=768,
116
+ num_hidden_layers=12,
117
+ num_attention_heads=12,
118
+ intermediate_size=3072,
119
+ hidden_act="gelu",
120
+ hidden_dropout_prob=0.1,
121
+ attention_probs_dropout_prob=0.1,
122
+ max_position_embeddings=512,
123
+ type_vocab_size=2,
124
+ initializer_range=0.02,
125
+ layer_norm_eps=1e-12,
126
+ pad_token_id=0,
127
+ gradient_checkpointing=False,
128
+ **kwargs
129
+ ):
130
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
131
+
132
+ self.vocab_size = vocab_size
133
+ self.hidden_size = hidden_size
134
+ self.num_hidden_layers = num_hidden_layers
135
+ self.num_attention_heads = num_attention_heads
136
+ self.hidden_act = hidden_act
137
+ self.intermediate_size = intermediate_size
138
+ self.hidden_dropout_prob = hidden_dropout_prob
139
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
140
+ self.max_position_embeddings = max_position_embeddings
141
+ self.type_vocab_size = type_vocab_size
142
+ self.initializer_range = initializer_range
143
+ self.layer_norm_eps = layer_norm_eps
144
+ self.gradient_checkpointing = gradient_checkpointing
RIS-DMMI/bert/configuration_utils.py ADDED
@@ -0,0 +1,414 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ Configuration base class and utilities."""
17
+
18
+
19
+ import copy
20
+ import json
21
+ import logging
22
+ import os
23
+ from typing import Dict, Tuple
24
+
25
+ from .file_utils import CONFIG_NAME, cached_path, hf_bucket_url, is_remote_url
26
+
27
+ logger = logging.getLogger(__name__)
29
+
30
+
31
+ class PretrainedConfig(object):
32
+ r""" Base class for all configuration classes.
33
+ Handles a few parameters common to all models' configurations as well as methods for loading/downloading/saving configurations.
34
+
35
+ Note:
36
+ A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to initialize a model does **not** load the model weights.
37
+ It only affects the model's configuration.
38
+
39
+ Class attributes (overridden by derived classes):
40
+ - ``model_type``: a string that identifies the model type, that we serialize into the JSON file, and that we use to recreate the correct object in :class:`~transformers.AutoConfig`.
41
+
42
+ Args:
43
+ finetuning_task (:obj:`string` or :obj:`None`, `optional`, defaults to :obj:`None`):
44
+ Name of the task used to fine-tune the model. This can be used when converting from an original (TensorFlow or PyTorch) checkpoint.
45
+ num_labels (:obj:`int`, `optional`, defaults to `2`):
46
+ Number of classes to use when the model is a classification model (sequences/tokens)
47
+ output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`False`):
48
+ Should the model returns all hidden-states.
49
+ output_attentions (:obj:`bool`, `optional`, defaults to :obj:`False`):
50
+ Should the model returns all attentions.
51
+ torchscript (:obj:`bool`, `optional`, defaults to :obj:`False`):
52
+ Is the model used with Torchscript (for PyTorch models).
53
+ """
54
+ model_type: str = ""
55
+
56
+ def __init__(self, **kwargs):
57
+ # Attributes with defaults
58
+ self.output_hidden_states = kwargs.pop("output_hidden_states", False)
59
+ self.output_attentions = kwargs.pop("output_attentions", False)
60
+ self.use_cache = kwargs.pop("use_cache", True) # Not used by all models
61
+ self.torchscript = kwargs.pop("torchscript", False) # Only used by PyTorch models
62
+ self.use_bfloat16 = kwargs.pop("use_bfloat16", False)
63
+ self.pruned_heads = kwargs.pop("pruned_heads", {})
64
+
65
+ # Is decoder is used in encoder-decoder models to differentiate encoder from decoder
66
+ self.is_encoder_decoder = kwargs.pop("is_encoder_decoder", False)
67
+ self.is_decoder = kwargs.pop("is_decoder", False)
68
+
69
+ # Parameters for sequence generation
70
+ self.max_length = kwargs.pop("max_length", 20)
71
+ self.min_length = kwargs.pop("min_length", 0)
72
+ self.do_sample = kwargs.pop("do_sample", False)
73
+ self.early_stopping = kwargs.pop("early_stopping", False)
74
+ self.num_beams = kwargs.pop("num_beams", 1)
75
+ self.temperature = kwargs.pop("temperature", 1.0)
76
+ self.top_k = kwargs.pop("top_k", 50)
77
+ self.top_p = kwargs.pop("top_p", 1.0)
78
+ self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0)
79
+ self.length_penalty = kwargs.pop("length_penalty", 1.0)
80
+ self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0)
81
+ self.bad_words_ids = kwargs.pop("bad_words_ids", None)
82
+ self.num_return_sequences = kwargs.pop("num_return_sequences", 1)
83
+
84
+ # Fine-tuning task arguments
85
+ self.architectures = kwargs.pop("architectures", None)
86
+ self.finetuning_task = kwargs.pop("finetuning_task", None)
87
+ self.id2label = kwargs.pop("id2label", None)
88
+ self.label2id = kwargs.pop("label2id", None)
89
+ if self.id2label is not None:
90
+ kwargs.pop("num_labels", None)
91
+ self.id2label = dict((int(key), value) for key, value in self.id2label.items())
92
+ # Keys are always strings in JSON so convert ids to int here.
93
+ else:
94
+ self.num_labels = kwargs.pop("num_labels", 2)
95
+
96
+ # Tokenizer arguments TODO: eventually tokenizer and models should share the same config
97
+ self.prefix = kwargs.pop("prefix", None)
98
+ self.bos_token_id = kwargs.pop("bos_token_id", None)
99
+ self.pad_token_id = kwargs.pop("pad_token_id", None)
100
+ self.eos_token_id = kwargs.pop("eos_token_id", None)
101
+ self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)
102
+
103
+ # task specific arguments
104
+ self.task_specific_params = kwargs.pop("task_specific_params", None)
105
+
106
+ # TPU arguments
107
+ self.xla_device = kwargs.pop("xla_device", None)
108
+
109
+ # Additional attributes without default values
110
+ for key, value in kwargs.items():
111
+ try:
112
+ setattr(self, key, value)
113
+ except AttributeError as err:
114
+ logger.error("Can't set {} with value {} for {}".format(key, value, self))
115
+ raise err
116
+
117
+ @property
118
+ def num_labels(self):
119
+ return len(self.id2label)
120
+
121
+ @num_labels.setter
122
+ def num_labels(self, num_labels):
123
+ self.id2label = {i: "LABEL_{}".format(i) for i in range(num_labels)}
124
+ self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))
125
+
126
+ def save_pretrained(self, save_directory):
127
+ """
128
+ Save a configuration object to the directory `save_directory`, so that it
129
+ can be re-loaded using the :func:`~transformers.PretrainedConfig.from_pretrained` class method.
130
+
131
+ Args:
132
+ save_directory (:obj:`string`):
133
+ Directory where the configuration JSON file will be saved.
134
+ """
135
+ if os.path.isfile(save_directory):
136
+ raise AssertionError("Provided path ({}) should be a directory, not a file".format(save_directory))
137
+ os.makedirs(save_directory, exist_ok=True)
138
+ # If we save using the predefined names, we can load using `from_pretrained`
139
+ output_config_file = os.path.join(save_directory, CONFIG_NAME)
140
+
141
+ self.to_json_file(output_config_file, use_diff=True)
142
+ logger.info("Configuration saved in {}".format(output_config_file))
143
+
144
+ @classmethod
145
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
146
+ r"""
147
+
148
+ Instantiate a :class:`~transformers.PretrainedConfig` (or a derived class) from a pre-trained model configuration.
149
+
150
+ Args:
151
+ pretrained_model_name_or_path (:obj:`string`):
152
+ either:
153
+ - a string with the `shortcut name` of a pre-trained model configuration to load from cache or
154
+ download, e.g.: ``bert-base-uncased``.
155
+ - a string with the `identifier name` of a pre-trained model configuration that was user-uploaded to
156
+ our S3, e.g.: ``dbmdz/bert-base-german-cased``.
157
+ - a path to a `directory` containing a configuration file saved using the
158
+ :func:`~transformers.PretrainedConfig.save_pretrained` method, e.g.: ``./my_model_directory/``.
159
+ - a path or url to a saved configuration JSON `file`, e.g.:
160
+ ``./my_model_directory/configuration.json``.
161
+ cache_dir (:obj:`string`, `optional`):
162
+ Path to a directory in which a downloaded pre-trained model
163
+ configuration should be cached if the standard cache should not be used.
164
+ kwargs (:obj:`Dict[str, any]`, `optional`):
165
+ The values in kwargs of any keys which are configuration attributes will be used to override the loaded
166
+ values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is
167
+ controlled by the `return_unused_kwargs` keyword parameter.
168
+ force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
169
+ Force to (re-)download the model weights and configuration files and override the cached versions if they exist.
170
+ resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
171
+ Do not delete incompletely recieved file. Attempt to resume the download if such a file exists.
172
+ proxies (:obj:`Dict`, `optional`):
173
+ A dictionary of proxy servers to use by protocol or endpoint, e.g.:
174
+ :obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.`
175
+ The proxies are used on each request.
176
+ return_unused_kwargs: (`optional`) bool:
177
+ If False, then this function returns just the final configuration object.
178
+ If True, then this functions returns a :obj:`Tuple(config, unused_kwargs)` where `unused_kwargs` is a
179
+ dictionary consisting of the key/value pairs whose keys are not configuration attributes: ie the part
180
+ of kwargs which has not been used to update `config` and is otherwise ignored.
181
+
182
+ Returns:
183
+ :class:`PretrainedConfig`: An instance of a configuration object
184
+
185
+ Examples::
186
+
187
+ # We can't instantiate directly the base class `PretrainedConfig` so let's show the examples on a
188
+ # derived class: BertConfig
189
+ config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
190
+ config = BertConfig.from_pretrained('./test/saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')`
191
+ config = BertConfig.from_pretrained('./test/saved_model/my_configuration.json')
192
+ config = BertConfig.from_pretrained('bert-base-uncased', output_attention=True, foo=False)
193
+ assert config.output_attention == True
194
+ config, unused_kwargs = BertConfig.from_pretrained('bert-base-uncased', output_attention=True,
195
+ foo=False, return_unused_kwargs=True)
196
+ assert config.output_attention == True
197
+ assert unused_kwargs == {'foo': False}
198
+
199
+ """
200
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+ return cls.from_dict(config_dict, **kwargs)
206
+
207
+ @classmethod
208
+ def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs) -> Tuple[Dict, Dict]:
209
+ """
210
+ From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used
211
+ for instantiating a Config using `from_dict`.
212
+
213
+ Parameters:
214
+ pretrained_model_name_or_path (:obj:`string`):
215
+ The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
216
+
217
+ Returns:
218
+ :obj:`Tuple[Dict, Dict]`: The dictionary that will be used to instantiate the configuration object.
219
+
220
+ """
221
+ cache_dir = kwargs.pop("cache_dir", None)
222
+ force_download = kwargs.pop("force_download", False)
223
+ resume_download = kwargs.pop("resume_download", False)
224
+ proxies = kwargs.pop("proxies", None)
225
+ local_files_only = kwargs.pop("local_files_only", False)
226
+
227
+ if os.path.isdir(pretrained_model_name_or_path):
228
+ config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
229
+ elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
230
+ config_file = pretrained_model_name_or_path
231
+ else:
232
+ config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)
233
+
235
+ try:
236
+ # Load from URL or cache if already cached
237
+ resolved_config_file = cached_path(
238
+ config_file,
239
+ cache_dir=cache_dir,
240
+ force_download=force_download,
241
+ proxies=proxies,
242
+ resume_download=resume_download,
243
+ local_files_only=local_files_only,
244
+ )
245
+ # pdb.set_trace()
246
+ # Load config dict
247
+ if resolved_config_file is None:
248
+ raise EnvironmentError
249
+ config_dict = cls._dict_from_json_file(resolved_config_file)
250
+
251
+ except EnvironmentError:
252
+ msg = (
253
+ f"Can't load config for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
254
+ f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
255
+ f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a {CONFIG_NAME} file\n\n"
256
+ )
257
+ raise EnvironmentError(msg)
258
+
259
+ except json.JSONDecodeError:
260
+ msg = (
261
+ "Couldn't reach server at '{}' to download configuration file or "
262
+ "configuration file is not a valid JSON file. "
263
+ "Please check network or file content here: {}.".format(config_file, resolved_config_file)
264
+ )
265
+ raise EnvironmentError(msg)
266
+
267
+ if resolved_config_file == config_file:
268
+ logger.info("loading configuration file {}".format(config_file))
269
+ else:
270
+ logger.info("loading configuration file {} from cache at {}".format(config_file, resolved_config_file))
271
+
272
+ return config_dict, kwargs
273
+
274
+ @classmethod
275
+ def from_dict(cls, config_dict: Dict, **kwargs) -> "PretrainedConfig":
276
+ """
277
+ Constructs a `Config` from a Python dictionary of parameters.
278
+
279
+ Args:
280
+ config_dict (:obj:`Dict[str, any]`):
281
+ Dictionary that will be used to instantiate the configuration object. Such a dictionary can be retrieved
282
+ from a pre-trained checkpoint by leveraging the :func:`~transformers.PretrainedConfig.get_config_dict`
283
+ method.
284
+ kwargs (:obj:`Dict[str, any]`):
285
+ Additional parameters from which to initialize the configuration object.
286
+
287
+ Returns:
288
+ :class:`PretrainedConfig`: An instance of a configuration object
289
+ """
290
+ return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
291
+
292
+ config = cls(**config_dict)
293
+
294
+ if hasattr(config, "pruned_heads"):
295
+ config.pruned_heads = dict((int(key), value) for key, value in config.pruned_heads.items())
296
+
297
+ # Update config with kwargs if needed
298
+ to_remove = []
299
+ for key, value in kwargs.items():
300
+ if hasattr(config, key):
301
+ setattr(config, key, value)
302
+ to_remove.append(key)
303
+ for key in to_remove:
304
+ kwargs.pop(key, None)
305
+
306
+ logger.info("Model config %s", str(config))
307
+ if return_unused_kwargs:
308
+ return config, kwargs
309
+ else:
310
+ return config
311
+
312
+ @classmethod
313
+ def from_json_file(cls, json_file: str) -> "PretrainedConfig":
314
+ """
315
+ Constructs a `Config` from the path to a json file of parameters.
316
+
317
+ Args:
318
+ json_file (:obj:`string`):
319
+ Path to the JSON file containing the parameters.
320
+
321
+ Returns:
322
+ :class:`PretrainedConfig`: An instance of a configuration object
323
+
324
+ """
325
+ config_dict = cls._dict_from_json_file(json_file)
326
+ return cls(**config_dict)
327
+
328
+ @classmethod
329
+ def _dict_from_json_file(cls, json_file: str):
330
+ with open(json_file, "r", encoding="utf-8") as reader:
331
+ text = reader.read()
332
+ return json.loads(text)
333
+
334
+ def __eq__(self, other):
335
+ return self.__dict__ == other.__dict__
336
+
337
+ def __repr__(self):
338
+ return "{} {}".format(self.__class__.__name__, self.to_json_string())
339
+
340
+ def to_diff_dict(self):
341
+ """
342
+ Removes all attributes from config which correspond to the default
343
+ config attributes for better readability and serializes to a Python
344
+ dictionary.
345
+
346
+ Returns:
347
+ :obj:`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
348
+ """
349
+ config_dict = self.to_dict()
350
+
351
+ # get the default config dict
352
+ default_config_dict = PretrainedConfig().to_dict()
353
+
354
+ serializable_config_dict = {}
355
+
356
+ # only serialize values that differ from the default config
357
+ for key, value in config_dict.items():
358
+ if key not in default_config_dict or value != default_config_dict[key]:
359
+ serializable_config_dict[key] = value
360
+
361
+ return serializable_config_dict
362
+
363
+ def to_dict(self):
364
+ """
365
+ Serializes this instance to a Python dictionary.
366
+
367
+ Returns:
368
+ :obj:`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
369
+ """
370
+ output = copy.deepcopy(self.__dict__)
371
+ if hasattr(self.__class__, "model_type"):
372
+ output["model_type"] = self.__class__.model_type
373
+ return output
374
+
375
+ def to_json_string(self, use_diff=True):
376
+ """
377
+ Serializes this instance to a JSON string.
378
+
379
+ Args:
380
+ use_diff (:obj:`bool`):
381
+ If set to True, only the difference between the config instance and the default PretrainedConfig() is serialized to JSON string.
382
+
383
+ Returns:
384
+ :obj:`string`: String containing all the attributes that make up this configuration instance in JSON format.
385
+ """
386
+ if use_diff is True:
387
+ config_dict = self.to_diff_dict()
388
+ else:
389
+ config_dict = self.to_dict()
390
+ return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
391
+
392
+ def to_json_file(self, json_file_path, use_diff=True):
393
+ """
394
+ Save this instance to a json file.
395
+
396
+ Args:
397
+ json_file_path (:obj:`string`):
398
+ Path to the JSON file in which this configuration instance's parameters will be saved.
399
+ use_diff (:obj:`bool`):
400
+ If set to True, only the difference between the config instance and the default PretrainedConfig() is serialized to JSON file.
401
+ """
402
+ with open(json_file_path, "w", encoding="utf-8") as writer:
403
+ writer.write(self.to_json_string(use_diff=use_diff))
404
+
405
+ def update(self, config_dict: Dict):
406
+ """
407
+ Updates attributes of this instance
408
+ with attributes from `config_dict`.
409
+
410
+ Args:
411
+ :obj:`Dict[str, any]`: Dictionary of attributes that shall be updated for this class.
412
+ """
413
+ for key, value in config_dict.items():
414
+ setattr(self, key, value)
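Taken together, the helpers above give a full dict/JSON round trip for configs. A minimal sketch of how they combine (assuming this file is importable alongside the repo's `configuration_bert`, and using `BertConfig` as an illustrative `PretrainedConfig` subclass):

# Sketch only, not part of this diff: exercising the serialization
# helpers defined above. BertConfig is assumed to subclass PretrainedConfig.
from configuration_bert import BertConfig

config = BertConfig(hidden_size=512)        # override one architecture default
diff = config.to_diff_dict()                # keeps only non-default values
assert diff["hidden_size"] == 512
print(config.to_json_string())              # diff-only JSON (use_diff=True)
config.update({"num_attention_heads": 8})   # setattr-based in-place update
assert config.num_attention_heads == 8
config.to_json_file("bert_config.json")     # illustrative output path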
RIS-DMMI/bert/create_pretraining_data.py ADDED
@@ -0,0 +1,469 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Create masked LM/next sentence masked_lm TF examples for BERT."""
16
+
17
+ from __future__ import absolute_import
18
+ from __future__ import division
19
+ from __future__ import print_function
20
+
21
+ import collections
22
+ import random
23
+ import tokenization
24
+ import tensorflow as tf
25
+
26
+ flags = tf.flags
27
+
28
+ FLAGS = flags.FLAGS
29
+
30
+ flags.DEFINE_string("input_file", None,
31
+ "Input raw text file (or comma-separated list of files).")
32
+
33
+ flags.DEFINE_string(
34
+ "output_file", None,
35
+ "Output TF example file (or comma-separated list of files).")
36
+
37
+ flags.DEFINE_string("vocab_file", None,
38
+ "The vocabulary file that the BERT model was trained on.")
39
+
40
+ flags.DEFINE_bool(
41
+ "do_lower_case", True,
42
+ "Whether to lower case the input text. Should be True for uncased "
43
+ "models and False for cased models.")
44
+
45
+ flags.DEFINE_bool(
46
+ "do_whole_word_mask", False,
47
+ "Whether to use whole word masking rather than per-WordPiece masking.")
48
+
49
+ flags.DEFINE_integer("max_seq_length", 128, "Maximum sequence length.")
50
+
51
+ flags.DEFINE_integer("max_predictions_per_seq", 20,
52
+ "Maximum number of masked LM predictions per sequence.")
53
+
54
+ flags.DEFINE_integer("random_seed", 12345, "Random seed for data generation.")
55
+
56
+ flags.DEFINE_integer(
57
+ "dupe_factor", 10,
58
+ "Number of times to duplicate the input data (with different masks).")
59
+
60
+ flags.DEFINE_float("masked_lm_prob", 0.15, "Masked LM probability.")
61
+
62
+ flags.DEFINE_float(
63
+ "short_seq_prob", 0.1,
64
+ "Probability of creating sequences which are shorter than the "
65
+ "maximum length.")
66
+
67
+
68
+ class TrainingInstance(object):
69
+ """A single training instance (sentence pair)."""
70
+
71
+ def __init__(self, tokens, segment_ids, masked_lm_positions, masked_lm_labels,
72
+ is_random_next):
73
+ self.tokens = tokens
74
+ self.segment_ids = segment_ids
75
+ self.is_random_next = is_random_next
76
+ self.masked_lm_positions = masked_lm_positions
77
+ self.masked_lm_labels = masked_lm_labels
78
+
79
+ def __str__(self):
80
+ s = ""
81
+ s += "tokens: %s\n" % (" ".join(
82
+ [tokenization.printable_text(x) for x in self.tokens]))
83
+ s += "segment_ids: %s\n" % (" ".join([str(x) for x in self.segment_ids]))
84
+ s += "is_random_next: %s\n" % self.is_random_next
85
+ s += "masked_lm_positions: %s\n" % (" ".join(
86
+ [str(x) for x in self.masked_lm_positions]))
87
+ s += "masked_lm_labels: %s\n" % (" ".join(
88
+ [tokenization.printable_text(x) for x in self.masked_lm_labels]))
89
+ s += "\n"
90
+ return s
91
+
92
+ def __repr__(self):
93
+ return self.__str__()
94
+
95
+
96
+ def write_instance_to_example_files(instances, tokenizer, max_seq_length,
97
+ max_predictions_per_seq, output_files):
98
+ """Create TF example files from `TrainingInstance`s."""
99
+ writers = []
100
+ for output_file in output_files:
101
+ writers.append(tf.python_io.TFRecordWriter(output_file))
102
+
103
+ writer_index = 0
104
+
105
+ total_written = 0
106
+ for (inst_index, instance) in enumerate(instances):
107
+ input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)
108
+ input_mask = [1] * len(input_ids)
109
+ segment_ids = list(instance.segment_ids)
110
+ assert len(input_ids) <= max_seq_length
111
+
112
+ while len(input_ids) < max_seq_length:
113
+ input_ids.append(0)
114
+ input_mask.append(0)
115
+ segment_ids.append(0)
116
+
117
+ assert len(input_ids) == max_seq_length
118
+ assert len(input_mask) == max_seq_length
119
+ assert len(segment_ids) == max_seq_length
120
+
121
+ masked_lm_positions = list(instance.masked_lm_positions)
122
+ masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)
123
+ masked_lm_weights = [1.0] * len(masked_lm_ids)
124
+
125
+ while len(masked_lm_positions) < max_predictions_per_seq:
126
+ masked_lm_positions.append(0)
127
+ masked_lm_ids.append(0)
128
+ masked_lm_weights.append(0.0)
129
+
130
+ next_sentence_label = 1 if instance.is_random_next else 0
131
+
132
+ features = collections.OrderedDict()
133
+ features["input_ids"] = create_int_feature(input_ids)
134
+ features["input_mask"] = create_int_feature(input_mask)
135
+ features["segment_ids"] = create_int_feature(segment_ids)
136
+ features["masked_lm_positions"] = create_int_feature(masked_lm_positions)
137
+ features["masked_lm_ids"] = create_int_feature(masked_lm_ids)
138
+ features["masked_lm_weights"] = create_float_feature(masked_lm_weights)
139
+ features["next_sentence_labels"] = create_int_feature([next_sentence_label])
140
+
141
+ tf_example = tf.train.Example(features=tf.train.Features(feature=features))
142
+
143
+ writers[writer_index].write(tf_example.SerializeToString())
144
+ writer_index = (writer_index + 1) % len(writers)
145
+
146
+ total_written += 1
147
+
148
+ if inst_index < 20:
149
+ tf.logging.info("*** Example ***")
150
+ tf.logging.info("tokens: %s" % " ".join(
151
+ [tokenization.printable_text(x) for x in instance.tokens]))
152
+
153
+ for feature_name in features.keys():
154
+ feature = features[feature_name]
155
+ values = []
156
+ if feature.int64_list.value:
157
+ values = feature.int64_list.value
158
+ elif feature.float_list.value:
159
+ values = feature.float_list.value
160
+ tf.logging.info(
161
+ "%s: %s" % (feature_name, " ".join([str(x) for x in values])))
162
+
163
+ for writer in writers:
164
+ writer.close()
165
+
166
+ tf.logging.info("Wrote %d total instances", total_written)
167
+
168
+
169
+ def create_int_feature(values):
170
+ feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
171
+ return feature
172
+
173
+
174
+ def create_float_feature(values):
175
+ feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
176
+ return feature
177
+
178
+
179
+ def create_training_instances(input_files, tokenizer, max_seq_length,
180
+ dupe_factor, short_seq_prob, masked_lm_prob,
181
+ max_predictions_per_seq, rng):
182
+ """Create `TrainingInstance`s from raw text."""
183
+ all_documents = [[]]
184
+
185
+ # Input file format:
186
+ # (1) One sentence per line. These should ideally be actual sentences, not
187
+ # entire paragraphs or arbitrary spans of text. (Because we use the
188
+ # sentence boundaries for the "next sentence prediction" task).
189
+ # (2) Blank lines between documents. Document boundaries are needed so
190
+ # that the "next sentence prediction" task doesn't span between documents.
191
+ for input_file in input_files:
192
+ with tf.gfile.GFile(input_file, "r") as reader:
193
+ while True:
194
+ line = tokenization.convert_to_unicode(reader.readline())
195
+ if not line:
196
+ break
197
+ line = line.strip()
198
+
199
+ # Empty lines are used as document delimiters
200
+ if not line:
201
+ all_documents.append([])
202
+ tokens = tokenizer.tokenize(line)
203
+ if tokens:
204
+ all_documents[-1].append(tokens)
205
+
206
+ # Remove empty documents
207
+ all_documents = [x for x in all_documents if x]
208
+ rng.shuffle(all_documents)
209
+
210
+ vocab_words = list(tokenizer.vocab.keys())
211
+ instances = []
212
+ for _ in range(dupe_factor):
213
+ for document_index in range(len(all_documents)):
214
+ instances.extend(
215
+ create_instances_from_document(
216
+ all_documents, document_index, max_seq_length, short_seq_prob,
217
+ masked_lm_prob, max_predictions_per_seq, vocab_words, rng))
218
+
219
+ rng.shuffle(instances)
220
+ return instances
221
+
222
+
223
+ def create_instances_from_document(
224
+ all_documents, document_index, max_seq_length, short_seq_prob,
225
+ masked_lm_prob, max_predictions_per_seq, vocab_words, rng):
226
+ """Creates `TrainingInstance`s for a single document."""
227
+ document = all_documents[document_index]
228
+
229
+ # Account for [CLS], [SEP], [SEP]
230
+ max_num_tokens = max_seq_length - 3
231
+
232
+ # We *usually* want to fill up the entire sequence since we are padding
233
+ # to `max_seq_length` anyways, so short sequences are generally wasted
234
+ # computation. However, we *sometimes*
235
+ # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
236
+ # sequences to minimize the mismatch between pre-training and fine-tuning.
237
+ # The `target_seq_length` is just a rough target however, whereas
238
+ # `max_seq_length` is a hard limit.
239
+ target_seq_length = max_num_tokens
240
+ if rng.random() < short_seq_prob:
241
+ target_seq_length = rng.randint(2, max_num_tokens)
242
+
243
+ # We DON'T just concatenate all of the tokens from a document into a long
244
+ # sequence and choose an arbitrary split point because this would make the
245
+ # next sentence prediction task too easy. Instead, we split the input into
246
+ # segments "A" and "B" based on the actual "sentences" provided by the user
247
+ # input.
248
+ instances = []
249
+ current_chunk = []
250
+ current_length = 0
251
+ i = 0
252
+ while i < len(document):
253
+ segment = document[i]
254
+ current_chunk.append(segment)
255
+ current_length += len(segment)
256
+ if i == len(document) - 1 or current_length >= target_seq_length:
257
+ if current_chunk:
258
+ # `a_end` is how many segments from `current_chunk` go into the `A`
259
+ # (first) sentence.
260
+ a_end = 1
261
+ if len(current_chunk) >= 2:
262
+ a_end = rng.randint(1, len(current_chunk) - 1)
263
+
264
+ tokens_a = []
265
+ for j in range(a_end):
266
+ tokens_a.extend(current_chunk[j])
267
+
268
+ tokens_b = []
269
+ # Random next
270
+ is_random_next = False
271
+ if len(current_chunk) == 1 or rng.random() < 0.5:
272
+ is_random_next = True
273
+ target_b_length = target_seq_length - len(tokens_a)
274
+
275
+ # This should rarely go for more than one iteration for large
276
+ # corpora. However, just to be careful, we try to make sure that
277
+ # the random document is not the same as the document
278
+ # we're processing.
279
+ for _ in range(10):
280
+ random_document_index = rng.randint(0, len(all_documents) - 1)
281
+ if random_document_index != document_index:
282
+ break
283
+
284
+ random_document = all_documents[random_document_index]
285
+ random_start = rng.randint(0, len(random_document) - 1)
286
+ for j in range(random_start, len(random_document)):
287
+ tokens_b.extend(random_document[j])
288
+ if len(tokens_b) >= target_b_length:
289
+ break
290
+ # We didn't actually use these segments so we "put them back" so
291
+ # they don't go to waste.
292
+ num_unused_segments = len(current_chunk) - a_end
293
+ i -= num_unused_segments
294
+ # Actual next
295
+ else:
296
+ is_random_next = False
297
+ for j in range(a_end, len(current_chunk)):
298
+ tokens_b.extend(current_chunk[j])
299
+ truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)
300
+
301
+ assert len(tokens_a) >= 1
302
+ assert len(tokens_b) >= 1
303
+
304
+ tokens = []
305
+ segment_ids = []
306
+ tokens.append("[CLS]")
307
+ segment_ids.append(0)
308
+ for token in tokens_a:
309
+ tokens.append(token)
310
+ segment_ids.append(0)
311
+
312
+ tokens.append("[SEP]")
313
+ segment_ids.append(0)
314
+
315
+ for token in tokens_b:
316
+ tokens.append(token)
317
+ segment_ids.append(1)
318
+ tokens.append("[SEP]")
319
+ segment_ids.append(1)
320
+
321
+ (tokens, masked_lm_positions,
322
+ masked_lm_labels) = create_masked_lm_predictions(
323
+ tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)
324
+ instance = TrainingInstance(
325
+ tokens=tokens,
326
+ segment_ids=segment_ids,
327
+ is_random_next=is_random_next,
328
+ masked_lm_positions=masked_lm_positions,
329
+ masked_lm_labels=masked_lm_labels)
330
+ instances.append(instance)
331
+ current_chunk = []
332
+ current_length = 0
333
+ i += 1
334
+
335
+ return instances
336
+
337
+
338
+ MaskedLmInstance = collections.namedtuple("MaskedLmInstance",
339
+ ["index", "label"])
340
+
341
+
342
+ def create_masked_lm_predictions(tokens, masked_lm_prob,
343
+ max_predictions_per_seq, vocab_words, rng):
344
+ """Creates the predictions for the masked LM objective."""
345
+
346
+ cand_indexes = []
347
+ for (i, token) in enumerate(tokens):
348
+ if token == "[CLS]" or token == "[SEP]":
349
+ continue
350
+ # Whole Word Masking means that we mask all of the wordpieces
351
+ # corresponding to an original word. When a word has been split into
352
+ # WordPieces, the first token does not have any marker and any subsequent
353
+ # tokens are prefixed with ##. So whenever we see the ## token, we
354
+ # append it to the previous set of word indexes.
355
+ #
356
+ # Note that Whole Word Masking does *not* change the training code
357
+ # at all -- we still predict each WordPiece independently, softmaxed
358
+ # over the entire vocabulary.
359
+ if (FLAGS.do_whole_word_mask and len(cand_indexes) >= 1 and
360
+ token.startswith("##")):
361
+ cand_indexes[-1].append(i)
362
+ else:
363
+ cand_indexes.append([i])
364
+
365
+ rng.shuffle(cand_indexes)
366
+
367
+ output_tokens = list(tokens)
368
+
369
+ num_to_predict = min(max_predictions_per_seq,
370
+ max(1, int(round(len(tokens) * masked_lm_prob))))
371
+
372
+ masked_lms = []
373
+ covered_indexes = set()
374
+ for index_set in cand_indexes:
375
+ if len(masked_lms) >= num_to_predict:
376
+ break
377
+ # If adding a whole-word mask would exceed the maximum number of
378
+ # predictions, then just skip this candidate.
379
+ if len(masked_lms) + len(index_set) > num_to_predict:
380
+ continue
381
+ is_any_index_covered = False
382
+ for index in index_set:
383
+ if index in covered_indexes:
384
+ is_any_index_covered = True
385
+ break
386
+ if is_any_index_covered:
387
+ continue
388
+ for index in index_set:
389
+ covered_indexes.add(index)
390
+
391
+ masked_token = None
392
+ # 80% of the time, replace with [MASK]
393
+ if rng.random() < 0.8:
394
+ masked_token = "[MASK]"
395
+ else:
396
+ # 10% of the time, keep original
397
+ if rng.random() < 0.5:
398
+ masked_token = tokens[index]
399
+ # 10% of the time, replace with random word
400
+ else:
401
+ masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]
402
+
403
+ output_tokens[index] = masked_token
404
+
405
+ masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))
406
+ assert len(masked_lms) <= num_to_predict
407
+ masked_lms = sorted(masked_lms, key=lambda x: x.index)
408
+
409
+ masked_lm_positions = []
410
+ masked_lm_labels = []
411
+ for p in masked_lms:
412
+ masked_lm_positions.append(p.index)
413
+ masked_lm_labels.append(p.label)
414
+
415
+ return (output_tokens, masked_lm_positions, masked_lm_labels)
416
+
417
+
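A quick worked example of the selection cap computed in create_masked_lm_predictions above (plain numbers, no module state needed):

# num_to_predict = min(max_predictions_per_seq, max(1, round(len(tokens) * masked_lm_prob)))
tokens = ["[CLS]"] + ["tok"] * 126 + ["[SEP]"]    # a full 128-token sequence
num_to_predict = min(20, max(1, int(round(len(tokens) * 0.15))))
assert num_to_predict == 19                       # round(128 * 0.15) = 19, under the cap of 20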
418
+ def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):
419
+ """Truncates a pair of sequences to a maximum sequence length."""
420
+ while True:
421
+ total_length = len(tokens_a) + len(tokens_b)
422
+ if total_length <= max_num_tokens:
423
+ break
424
+
425
+ trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
426
+ assert len(trunc_tokens) >= 1
427
+
428
+ # We want to sometimes truncate from the front and sometimes from the
429
+ # back to add more randomness and avoid biases.
430
+ if rng.random() < 0.5:
431
+ del trunc_tokens[0]
432
+ else:
433
+ trunc_tokens.pop()
434
+
435
+
436
+ def main(_):
437
+ tf.logging.set_verbosity(tf.logging.INFO)
438
+
439
+ tokenizer = tokenization.FullTokenizer(
440
+ vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
441
+
442
+ input_files = []
443
+ for input_pattern in FLAGS.input_file.split(","):
444
+ input_files.extend(tf.gfile.Glob(input_pattern))
445
+
446
+ tf.logging.info("*** Reading from input files ***")
447
+ for input_file in input_files:
448
+ tf.logging.info(" %s", input_file)
449
+
450
+ rng = random.Random(FLAGS.random_seed)
451
+ instances = create_training_instances(
452
+ input_files, tokenizer, FLAGS.max_seq_length, FLAGS.dupe_factor,
453
+ FLAGS.short_seq_prob, FLAGS.masked_lm_prob, FLAGS.max_predictions_per_seq,
454
+ rng)
455
+
456
+ output_files = FLAGS.output_file.split(",")
457
+ tf.logging.info("*** Writing to output files ***")
458
+ for output_file in output_files:
459
+ tf.logging.info(" %s", output_file)
460
+
461
+ write_instance_to_example_files(instances, tokenizer, FLAGS.max_seq_length,
462
+ FLAGS.max_predictions_per_seq, output_files)
463
+
464
+
465
+ if __name__ == "__main__":
466
+ flags.mark_flag_as_required("input_file")
467
+ flags.mark_flag_as_required("output_file")
468
+ flags.mark_flag_as_required("vocab_file")
469
+ tf.app.run()
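A hedged sketch of driving this script from Python rather than the command line. The file names (including the repo's `vocab.txt` and `sample_text.txt`) are illustrative, and the flags must be parsed first because create_masked_lm_predictions reads `FLAGS.do_whole_word_mask`:

# Sketch only, under the assumptions stated above.
import random
import tokenization
import create_pretraining_data as cpd

cpd.FLAGS(["create_pretraining_data"])    # parse flags so defaults are readable
tokenizer = tokenization.FullTokenizer(vocab_file="vocab.txt", do_lower_case=True)
rng = random.Random(cpd.FLAGS.random_seed)

instances = cpd.create_training_instances(
    ["sample_text.txt"], tokenizer, max_seq_length=128, dupe_factor=10,
    short_seq_prob=0.1, masked_lm_prob=0.15, max_predictions_per_seq=20, rng=rng)
print(instances[0])   # TrainingInstance.__str__ shows tokens and mask positions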
RIS-DMMI/bert/extract_features.py ADDED
@@ -0,0 +1,419 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Extract pre-computed feature vectors from BERT."""
16
+
17
+ from __future__ import absolute_import
18
+ from __future__ import division
19
+ from __future__ import print_function
20
+
21
+ import codecs
22
+ import collections
23
+ import json
24
+ import re
25
+
26
+ import modeling
27
+ import tokenization
28
+ import tensorflow as tf
29
+
30
+ flags = tf.flags
31
+
32
+ FLAGS = flags.FLAGS
33
+
34
+ flags.DEFINE_string("input_file", None, "")
35
+
36
+ flags.DEFINE_string("output_file", None, "")
37
+
38
+ flags.DEFINE_string("layers", "-1,-2,-3,-4", "")
39
+
40
+ flags.DEFINE_string(
41
+ "bert_config_file", None,
42
+ "The config json file corresponding to the pre-trained BERT model. "
43
+ "This specifies the model architecture.")
44
+
45
+ flags.DEFINE_integer(
46
+ "max_seq_length", 128,
47
+ "The maximum total input sequence length after WordPiece tokenization. "
48
+ "Sequences longer than this will be truncated, and sequences shorter "
49
+ "than this will be padded.")
50
+
51
+ flags.DEFINE_string(
52
+ "init_checkpoint", None,
53
+ "Initial checkpoint (usually from a pre-trained BERT model).")
54
+
55
+ flags.DEFINE_string("vocab_file", None,
56
+ "The vocabulary file that the BERT model was trained on.")
57
+
58
+ flags.DEFINE_bool(
59
+ "do_lower_case", True,
60
+ "Whether to lower case the input text. Should be True for uncased "
61
+ "models and False for cased models.")
62
+
63
+ flags.DEFINE_integer("batch_size", 32, "Batch size for predictions.")
64
+
65
+ flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
66
+
67
+ flags.DEFINE_string("master", None,
68
+ "If using a TPU, the address of the master.")
69
+
70
+ flags.DEFINE_integer(
71
+ "num_tpu_cores", 8,
72
+ "Only used if `use_tpu` is True. Total number of TPU cores to use.")
73
+
74
+ flags.DEFINE_bool(
75
+ "use_one_hot_embeddings", False,
76
+ "If True, tf.one_hot will be used for embedding lookups, otherwise "
77
+ "tf.nn.embedding_lookup will be used. On TPUs, this should be True "
78
+ "since it is much faster.")
79
+
80
+
81
+ class InputExample(object):
82
+
83
+ def __init__(self, unique_id, text_a, text_b):
84
+ self.unique_id = unique_id
85
+ self.text_a = text_a
86
+ self.text_b = text_b
87
+
88
+
89
+ class InputFeatures(object):
90
+ """A single set of features of data."""
91
+
92
+ def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids):
93
+ self.unique_id = unique_id
94
+ self.tokens = tokens
95
+ self.input_ids = input_ids
96
+ self.input_mask = input_mask
97
+ self.input_type_ids = input_type_ids
98
+
99
+
100
+ def input_fn_builder(features, seq_length):
101
+ """Creates an `input_fn` closure to be passed to TPUEstimator."""
102
+
103
+ all_unique_ids = []
104
+ all_input_ids = []
105
+ all_input_mask = []
106
+ all_input_type_ids = []
107
+
108
+ for feature in features:
109
+ all_unique_ids.append(feature.unique_id)
110
+ all_input_ids.append(feature.input_ids)
111
+ all_input_mask.append(feature.input_mask)
112
+ all_input_type_ids.append(feature.input_type_ids)
113
+
114
+ def input_fn(params):
115
+ """The actual input function."""
116
+ batch_size = params["batch_size"]
117
+
118
+ num_examples = len(features)
119
+
120
+ # This is for demo purposes and does NOT scale to large data sets. We do
121
+ # not use Dataset.from_generator() because that uses tf.py_func which is
122
+ # not TPU compatible. The right way to load data is with TFRecordReader.
123
+ d = tf.data.Dataset.from_tensor_slices({
124
+ "unique_ids":
125
+ tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),
126
+ "input_ids":
127
+ tf.constant(
128
+ all_input_ids, shape=[num_examples, seq_length],
129
+ dtype=tf.int32),
130
+ "input_mask":
131
+ tf.constant(
132
+ all_input_mask,
133
+ shape=[num_examples, seq_length],
134
+ dtype=tf.int32),
135
+ "input_type_ids":
136
+ tf.constant(
137
+ all_input_type_ids,
138
+ shape=[num_examples, seq_length],
139
+ dtype=tf.int32),
140
+ })
141
+
142
+ d = d.batch(batch_size=batch_size, drop_remainder=False)
143
+ return d
144
+
145
+ return input_fn
146
+
147
+
148
+ def model_fn_builder(bert_config, init_checkpoint, layer_indexes, use_tpu,
149
+ use_one_hot_embeddings):
150
+ """Returns `model_fn` closure for TPUEstimator."""
151
+
152
+ def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
153
+ """The `model_fn` for TPUEstimator."""
154
+
155
+ unique_ids = features["unique_ids"]
156
+ input_ids = features["input_ids"]
157
+ input_mask = features["input_mask"]
158
+ input_type_ids = features["input_type_ids"]
159
+
160
+ model = modeling.BertModel(
161
+ config=bert_config,
162
+ is_training=False,
163
+ input_ids=input_ids,
164
+ input_mask=input_mask,
165
+ token_type_ids=input_type_ids,
166
+ use_one_hot_embeddings=use_one_hot_embeddings)
167
+
168
+ if mode != tf.estimator.ModeKeys.PREDICT:
169
+ raise ValueError("Only PREDICT modes are supported: %s" % (mode))
170
+
171
+ tvars = tf.trainable_variables()
172
+ scaffold_fn = None
173
+ (assignment_map,
174
+ initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(
175
+ tvars, init_checkpoint)
176
+ if use_tpu:
177
+
178
+ def tpu_scaffold():
179
+ tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
180
+ return tf.train.Scaffold()
181
+
182
+ scaffold_fn = tpu_scaffold
183
+ else:
184
+ tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
185
+
186
+ tf.logging.info("**** Trainable Variables ****")
187
+ for var in tvars:
188
+ init_string = ""
189
+ if var.name in initialized_variable_names:
190
+ init_string = ", *INIT_FROM_CKPT*"
191
+ tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
192
+ init_string)
193
+
194
+ all_layers = model.get_all_encoder_layers()
195
+
196
+ predictions = {
197
+ "unique_id": unique_ids,
198
+ }
199
+
200
+ for (i, layer_index) in enumerate(layer_indexes):
201
+ predictions["layer_output_%d" % i] = all_layers[layer_index]
202
+
203
+ output_spec = tf.contrib.tpu.TPUEstimatorSpec(
204
+ mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
205
+ return output_spec
206
+
207
+ return model_fn
208
+
209
+
210
+ def convert_examples_to_features(examples, seq_length, tokenizer):
211
+ """Loads a data file into a list of `InputBatch`s."""
212
+
213
+ features = []
214
+ for (ex_index, example) in enumerate(examples):
215
+ tokens_a = tokenizer.tokenize(example.text_a)
216
+
217
+ tokens_b = None
218
+ if example.text_b:
219
+ tokens_b = tokenizer.tokenize(example.text_b)
220
+
221
+ if tokens_b:
222
+ # Modifies `tokens_a` and `tokens_b` in place so that the total
223
+ # length is less than the specified length.
224
+ # Account for [CLS], [SEP], [SEP] with "- 3"
225
+ _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)
226
+ else:
227
+ # Account for [CLS] and [SEP] with "- 2"
228
+ if len(tokens_a) > seq_length - 2:
229
+ tokens_a = tokens_a[0:(seq_length - 2)]
230
+
231
+ # The convention in BERT is:
232
+ # (a) For sequence pairs:
233
+ # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
234
+ # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
235
+ # (b) For single sequences:
236
+ # tokens: [CLS] the dog is hairy . [SEP]
237
+ # type_ids: 0 0 0 0 0 0 0
238
+ #
239
+ # Where "type_ids" are used to indicate whether this is the first
240
+ # sequence or the second sequence. The embedding vectors for `type=0` and
241
+ # `type=1` were learned during pre-training and are added to the wordpiece
242
+ # embedding vector (and position vector). This is not *strictly* necessary
243
+ # since the [SEP] token unambiguously separates the sequences, but it makes
244
+ # it easier for the model to learn the concept of sequences.
245
+ #
246
+ # For classification tasks, the first vector (corresponding to [CLS]) is
247
+ # used as the "sentence vector". Note that this only makes sense because
248
+ # the entire model is fine-tuned.
249
+ tokens = []
250
+ input_type_ids = []
251
+ tokens.append("[CLS]")
252
+ input_type_ids.append(0)
253
+ for token in tokens_a:
254
+ tokens.append(token)
255
+ input_type_ids.append(0)
256
+ tokens.append("[SEP]")
257
+ input_type_ids.append(0)
258
+
259
+ if tokens_b:
260
+ for token in tokens_b:
261
+ tokens.append(token)
262
+ input_type_ids.append(1)
263
+ tokens.append("[SEP]")
264
+ input_type_ids.append(1)
265
+
266
+ input_ids = tokenizer.convert_tokens_to_ids(tokens)
267
+
268
+ # The mask has 1 for real tokens and 0 for padding tokens. Only real
269
+ # tokens are attended to.
270
+ input_mask = [1] * len(input_ids)
271
+
272
+ # Zero-pad up to the sequence length.
273
+ while len(input_ids) < seq_length:
274
+ input_ids.append(0)
275
+ input_mask.append(0)
276
+ input_type_ids.append(0)
277
+
278
+ assert len(input_ids) == seq_length
279
+ assert len(input_mask) == seq_length
280
+ assert len(input_type_ids) == seq_length
281
+
282
+ if ex_index < 5:
283
+ tf.logging.info("*** Example ***")
284
+ tf.logging.info("unique_id: %s" % (example.unique_id))
285
+ tf.logging.info("tokens: %s" % " ".join(
286
+ [tokenization.printable_text(x) for x in tokens]))
287
+ tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
288
+ tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
289
+ tf.logging.info(
290
+ "input_type_ids: %s" % " ".join([str(x) for x in input_type_ids]))
291
+
292
+ features.append(
293
+ InputFeatures(
294
+ unique_id=example.unique_id,
295
+ tokens=tokens,
296
+ input_ids=input_ids,
297
+ input_mask=input_mask,
298
+ input_type_ids=input_type_ids))
299
+ return features
300
+
301
+
302
+ def _truncate_seq_pair(tokens_a, tokens_b, max_length):
303
+ """Truncates a sequence pair in place to the maximum length."""
304
+
305
+ # This is a simple heuristic which will always truncate the longer sequence
306
+ # one token at a time. This makes more sense than truncating an equal percent
307
+ # of tokens from each, since if one sequence is very short then each token
308
+ # that's truncated likely contains more information than a longer sequence.
309
+ while True:
310
+ total_length = len(tokens_a) + len(tokens_b)
311
+ if total_length <= max_length:
312
+ break
313
+ if len(tokens_a) > len(tokens_b):
314
+ tokens_a.pop()
315
+ else:
316
+ tokens_b.pop()
317
+
318
+
319
+ def read_examples(input_file):
320
+ """Read a list of `InputExample`s from an input file."""
321
+ examples = []
322
+ unique_id = 0
323
+ with tf.gfile.GFile(input_file, "r") as reader:
324
+ while True:
325
+ line = tokenization.convert_to_unicode(reader.readline())
326
+ if not line:
327
+ break
328
+ line = line.strip()
329
+ text_a = None
330
+ text_b = None
331
+ m = re.match(r"^(.*) \|\|\| (.*)$", line)
332
+ if m is None:
333
+ text_a = line
334
+ else:
335
+ text_a = m.group(1)
336
+ text_b = m.group(2)
337
+ examples.append(
338
+ InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))
339
+ unique_id += 1
340
+ return examples
341
+
342
+
343
+ def main(_):
344
+ tf.logging.set_verbosity(tf.logging.INFO)
345
+
346
+ layer_indexes = [int(x) for x in FLAGS.layers.split(",")]
347
+
348
+ bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
349
+
350
+ tokenizer = tokenization.FullTokenizer(
351
+ vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
352
+
353
+ is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
354
+ run_config = tf.contrib.tpu.RunConfig(
355
+ master=FLAGS.master,
356
+ tpu_config=tf.contrib.tpu.TPUConfig(
357
+ num_shards=FLAGS.num_tpu_cores,
358
+ per_host_input_for_training=is_per_host))
359
+
360
+ examples = read_examples(FLAGS.input_file)
361
+
362
+ features = convert_examples_to_features(
363
+ examples=examples, seq_length=FLAGS.max_seq_length, tokenizer=tokenizer)
364
+
365
+ unique_id_to_feature = {}
366
+ for feature in features:
367
+ unique_id_to_feature[feature.unique_id] = feature
368
+
369
+ model_fn = model_fn_builder(
370
+ bert_config=bert_config,
371
+ init_checkpoint=FLAGS.init_checkpoint,
372
+ layer_indexes=layer_indexes,
373
+ use_tpu=FLAGS.use_tpu,
374
+ use_one_hot_embeddings=FLAGS.use_one_hot_embeddings)
375
+
376
+ # If TPU is not available, this will fall back to normal Estimator on CPU
377
+ # or GPU.
378
+ estimator = tf.contrib.tpu.TPUEstimator(
379
+ use_tpu=FLAGS.use_tpu,
380
+ model_fn=model_fn,
381
+ config=run_config,
382
+ predict_batch_size=FLAGS.batch_size)
383
+
384
+ input_fn = input_fn_builder(
385
+ features=features, seq_length=FLAGS.max_seq_length)
386
+
387
+ with codecs.getwriter("utf-8")(tf.gfile.Open(FLAGS.output_file,
388
+ "w")) as writer:
389
+ for result in estimator.predict(input_fn, yield_single_examples=True):
390
+ unique_id = int(result["unique_id"])
391
+ feature = unique_id_to_feature[unique_id]
392
+ output_json = collections.OrderedDict()
393
+ output_json["linex_index"] = unique_id
394
+ all_features = []
395
+ for (i, token) in enumerate(feature.tokens):
396
+ all_layers = []
397
+ for (j, layer_index) in enumerate(layer_indexes):
398
+ layer_output = result["layer_output_%d" % j]
399
+ layers = collections.OrderedDict()
400
+ layers["index"] = layer_index
401
+ layers["values"] = [
402
+ round(float(x), 6) for x in layer_output[i:(i + 1)].flat
403
+ ]
404
+ all_layers.append(layers)
405
+ features = collections.OrderedDict()
406
+ features["token"] = token
407
+ features["layers"] = all_layers
408
+ all_features.append(features)
409
+ output_json["features"] = all_features
410
+ writer.write(json.dumps(output_json) + "\n")
411
+
412
+
413
+ if __name__ == "__main__":
414
+ flags.mark_flag_as_required("input_file")
415
+ flags.mark_flag_as_required("vocab_file")
416
+ flags.mark_flag_as_required("bert_config_file")
417
+ flags.mark_flag_as_required("init_checkpoint")
418
+ flags.mark_flag_as_required("output_file")
419
+ tf.app.run()
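For context, a small sketch of the input convention `read_examples` parses above: a line is either a single sentence, or a `text_a ||| text_b` pair (the file name below is illustrative):

# Sketch only: exercising read_examples() on a two-line input file.
import extract_features

with open("input.txt", "w") as f:
    f.write("Who was Jim Henson ?\n")
    f.write("Who was Jim Henson ? ||| Jim Henson was a puppeteer\n")

examples = extract_features.read_examples("input.txt")
assert examples[0].text_b is None                          # single sequence
assert examples[1].text_b == "Jim Henson was a puppeteer"  # pair split on " ||| "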
RIS-DMMI/bert/file_utils.py ADDED
@@ -0,0 +1,816 @@
1
+ """
2
+ Utilities for working with the local dataset cache.
3
+ This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
4
+ Copyright by the AllenNLP authors.
5
+ """
6
+
7
+ import fnmatch
8
+ import json
9
+ import logging
10
+ import os
11
+ import shutil
12
+ import sys
13
+ import tarfile
14
+ import tempfile
15
+ from contextlib import contextmanager
16
+ from functools import partial, wraps
17
+ from hashlib import sha256
18
+ from pathlib import Path
19
+ from typing import Dict, Optional, Union
20
+ from urllib.parse import urlparse
21
+ from zipfile import ZipFile, is_zipfile
22
+ import pdb
23
+ import requests
24
+ from filelock import FileLock
25
+ from tqdm.auto import tqdm
26
+
27
+ #from . import __version__
28
+ __version__ = "3.0.2"
29
+
30
+ logger = logging.getLogger(__name__) # pylint: disable=invalid-name
31
+
32
+ try:
33
+ USE_TF = os.environ.get("USE_TF", "AUTO").upper()
34
+ USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
35
+ if USE_TORCH in ("1", "ON", "YES", "AUTO") and USE_TF not in ("1", "ON", "YES"):
36
+ import torch
37
+
38
+ _torch_available = True # pylint: disable=invalid-name
39
+ logger.info("PyTorch version {} available.".format(torch.__version__))
40
+ else:
41
+ logger.info("Disabling PyTorch because USE_TF is set")
42
+ _torch_available = False
43
+ except ImportError:
44
+ _torch_available = False # pylint: disable=invalid-name
45
+
46
+ try:
47
+ USE_TF = os.environ.get("USE_TF", "AUTO").upper()
48
+ USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
49
+
50
+ if USE_TF in ("1", "ON", "YES", "AUTO") and USE_TORCH not in ("1", "ON", "YES"):
51
+ import tensorflow as tf
52
+
53
+ assert hasattr(tf, "__version__") and int(tf.__version__[0]) >= 2
54
+ _tf_available = True # pylint: disable=invalid-name
55
+ logger.info("TensorFlow version {} available.".format(tf.__version__))
56
+ else:
57
+ logger.info("Disabling Tensorflow because USE_TORCH is set")
58
+ _tf_available = False
59
+ except (ImportError, AssertionError):
60
+ _tf_available = False # pylint: disable=invalid-name
61
+
62
+
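A short sketch of the framework toggles above. The module name `file_utils` matches this file, but importing it standalone is an assumption, and the environment variable must be set before the import:

# Sketch only: USE_TF / USE_TORCH gate which framework this module reports.
import os
os.environ["USE_TF"] = "1"      # must happen before the import below
import file_utils

assert not file_utils.is_torch_available()   # torch path disabled by USE_TF=1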
63
+ try:
64
+ from torch.hub import _get_torch_home
65
+
66
+ torch_cache_home = _get_torch_home()
67
+ except ImportError:
68
+ torch_cache_home = os.path.expanduser(
69
+ os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
70
+ )
71
+
72
+
73
+ try:
74
+ import torch_xla.core.xla_model as xm # noqa: F401
75
+
76
+ if _torch_available:
77
+ _torch_tpu_available = True # pylint: disable=invalid-name
78
+ else:
79
+ _torch_tpu_available = False
80
+ except ImportError:
81
+ _torch_tpu_available = False
82
+
83
+
84
+ try:
85
+ import psutil # noqa: F401
86
+
87
+ _psutil_available = True
88
+
89
+ except ImportError:
90
+ _psutil_available = False
91
+
92
+
93
+ try:
94
+ import py3nvml # noqa: F401
95
+
96
+ _py3nvml_available = True
97
+
98
+ except ImportError:
99
+ _py3nvml_available = False
100
+
101
+
102
+ try:
103
+ from apex import amp # noqa: F401
104
+
105
+ _has_apex = True
106
+ except ImportError:
107
+ _has_apex = False
108
+
109
+ default_cache_path = os.path.join(torch_cache_home, "transformers")
110
+
111
+
112
+ PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
113
+ PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
114
+ TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
115
+
116
+ WEIGHTS_NAME = "pytorch_model.bin"
117
+ TF2_WEIGHTS_NAME = "tf_model.h5"
118
+ TF_WEIGHTS_NAME = "model.ckpt"
119
+ CONFIG_NAME = "config.json"
120
+ MODEL_CARD_NAME = "modelcard.json"
121
+
122
+
123
+ MULTIPLE_CHOICE_DUMMY_INPUTS = [[[0], [1]], [[0], [1]]]
124
+ DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
125
+ DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
126
+
127
+ S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
128
+ CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
129
+
130
+
131
+ def is_torch_available():
132
+ return _torch_available
133
+
134
+
135
+ def is_tf_available():
136
+ return _tf_available
137
+
138
+
139
+ def is_torch_tpu_available():
140
+ return _torch_tpu_available
141
+
142
+
143
+ def is_psutil_available():
144
+ return _psutil_available
145
+
146
+
147
+ def is_py3nvml_available():
148
+ return _py3nvml_available
149
+
150
+
151
+ def is_apex_available():
152
+ return _has_apex
153
+
154
+
155
+ def add_start_docstrings(*docstr):
156
+ def docstring_decorator(fn):
157
+ fn.__doc__ = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "")
158
+ return fn
159
+
160
+ return docstring_decorator
161
+
162
+
163
+ def add_start_docstrings_to_callable(*docstr):
164
+ def docstring_decorator(fn):
165
+ class_name = ":class:`~transformers.{}`".format(fn.__qualname__.split(".")[0])
166
+ intro = " The {} forward method, overrides the :func:`__call__` special method.".format(class_name)
167
+ note = r"""
168
+
169
+ .. note::
170
+ Although the recipe for forward pass needs to be defined within
171
+ this function, one should call the :class:`Module` instance afterwards
172
+ instead of this since the former takes care of running the
173
+ pre and post processing steps while the latter silently ignores them.
174
+ """
175
+ fn.__doc__ = intro + note + "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "")
176
+ return fn
177
+
178
+ return docstring_decorator
179
+
180
+
181
+ def add_end_docstrings(*docstr):
182
+ def docstring_decorator(fn):
183
+ fn.__doc__ = fn.__doc__ + "".join(docstr)
184
+ return fn
185
+
186
+ return docstring_decorator
187
+
188
+
189
+ PT_TOKEN_CLASSIFICATION_SAMPLE = r"""
190
+ Example::
191
+
192
+ >>> from transformers import {tokenizer_class}, {model_class}
193
+ >>> import torch
194
+
195
+ >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
196
+ >>> model = {model_class}.from_pretrained('{checkpoint}')
197
+
198
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
199
+ >>> labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1
200
+
201
+ >>> outputs = model(**inputs, labels=labels)
202
+ >>> loss, scores = outputs[:2]
203
+ """
204
+
205
+ PT_QUESTION_ANSWERING_SAMPLE = r"""
206
+ Example::
207
+
208
+ >>> from transformers import {tokenizer_class}, {model_class}
209
+ >>> import torch
210
+
211
+ >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
212
+ >>> model = {model_class}.from_pretrained('{checkpoint}')
213
+
214
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
215
+ >>> start_positions = torch.tensor([1])
216
+ >>> end_positions = torch.tensor([3])
217
+
218
+ >>> outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions)
219
+ >>> loss, start_scores, end_scores = outputs[:3]
220
+ """
221
+
222
+ PT_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
223
+ Example::
224
+
225
+ >>> from transformers import {tokenizer_class}, {model_class}
226
+ >>> import torch
227
+
228
+ >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
229
+ >>> model = {model_class}.from_pretrained('{checkpoint}')
230
+
231
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
232
+ >>> labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
233
+ >>> outputs = model(**inputs, labels=labels)
234
+ >>> loss, logits = outputs[:2]
235
+ """
236
+
237
+ PT_MASKED_LM_SAMPLE = r"""
238
+ Example::
239
+
240
+ >>> from transformers import {tokenizer_class}, {model_class}
241
+ >>> import torch
242
+
243
+ >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
244
+ >>> model = {model_class}.from_pretrained('{checkpoint}')
245
+
246
+ >>> input_ids = tokenizer("Hello, my dog is cute", return_tensors="pt")["input_ids"]
247
+
248
+ >>> outputs = model(input_ids, labels=input_ids)
249
+ >>> loss, prediction_scores = outputs[:2]
250
+ """
251
+
252
+ PT_BASE_MODEL_SAMPLE = r"""
253
+ Example::
254
+
255
+ >>> from transformers import {tokenizer_class}, {model_class}
256
+ >>> import torch
257
+
258
+ >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
259
+ >>> model = {model_class}.from_pretrained('{checkpoint}')
260
+
261
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
262
+ >>> outputs = model(**inputs)
263
+
264
+ >>> last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
265
+ """
266
+
267
+ PT_MULTIPLE_CHOICE_SAMPLE = r"""
268
+ Example::
269
+
270
+ >>> from transformers import {tokenizer_class}, {model_class}
271
+ >>> import torch
272
+
273
+ >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
274
+ >>> model = {model_class}.from_pretrained('{checkpoint}')
275
+
276
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
277
+ >>> choice0 = "It is eaten with a fork and a knife."
278
+ >>> choice1 = "It is eaten while held in the hand."
279
+ >>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1
280
+
281
+ >>> encoding = tokenizer([[prompt, prompt], [choice0, choice1]], return_tensors='pt', padding=True)
282
+ >>> outputs = model(**{{k: v.unsqueeze(0) for k,v in encoding.items()}}, labels=labels) # batch size is 1
283
+
284
+ >>> # the linear classifier still needs to be trained
285
+ >>> loss, logits = outputs[:2]
286
+ """
287
+
288
+ PT_CAUSAL_LM_SAMPLE = r"""
289
+ Example::
290
+
291
+ >>> import torch
292
+ >>> from transformers import {tokenizer_class}, {model_class}
293
+
294
+ >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
295
+ >>> model = {model_class}.from_pretrained('{checkpoint}')
296
+
297
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
298
+ >>> outputs = model(**inputs, labels=inputs["input_ids"])
299
+ >>> loss, logits = outputs[:2]
300
+ """
301
+
302
+ TF_TOKEN_CLASSIFICATION_SAMPLE = r"""
303
+ Example::
304
+
305
+ >>> from transformers import {tokenizer_class}, {model_class}
306
+ >>> import tensorflow as tf
307
+
308
+ >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
309
+ >>> model = {model_class}.from_pretrained('{checkpoint}')
310
+
311
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
312
+ >>> input_ids = inputs["input_ids"]
313
+ >>> inputs["labels"] = tf.reshape(tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids))) # Batch size 1
314
+
315
+ >>> outputs = model(inputs)
316
+ >>> loss, scores = outputs[:2]
317
+ """
318
+
319
+ TF_QUESTION_ANSWERING_SAMPLE = r"""
320
+ Example::
321
+
322
+ >>> from transformers import {tokenizer_class}, {model_class}
323
+ >>> import tensorflow as tf
324
+
325
+ >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
326
+ >>> model = {model_class}.from_pretrained('{checkpoint}')
327
+
328
+ >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
329
+ >>> input_dict = tokenizer(question, text, return_tensors='tf')
330
+ >>> start_scores, end_scores = model(input_dict)
331
+
332
+ >>> all_tokens = tokenizer.convert_ids_to_tokens(input_dict["input_ids"].numpy()[0])
333
+ >>> answer = ' '.join(all_tokens[tf.math.argmax(start_scores, 1)[0] : tf.math.argmax(end_scores, 1)[0]+1])
334
+ """
335
+
336
+ TF_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
337
+ Example::
338
+
339
+ >>> from transformers import {tokenizer_class}, {model_class}
340
+ >>> import tensorflow as tf
341
+
342
+ >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
343
+ >>> model = {model_class}.from_pretrained('{checkpoint}')
344
+
345
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
346
+ >>> inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1
347
+
348
+ >>> outputs = model(inputs)
349
+ >>> loss, logits = outputs[:2]
350
+ """
351
+
352
+ TF_MASKED_LM_SAMPLE = r"""
353
+ Example::
+
354
+ >>> from transformers import {tokenizer_class}, {model_class}
355
+ >>> import tensorflow as tf
356
+
357
+ >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
358
+ >>> model = {model_class}.from_pretrained('{checkpoint}')
359
+
360
+ >>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
361
+
362
+ >>> outputs = model(input_ids)
363
+ >>> prediction_scores = outputs[0]
364
+ """
365
+
366
+ TF_BASE_MODEL_SAMPLE = r"""
367
+ Example::
368
+
369
+ >>> from transformers import {tokenizer_class}, {model_class}
370
+ >>> import tensorflow as tf
371
+
372
+ >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
373
+ >>> model = {model_class}.from_pretrained('{checkpoint}')
374
+
375
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
376
+ >>> outputs = model(inputs)
377
+
378
+ >>> last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
379
+ """
380
+
381
+ TF_MULTIPLE_CHOICE_SAMPLE = r"""
382
+ Example::
383
+
384
+ >>> from transformers import {tokenizer_class}, {model_class}
385
+ >>> import tensorflow as tf
386
+
387
+ >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
388
+ >>> model = {model_class}.from_pretrained('{checkpoint}')
389
+
390
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
391
+ >>> choice0 = "It is eaten with a fork and a knife."
392
+ >>> choice1 = "It is eaten while held in the hand."
393
+
394
+ >>> encoding = tokenizer([[prompt, prompt], [choice0, choice1]], return_tensors='tf', padding=True)
395
+ >>> inputs = {{k: tf.expand_dims(v, 0) for k, v in encoding.items()}}
396
+ >>> outputs = model(inputs) # batch size is 1
397
+
398
+ >>> # the linear classifier still needs to be trained
399
+ >>> logits = outputs[0]
400
+ """
401
+
402
+ TF_CAUSAL_LM_SAMPLE = r"""
403
+ Example::
404
+
405
+ >>> from transformers import {tokenizer_class}, {model_class}
406
+ >>> import tensorflow as tf
407
+
408
+ >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
409
+ >>> model = {model_class}.from_pretrained('{checkpoint}')
410
+
411
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
412
+ >>> outputs = model(inputs)
413
+ >>> logits = outputs[0]
414
+ """
415
+
416
+
417
+ def add_code_sample_docstrings(*docstr, tokenizer_class=None, checkpoint=None):
418
+ def docstring_decorator(fn):
419
+ model_class = fn.__qualname__.split(".")[0]
420
+ is_tf_class = model_class[:2] == "TF"
421
+
422
+ if "SequenceClassification" in model_class:
423
+ code_sample = TF_SEQUENCE_CLASSIFICATION_SAMPLE if is_tf_class else PT_SEQUENCE_CLASSIFICATION_SAMPLE
424
+ elif "QuestionAnswering" in model_class:
425
+ code_sample = TF_QUESTION_ANSWERING_SAMPLE if is_tf_class else PT_QUESTION_ANSWERING_SAMPLE
426
+ elif "TokenClassification" in model_class:
427
+ code_sample = TF_TOKEN_CLASSIFICATION_SAMPLE if is_tf_class else PT_TOKEN_CLASSIFICATION_SAMPLE
428
+ elif "MultipleChoice" in model_class:
429
+ code_sample = TF_MULTIPLE_CHOICE_SAMPLE if is_tf_class else PT_MULTIPLE_CHOICE_SAMPLE
430
+ elif "MaskedLM" in model_class:
431
+ code_sample = TF_MASKED_LM_SAMPLE if is_tf_class else PT_MASKED_LM_SAMPLE
432
+ elif "LMHead" in model_class:
433
+ code_sample = TF_CAUSAL_LM_SAMPLE if is_tf_class else PT_CAUSAL_LM_SAMPLE
434
+ elif "Model" in model_class:
435
+ code_sample = TF_BASE_MODEL_SAMPLE if is_tf_class else PT_BASE_MODEL_SAMPLE
436
+ else:
437
+ raise ValueError(f"Docstring can't be built for model {model_class}")
438
+
439
+ built_doc = code_sample.format(model_class=model_class, tokenizer_class=tokenizer_class, checkpoint=checkpoint)
440
+ fn.__doc__ = (fn.__doc__ or "") + "".join(docstr) + built_doc
441
+ return fn
442
+
443
+ return docstring_decorator
444
+
445
+
446
+ def is_remote_url(url_or_filename):
447
+ parsed = urlparse(url_or_filename)
448
+ return parsed.scheme in ("http", "https")
449
+
450
+
451
+ def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
452
+ """
453
+ Resolve a model identifier, and a file name, to an HF-hosted url
454
+ on either S3 or Cloudfront (a Content Delivery Network, or CDN).
455
+
456
+ Cloudfront is replicated over the globe so downloads are way faster
457
+ for the end user (and it also lowers our bandwidth costs). However, it
458
+ is more aggressively cached by default, so may not always reflect the
459
+ latest changes to the underlying file (default TTL is 24 hours).
460
+
461
+ In terms of client-side caching from this library, even though
462
+ Cloudfront relays the ETags from S3, using one or the other
463
+ (or switching from one to the other) will affect caching: cached files
464
+ are not shared between the two because the cached file's name contains
465
+ a hash of the url.
466
+ """
468
+ endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
469
+ legacy_format = "/" not in model_id
470
+ if legacy_format:
472
+ return f"{endpoint}/{model_id}-{filename}"
473
+ else:
476
+ return f"{endpoint}/{model_id}/{filename}"
481
+
482
+
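Concretely, the resolver above yields URLs like these. The endpoints are the constants defined earlier in this file; the model id is illustrative, and the module name `file_utils` is assumed:

# Sketch of hf_bucket_url outputs for a legacy (un-namespaced) model id.
from file_utils import hf_bucket_url, CONFIG_NAME, WEIGHTS_NAME

assert hf_bucket_url("bert-base-uncased", CONFIG_NAME) == \
    "https://cdn.huggingface.co/bert-base-uncased-config.json"
assert hf_bucket_url("bert-base-uncased", WEIGHTS_NAME, use_cdn=False) == \
    "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin"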
483
+ def url_to_filename(url, etag=None):
484
+ """
485
+ Convert `url` into a hashed filename in a repeatable way.
486
+ If `etag` is specified, append its hash to the url's, delimited
487
+ by a period.
488
+ If the url ends with .h5 (Keras HDF5 weights) adds '.h5' to the name
489
+ so that TF 2.0 can identify it as a HDF5 file
490
+ (see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1380)
491
+ """
492
+ url_bytes = url.encode("utf-8")
493
+ url_hash = sha256(url_bytes)
494
+ filename = url_hash.hexdigest()
495
+
496
+ if etag:
497
+ etag_bytes = etag.encode("utf-8")
498
+ etag_hash = sha256(etag_bytes)
499
+ filename += "." + etag_hash.hexdigest()
500
+
501
+ if url.endswith(".h5"):
502
+ filename += ".h5"
503
+
504
+ return filename
505
+
506
+
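And the cache-name scheme above, spelled out (the URL and ETag values are illustrative; the module name is assumed as before):

# Sketch: sha256(url), plus "." plus sha256(etag) when an ETag is known.
from hashlib import sha256
from file_utils import url_to_filename

url = "https://cdn.huggingface.co/bert-base-uncased-config.json"
name = url_to_filename(url, etag='"abc123"')
assert name.split(".")[0] == sha256(url.encode("utf-8")).hexdigest()
assert name.split(".")[1] == sha256('"abc123"'.encode("utf-8")).hexdigest()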
507
+ def filename_to_url(filename, cache_dir=None):
508
+ """
509
+ Return the url and etag (which may be ``None``) stored for `filename`.
510
+ Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
511
+ """
512
+ if cache_dir is None:
513
+ cache_dir = TRANSFORMERS_CACHE
514
+ if isinstance(cache_dir, Path):
515
+ cache_dir = str(cache_dir)
516
+
517
+ cache_path = os.path.join(cache_dir, filename)
518
+ if not os.path.exists(cache_path):
519
+ raise EnvironmentError("file {} not found".format(cache_path))
520
+
521
+ meta_path = cache_path + ".json"
522
+ if not os.path.exists(meta_path):
523
+ raise EnvironmentError("file {} not found".format(meta_path))
524
+
525
+ with open(meta_path, encoding="utf-8") as meta_file:
526
+ metadata = json.load(meta_file)
527
+ url = metadata["url"]
528
+ etag = metadata["etag"]
529
+
530
+ return url, etag
531
+
532
+
533
+ def cached_path(
534
+ url_or_filename,
535
+ cache_dir=None,
536
+ force_download=False,
537
+ proxies=None,
538
+ resume_download=False,
539
+ user_agent: Union[Dict, str, None] = None,
540
+ extract_compressed_file=False,
541
+ force_extract=False,
542
+ local_files_only=False,
543
+ ) -> Optional[str]:
544
+ """
545
+ Given something that might be a URL (or might be a local path),
546
+ determine which. If it's a URL, download the file and cache it, and
547
+ return the path to the cached file. If it's already a local path,
548
+ make sure the file exists and then return the path.
549
+ Args:
550
+ cache_dir: specify a cache directory to save the file to (overwrite the default cache dir).
551
+ force_download: if True, re-download the file even if it's already cached in the cache dir.
552
+ resume_download: if True, resume the download if an incompletely received file is found.
553
+ user_agent: Optional string or dict that will be appended to the user-agent on remote requests.
554
+ extract_compressed_file: if True and the path points to a zip or tar file, extract the compressed
555
+ file in a folder alongside the archive.
556
+ force_extract: if True when extract_compressed_file is True and the archive was already extracted,
557
+ re-extract the archive and override the folder where it was extracted.
558
+
559
+ Return:
560
+ None in case of non-recoverable file (non-existent or inaccessible url + no cache on disk).
561
+ Local path (string) otherwise
562
+ """
563
+ if cache_dir is None:
564
+ cache_dir = TRANSFORMERS_CACHE
565
+ if isinstance(url_or_filename, Path):
566
+ url_or_filename = str(url_or_filename)
567
+ if isinstance(cache_dir, Path):
568
+ cache_dir = str(cache_dir)
569
+
570
+ if is_remote_url(url_or_filename):
571
+ # URL, so get it from the cache (downloading if necessary)
572
+ output_path = get_from_cache(
573
+ url_or_filename,
574
+ cache_dir=cache_dir,
575
+ force_download=force_download,
576
+ proxies=proxies,
577
+ resume_download=resume_download,
578
+ user_agent=user_agent,
579
+ local_files_only=local_files_only,
580
+ )
581
+ elif os.path.exists(url_or_filename):
582
+ # File, and it exists.
583
+ output_path = url_or_filename
584
+ elif urlparse(url_or_filename).scheme == "":
585
+ # File, but it doesn't exist.
586
+ raise EnvironmentError("file {} not found".format(url_or_filename))
587
+ else:
588
+ # Something unknown
589
+ raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
590
+
591
+ if extract_compressed_file:
592
+ if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
593
+ return output_path
594
+
595
+ # Path where we extract compressed archives
596
+ # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
597
+ output_dir, output_file = os.path.split(output_path)
598
+ output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
599
+ output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
600
+
601
+ if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
602
+ return output_path_extracted
603
+
604
+ # Prevent parallel extractions
605
+ lock_path = output_path + ".lock"
606
+ with FileLock(lock_path):
607
+ shutil.rmtree(output_path_extracted, ignore_errors=True)
608
+ os.makedirs(output_path_extracted)
609
+ if is_zipfile(output_path):
610
+ with ZipFile(output_path, "r") as zip_file:
611
+ zip_file.extractall(output_path_extracted)
612
+ zip_file.close()
613
+ elif tarfile.is_tarfile(output_path):
614
+ tar_file = tarfile.open(output_path)
615
+ tar_file.extractall(output_path_extracted)
616
+ tar_file.close()
617
+ else:
618
+ raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
619
+
620
+ return output_path_extracted
621
+
622
+ return output_path
623
+
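A hypothetical usage sketch (the url and paths are illustrative); every successful branch returns a local filesystem path:

    local = cached_path("https://example.com/vocab.txt")   # downloaded and cached
    same = cached_path("/tmp/vocab.txt")                   # existing file, returned as-is
    folder = cached_path("/tmp/weights.zip", extract_compressed_file=True)
    # folder -> "/tmp/weights-zip-extracted", per the naming rule above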
624
+
625
+ def http_get(url, temp_file, proxies=None, resume_size=0, user_agent: Union[Dict, str, None] = None):
626
+ ua = "transformers/{}; python/{}".format(__version__, sys.version.split()[0])
627
+ if is_torch_available():
628
+ ua += "; torch/{}".format(torch.__version__)
629
+ if is_tf_available():
630
+ ua += "; tensorflow/{}".format(tf.__version__)
631
+ if isinstance(user_agent, dict):
632
+ ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
633
+ elif isinstance(user_agent, str):
634
+ ua += "; " + user_agent
635
+ headers = {"user-agent": ua}
636
+ if resume_size > 0:
637
+ headers["Range"] = "bytes=%d-" % (resume_size,)
638
+ response = requests.get(url, stream=True, proxies=proxies, headers=headers)
639
+ if response.status_code == 416: # Range not satisfiable
640
+ return
641
+ content_length = response.headers.get("Content-Length")
642
+ total = resume_size + int(content_length) if content_length is not None else None
643
+ progress = tqdm(
644
+ unit="B",
645
+ unit_scale=True,
646
+ total=total,
647
+ initial=resume_size,
648
+ desc="Downloading",
649
+ disable=bool(logger.getEffectiveLevel() == logging.NOTSET),
650
+ )
651
+ for chunk in response.iter_content(chunk_size=1024):
652
+ if chunk: # filter out keep-alive new chunks
653
+ progress.update(len(chunk))
654
+ temp_file.write(chunk)
655
+ progress.close()
656
+
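The resume logic reduces to a standard HTTP Range request; a small sketch with an illustrative url and offset:

    import requests

    resume_size = 1_048_576                        # bytes already on disk (illustrative)
    headers = {"Range": "bytes=%d-" % resume_size}
    # resp = requests.get("https://example.com/big.bin", stream=True, headers=headers)
    # 206 -> server sends only the remaining bytes; 416 -> nothing left to fetch,
    # which is why http_get returns early on status code 416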
657
+
658
+ def get_from_cache(
659
+ url,
660
+ cache_dir=None,
661
+ force_download=False,
662
+ proxies=None,
663
+ etag_timeout=10,
664
+ resume_download=False,
665
+ user_agent: Union[Dict, str, None] = None,
666
+ local_files_only=False,
667
+ ) -> Optional[str]:
668
+ """
669
+ Given a URL, look for the corresponding file in the local cache.
670
+ If it's not there, download it. Then return the path to the cached file.
671
+
672
+ Return:
673
+ None in case of non-recoverable file (non-existent or inaccessible url + no cache on disk).
674
+ Local path (string) otherwise
675
+ """
676
+ if cache_dir is None:
677
+ cache_dir = TRANSFORMERS_CACHE
678
+ if isinstance(cache_dir, Path):
679
+ cache_dir = str(cache_dir)
680
+
681
+ os.makedirs(cache_dir, exist_ok=True)
682
+
683
+ etag = None
684
+ if not local_files_only:
685
+ try:
686
+ response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
687
+ if response.status_code == 200:
688
+ etag = response.headers.get("ETag")
689
+ except (EnvironmentError, requests.exceptions.Timeout):
690
+ # etag is already None
691
+ pass
692
+
693
+ filename = url_to_filename(url, etag)
694
+
695
+ # get cache path to put the file
696
+ cache_path = os.path.join(cache_dir, filename)
697
+
698
+ # etag is None: we have no connection, the url doesn't exist, or it is otherwise inaccessible.
699
+ # try to get the last downloaded one
700
+ if etag is None:
701
+ if os.path.exists(cache_path):
702
+ return cache_path
703
+ else:
704
+ matching_files = [
705
+ file
706
+ for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
707
+ if not file.endswith(".json") and not file.endswith(".lock")
708
+ ]
709
+ if len(matching_files) > 0:
710
+ return os.path.join(cache_dir, matching_files[-1])
711
+ else:
712
+ # If files cannot be found and local_files_only=True,
713
+ # the models might've been found if local_files_only=False
714
+ # Notify the user about that
715
+ if local_files_only:
716
+ raise ValueError(
717
+ "Cannot find the requested files in the cached path and outgoing traffic has been"
718
+ " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
719
+ " to False."
720
+ )
721
+ return None
722
+
723
+ # From now on, etag is not None.
724
+ if os.path.exists(cache_path) and not force_download:
725
+ return cache_path
726
+
727
+ # Prevent parallel downloads of the same file with a lock.
728
+ lock_path = cache_path + ".lock"
729
+ with FileLock(lock_path):
730
+
731
+ # If the download just completed while the lock was activated.
732
+ if os.path.exists(cache_path) and not force_download:
733
+ # Even if returning early like here, the lock will be released.
734
+ return cache_path
735
+
736
+ if resume_download:
737
+ incomplete_path = cache_path + ".incomplete"
738
+
739
+ @contextmanager
740
+ def _resumable_file_manager():
741
+ with open(incomplete_path, "a+b") as f:
742
+ yield f
743
+
744
+ temp_file_manager = _resumable_file_manager
745
+ if os.path.exists(incomplete_path):
746
+ resume_size = os.stat(incomplete_path).st_size
747
+ else:
748
+ resume_size = 0
749
+ else:
750
+ temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
751
+ resume_size = 0
752
+
753
+ # Download to temporary file, then copy to cache dir once finished.
754
+ # Otherwise you get corrupt cache entries if the download gets interrupted.
755
+ with temp_file_manager() as temp_file:
756
+ logger.info("%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name)
757
+
758
+ http_get(url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent)
759
+
760
+ logger.info("storing %s in cache at %s", url, cache_path)
761
+ os.replace(temp_file.name, cache_path)
762
+
763
+ logger.info("creating metadata file for %s", cache_path)
764
+ meta = {"url": url, "etag": etag}
765
+ meta_path = cache_path + ".json"
766
+ with open(meta_path, "w") as meta_file:
767
+ json.dump(meta, meta_file)
768
+
769
+ return cache_path
770
+
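The download path above combines double-checked locking with an atomic rename; the same pattern in isolation, as a sketch assuming the `filelock` package used by this module:

    import os
    from filelock import FileLock

    def write_once(path, produce):
        if os.path.exists(path):            # fast path, no lock taken
            return path
        with FileLock(path + ".lock"):      # one process does the work...
            if os.path.exists(path):        # ...late arrivals re-check and return early
                return path
            tmp = path + ".incomplete"
            with open(tmp, "wb") as f:
                f.write(produce())          # produce() returns the file bytes
            os.replace(tmp, path)           # atomic publish, no partial reads
        return path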
771
+
772
+ class cached_property(property):
773
+ """
774
+ Descriptor that mimics @property but caches output in member variable.
775
+
776
+ From tensorflow_datasets
777
+
778
+ Built-in in functools from Python 3.8.
779
+ """
780
+
781
+ def __get__(self, obj, objtype=None):
782
+ # See docs.python.org/3/howto/descriptor.html#properties
783
+ if obj is None:
784
+ return self
785
+ if self.fget is None:
786
+ raise AttributeError("unreadable attribute")
787
+ attr = "__cached_" + self.fget.__name__
788
+ cached = getattr(obj, attr, None)
789
+ if cached is None:
790
+ cached = self.fget(obj)
791
+ setattr(obj, attr, cached)
792
+ return cached
793
+
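A hypothetical usage sketch; the computation runs once and later reads hit the memoized attribute:

    class Settings:
        @cached_property
        def vocab(self):
            print("computed once")
            return {"[PAD]": 0, "[UNK]": 1}

    s = Settings()
    s.vocab   # prints "computed once" and stores __cached_vocab on s
    s.vocab   # served from the cached attribute, no recomputation

Note that because the cache check is `getattr(obj, attr, None) is None`, a property that legitimately returns None would be recomputed on every access.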
794
+
795
+ def torch_required(func):
796
+ # Chose a different decorator name than in tests so it's clear they are not the same.
797
+ @wraps(func)
798
+ def wrapper(*args, **kwargs):
799
+ if is_torch_available():
800
+ return func(*args, **kwargs)
801
+ else:
802
+ raise ImportError(f"Method `{func.__name__}` requires PyTorch.")
803
+
804
+ return wrapper
805
+
806
+
807
+ def tf_required(func):
808
+ # Chose a different decorator name than in tests so it's clear they are not the same.
809
+ @wraps(func)
810
+ def wrapper(*args, **kwargs):
811
+ if is_tf_available():
812
+ return func(*args, **kwargs)
813
+ else:
814
+ raise ImportError(f"Method `{func.__name__}` requires TF.")
815
+
816
+ return wrapper
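A hypothetical usage sketch of the guard decorators above:

    @torch_required
    def count_parameters(model):
        return sum(p.numel() for p in model.parameters())

    # Without PyTorch installed, calling count_parameters(...) raises:
    # ImportError: Method `count_parameters` requires PyTorch.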
RIS-DMMI/bert/generation_utils.py ADDED
@@ -0,0 +1,993 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import logging
18
+ from typing import Iterable, List, Optional, Tuple
19
+
20
+ import torch
21
+ from torch import Tensor
22
+ from torch.nn import functional as F
23
+
24
+
25
+ logger = logging.getLogger(__name__)
26
+
27
+
28
+ class GenerationMixin:
29
+ """
30
+ A class containing all of the functions supporting generation, to be used as a mixin in PreTrainedModel.
31
+ """
32
+
33
+ def prepare_inputs_for_generation(self, input_ids, **kwargs):
34
+ return {"input_ids": input_ids}
35
+
36
+ def adjust_logits_during_generation(self, logits, **kwargs):
37
+ return logits
38
+
39
+ def _use_cache(self, outputs, use_cache):
40
+ """During generation, decide whether to pass the `past` variable to the next forward pass."""
41
+ if len(outputs) <= 1 or use_cache is False:
42
+ return False
43
+ if hasattr(self.config, "mem_len") and self.config.mem_len == 0:
44
+ return False
45
+ return True
46
+
47
+ def enforce_repetition_penalty_(self, lprobs, batch_size, num_beams, prev_output_tokens, repetition_penalty):
48
+ """repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858). """
49
+ for i in range(batch_size * num_beams):
50
+ for previous_token in set(prev_output_tokens[i].tolist()):
51
+ # if score < 0 then the repetition penalty has to be multiplied to reduce the previous token probability
52
+ if lprobs[i, previous_token] < 0:
53
+ lprobs[i, previous_token] *= repetition_penalty
54
+ else:
55
+ lprobs[i, previous_token] /= repetition_penalty
56
+
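A standalone sketch of the penalty rule above (theta = 1.2 is an illustrative value); both branches make already-generated tokens less likely:

    import torch

    lprobs = torch.tensor([[-0.5, 0.4, -2.0]])
    for prev_token in [0, 1]:              # token ids already generated
        if lprobs[0, prev_token] < 0:
            lprobs[0, prev_token] *= 1.2   # negative score pushed further down
        else:
            lprobs[0, prev_token] /= 1.2   # positive score shrunk toward zero
    # lprobs -> [[-0.6, 0.3333, -2.0]]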
57
+ def postprocess_next_token_scores(
58
+ self,
59
+ scores,
60
+ input_ids,
61
+ no_repeat_ngram_size,
62
+ bad_words_ids,
63
+ cur_len,
64
+ min_length,
65
+ max_length,
66
+ eos_token_id,
67
+ repetition_penalty,
68
+ batch_size,
69
+ num_beams,
70
+ ):
71
+ # repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)
72
+ if repetition_penalty != 1.0:
73
+ self.enforce_repetition_penalty_(
74
+ scores, batch_size, num_beams, input_ids, repetition_penalty,
75
+ )
76
+
77
+ # set eos token prob to zero if min_length is not reached
78
+ if eos_token_id is not None and cur_len < min_length:
79
+ scores[:, eos_token_id] = -float("inf")
80
+
81
+ if no_repeat_ngram_size > 0:
82
+ # calculate a list of banned tokens to prevent repetitively generating the same ngrams
83
+ num_batch_hypotheses = batch_size * num_beams
84
+ # from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345
85
+ banned_batch_tokens = calc_banned_ngram_tokens(
86
+ input_ids, num_batch_hypotheses, no_repeat_ngram_size, cur_len
87
+ )
88
+ for i, banned_tokens in enumerate(banned_batch_tokens):
89
+ scores[i, banned_tokens] = -float("inf")
90
+
91
+ if bad_words_ids is not None:
92
+ # calculate a list of banned tokens according to bad words
93
+ banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids)
94
+
95
+ for i, banned_tokens in enumerate(banned_tokens):
96
+ scores[i, banned_tokens] = -float("inf")
97
+
98
+ return scores
99
+
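All of the banning branches above reduce to masking logits with -inf before the softmax; a minimal sketch:

    import torch

    scores = torch.tensor([[1.0, 2.0, 0.5]])
    scores[0, [1]] = -float("inf")            # ban token id 1
    probs = torch.softmax(scores, dim=-1)     # banned token gets probability 0.0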
100
+ @torch.no_grad()
101
+ def generate(
102
+ self,
103
+ input_ids: Optional[torch.LongTensor] = None,
104
+ max_length: Optional[int] = None,
105
+ min_length: Optional[int] = None,
106
+ do_sample: Optional[bool] = None,
107
+ early_stopping: Optional[bool] = None,
108
+ num_beams: Optional[int] = None,
109
+ temperature: Optional[float] = None,
110
+ top_k: Optional[int] = None,
111
+ top_p: Optional[float] = None,
112
+ repetition_penalty: Optional[float] = None,
113
+ bad_words_ids: Optional[Iterable[int]] = None,
114
+ bos_token_id: Optional[int] = None,
115
+ pad_token_id: Optional[int] = None,
116
+ eos_token_id: Optional[int] = None,
117
+ length_penalty: Optional[float] = None,
118
+ no_repeat_ngram_size: Optional[int] = None,
119
+ num_return_sequences: Optional[int] = None,
120
+ attention_mask: Optional[torch.LongTensor] = None,
121
+ decoder_start_token_id: Optional[int] = None,
122
+ use_cache: Optional[bool] = None,
123
+ **model_specific_kwargs
124
+ ) -> torch.LongTensor:
125
+ r""" Generates sequences for models with a LM head. The method currently supports greedy decoding, beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling.
126
+
127
+ Adapted in part from `Facebook's XLM beam search code`_.
128
+
129
+ .. _`Facebook's XLM beam search code`:
130
+ https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529
131
+
132
+
133
+ Parameters:
134
+
135
+ input_ids: (`optional`) `torch.LongTensor` of shape `(batch_size, sequence_length)`
136
+ The sequence used as a prompt for the generation. If `None` the method initializes
137
+ it as an empty `torch.LongTensor` of shape `(1,)`.
138
+
139
+ max_length: (`optional`) int
140
+ The max length of the sequence to be generated. Between `min_length` and infinity. Defaults to 20.
141
+
142
+ min_length: (`optional`) int
143
+ The min length of the sequence to be generated. Between 0 and infinity. Defaults to 0.
144
+
145
+ do_sample: (`optional`) bool
146
+ If set to `False` greedy decoding is used. Otherwise sampling is used. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.
147
+
148
+ early_stopping: (`optional`) bool
149
+ if set to `True` beam search is stopped when at least `num_beams` sentences finished per batch. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.
150
+
151
+ num_beams: (`optional`) int
152
+ Number of beams for beam search. Must be between 1 and infinity. 1 means no beam search. Defaults to 1.
153
+
154
+ temperature: (`optional`) float
155
+ The value used to modulate the next token probabilities. Must be strictly positive. Defaults to 1.0.
156
+
157
+ top_k: (`optional`) int
158
+ The number of highest probability vocabulary tokens to keep for top-k filtering. Between 1 and infinity. Defaults to 50.
159
+
160
+ top_p: (`optional`) float
161
+ The cumulative probability of the highest probability vocabulary tokens to keep for nucleus sampling. Must be between 0 and 1. Defaults to 1.
162
+
163
+ repetition_penalty: (`optional`) float
164
+ The parameter for repetition penalty. Between 1.0 and infinity. 1.0 means no penalty. Defaults to 1.0.
165
+
166
+ pad_token_id: (`optional`) int
167
+ Padding token. Defaults to the model-specific pad_token_id, or None if it does not exist.
168
+
169
+ bos_token_id: (`optional`) int
170
+ BOS token. Defaults to `bos_token_id` as defined in the models config.
171
+
172
+ eos_token_id: (`optional`) int
173
+ EOS token. Defaults to `eos_token_id` as defined in the models config.
174
+
175
+ length_penalty: (`optional`) float
176
+ Exponential penalty to the length. Defaults to 1.
177
+
178
+ no_repeat_ngram_size: (`optional`) int
179
+ If set to int > 0, all ngrams of size `no_repeat_ngram_size` can only occur once.
180
+ bad_words_ids: (`optional`) list of lists of int
181
+ `bad_words_ids` contains tokens that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use `tokenizer.encode(bad_word, add_prefix_space=True)`.
182
+
183
+ num_return_sequences: (`optional`) int
184
+ The number of independently computed returned sequences for each element in the batch. Defaults to 1.
185
+
186
+ attention_mask (`optional`) obj: `torch.LongTensor` of same shape as `input_ids`
187
+ Mask to avoid performing attention on padding token indices.
188
+ Mask values selected in ``[0, 1]``:
189
+ ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
190
+ Defaults to `None`.
191
+
192
+ `What are attention masks? <../glossary.html#attention-mask>`__
193
+
194
+ decoder_start_token_id=None: (`optional`) int
195
+ If an encoder-decoder model starts decoding with a different token than BOS.
196
+ Defaults to `None` and is changed to `BOS` later.
197
+
198
+ use_cache: (`optional`) bool
199
+ If `use_cache` is True, past key values are used to speed up decoding if applicable to model. Defaults to `True`.
200
+
201
+ model_specific_kwargs: (`optional`) dict
202
+ Additional model specific kwargs will be forwarded to the `forward` function of the model.
203
+
204
+ Return:
205
+
206
+ output: `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`
207
+ sequence_length is either equal to max_length or shorter if all batches finished early due to the `eos_token_id`
208
+
209
+ Examples::
210
+
211
+ tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer
212
+ model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.
213
+ outputs = model.generate(max_length=40) # do greedy decoding
214
+ print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
215
+
216
+ tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer
217
+ model = AutoModelWithLMHead.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache.
218
+ input_context = 'The dog'
219
+ input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
220
+ outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) from initial context 'The dog'
221
+ for i in range(3): # 3 output sequences were generated
222
+ print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
223
+
224
+ tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer
225
+ model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.
226
+ input_context = 'The dog'
227
+ input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
228
+ outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, do_sample=True, num_return_sequences=3) # generate 3 sequences by sampling
229
+ for i in range(3): # 3 output sequences were generated
230
+ print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
231
+
232
+ tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer
233
+ model = AutoModelWithLMHead.from_pretrained('ctrl') # Download model and configuration from S3 and cache.
234
+ input_context = 'Legal My neighbor is' # "Legal" is one of the control codes for ctrl
235
+ input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
236
+ outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences
237
+ print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
238
+
239
+ tokenizer = AutoTokenizer.from_pretrained('gpt2') # Initialize tokenizer
240
+ model = AutoModelWithLMHead.from_pretrained('gpt2') # Download model and configuration from S3 and cache.
241
+ input_context = 'My cute dog'
242
+ bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']]
243
+ input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
244
+ outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) # generate sequences without allowing bad_words to be generated
245
+ """
246
+
247
+ # We cannot generate if the model does not have a LM head
248
+ if self.get_output_embeddings() is None:
249
+ raise AttributeError(
250
+ "You tried to generate sequences with a model that does not have a LM Head. "
251
+ "Please use another model class (e.g. `OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`, `XLMWithLMHeadModel`, `BartForConditionalGeneration` )"
252
+ )
253
+
254
+ max_length = max_length if max_length is not None else self.config.max_length
255
+ min_length = min_length if min_length is not None else self.config.min_length
256
+ do_sample = do_sample if do_sample is not None else self.config.do_sample
257
+ early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping
258
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
259
+ num_beams = num_beams if num_beams is not None else self.config.num_beams
260
+ temperature = temperature if temperature is not None else self.config.temperature
261
+ top_k = top_k if top_k is not None else self.config.top_k
262
+ top_p = top_p if top_p is not None else self.config.top_p
263
+ repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty
264
+ bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id
265
+ pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
266
+ eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
267
+ length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty
268
+ no_repeat_ngram_size = (
269
+ no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size
270
+ )
271
+ bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids
272
+ num_return_sequences = (
273
+ num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
274
+ )
275
+ decoder_start_token_id = (
276
+ decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id
277
+ )
278
+
279
+ if input_ids is not None:
280
+ batch_size = input_ids.shape[0] # overridden by the input batch_size
281
+ else:
282
+ batch_size = 1
283
+
284
+ assert isinstance(max_length, int) and max_length > 0, "`max_length` should be a strictly positive integer."
285
+ assert isinstance(min_length, int) and min_length >= 0, "`min_length` should be a positive integer."
286
+ assert isinstance(do_sample, bool), "`do_sample` should be a boolean."
287
+ assert isinstance(early_stopping, bool), "`early_stopping` should be a boolean."
288
+ assert isinstance(use_cache, bool), "`use_cache` should be a boolean."
289
+ assert isinstance(num_beams, int) and num_beams > 0, "`num_beams` should be a strictly positive integer."
290
+ assert temperature > 0, "`temperature` should be strictly positive."
291
+ assert isinstance(top_k, int) and top_k >= 0, "`top_k` should be a positive integer."
292
+ assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1."
293
+ assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1."
294
+ assert input_ids is not None or (
295
+ isinstance(bos_token_id, int) and bos_token_id >= 0
296
+ ), "If input_ids is not defined, `bos_token_id` should be a positive integer."
297
+ assert pad_token_id is None or (
298
+ isinstance(pad_token_id, int) and (pad_token_id >= 0)
299
+ ), "`pad_token_id` should be a positive integer."
300
+ assert (eos_token_id is None) or (
301
+ isinstance(eos_token_id, int) and (eos_token_id >= 0)
302
+ ), "`eos_token_id` should be a positive integer."
303
+ assert length_penalty > 0, "`length_penalty` should be strictly positive."
304
+ assert (
305
+ isinstance(no_repeat_ngram_size, int) and no_repeat_ngram_size >= 0
306
+ ), "`no_repeat_ngram_size` should be a positive integer."
307
+ assert (
308
+ isinstance(num_return_sequences, int) and num_return_sequences > 0
309
+ ), "`num_return_sequences` should be a strictly positive integer."
310
+ assert (
311
+ bad_words_ids is None or isinstance(bad_words_ids, list) and isinstance(bad_words_ids[0], list)
312
+ ), "`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated"
313
+
314
+ if input_ids is None:
315
+ assert isinstance(bos_token_id, int) and bos_token_id >= 0, (
316
+ "you should either supply a context to complete as `input_ids` input "
317
+ "or a `bos_token_id` (integer >= 0) as a first token to start the generation."
318
+ )
319
+ input_ids = torch.full(
320
+ (batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device,
321
+ )
322
+ else:
323
+ assert input_ids.dim() == 2, "Input prompt should be of shape (batch_size, sequence length)."
324
+
325
+ # not allow to duplicate outputs when greedy decoding
326
+ if do_sample is False:
327
+ if num_beams == 1:
328
+ # no_beam_search greedy generation conditions
329
+ assert (
330
+ num_return_sequences == 1
331
+ ), "Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1"
332
+
333
+ else:
334
+ # beam_search greedy generation conditions
335
+ assert (
336
+ num_beams >= num_return_sequences
337
+ ), "Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences"
338
+
339
+ # create attention mask if necessary
340
+ # TODO (PVP): this should later be handled by the forward fn() in each model; see PR 3140
341
+ if (attention_mask is None) and (pad_token_id is not None) and (pad_token_id in input_ids):
342
+ attention_mask = input_ids.ne(pad_token_id).long()
343
+ elif attention_mask is None:
344
+ attention_mask = input_ids.new_ones(input_ids.shape)
345
+
346
+ # set pad_token_id to eos_token_id if not set. Important that this is done after
347
+ # attention_mask is created
348
+ if pad_token_id is None and eos_token_id is not None:
349
+ logger.warning(
350
+ "Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence".format(eos_token_id)
351
+ )
352
+ pad_token_id = eos_token_id
353
+
354
+ # current position and vocab size
355
+ if hasattr(self.config, "vocab_size"):
356
+ vocab_size = self.config.vocab_size
357
+ elif (
358
+ self.config.is_encoder_decoder
359
+ and hasattr(self.config, "decoder")
360
+ and hasattr(self.config.decoder, "vocab_size")
361
+ ):
362
+ vocab_size = self.config.decoder.vocab_size
363
+
364
+ # set effective batch size and effective batch multiplier according to do_sample
365
+ if do_sample:
366
+ effective_batch_size = batch_size * num_return_sequences
367
+ effective_batch_mult = num_return_sequences
368
+ else:
369
+ effective_batch_size = batch_size
370
+ effective_batch_mult = 1
371
+
372
+ if self.config.is_encoder_decoder:
373
+ if decoder_start_token_id is None:
374
+ decoder_start_token_id = bos_token_id
375
+
376
+ assert (
377
+ decoder_start_token_id is not None
378
+ ), "decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation"
379
+ assert hasattr(self, "get_encoder"), "{} should have a 'get_encoder' function defined".format(self)
380
+ assert callable(self.get_encoder), "{} should be a method".format(self.get_encoder)
381
+
382
+ # get encoder and store encoder outputs
383
+ encoder = self.get_encoder()
384
+
385
+ encoder_outputs: tuple = encoder(input_ids, attention_mask=attention_mask)
386
+
387
+ # Expand input ids if num_beams > 1 or num_return_sequences > 1
388
+ if num_return_sequences > 1 or num_beams > 1:
389
+ input_ids_len = input_ids.shape[-1]
390
+ input_ids = input_ids.unsqueeze(1).expand(batch_size, effective_batch_mult * num_beams, input_ids_len)
391
+ attention_mask = attention_mask.unsqueeze(1).expand(
392
+ batch_size, effective_batch_mult * num_beams, input_ids_len
393
+ )
394
+
395
+ input_ids = input_ids.contiguous().view(
396
+ effective_batch_size * num_beams, input_ids_len
397
+ ) # shape: (batch_size * num_return_sequences * num_beams, cur_len)
398
+ attention_mask = attention_mask.contiguous().view(
399
+ effective_batch_size * num_beams, input_ids_len
400
+ ) # shape: (batch_size * num_return_sequences * num_beams, cur_len)
401
+
402
+ if self.config.is_encoder_decoder:
403
+ # create empty decoder_input_ids
404
+ input_ids = torch.full(
405
+ (effective_batch_size * num_beams, 1),
406
+ decoder_start_token_id,
407
+ dtype=torch.long,
408
+ device=next(self.parameters()).device,
409
+ )
410
+ cur_len = 1
411
+
412
+ assert (
413
+ batch_size == encoder_outputs[0].shape[0]
414
+ ), f"expected encoder_outputs[0] to have 1st dimension bs={batch_size}, got {encoder_outputs[0].shape[0]} "
415
+
416
+ # expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1)
417
+ expanded_batch_idxs = (
418
+ torch.arange(batch_size)
419
+ .view(-1, 1)
420
+ .repeat(1, num_beams * effective_batch_mult)
421
+ .view(-1)
422
+ .to(input_ids.device)
423
+ )
424
+ # expand encoder_outputs
425
+ encoder_outputs = (encoder_outputs[0].index_select(0, expanded_batch_idxs), *encoder_outputs[1:])
426
+
427
+ else:
428
+ encoder_outputs = None
429
+ cur_len = input_ids.shape[-1]
430
+
431
+ assert (
432
+ cur_len < max_length
433
+ ), f"The context has {cur_len} tokens, but `max_length` is only {max_length}. Please make sure that `max_length` is bigger than the number of tokens, by setting either `generate(max_length=...,...)` or `config.max_length = ...`"
434
+
435
+ if num_beams > 1:
436
+ output = self._generate_beam_search(
437
+ input_ids,
438
+ cur_len=cur_len,
439
+ max_length=max_length,
440
+ min_length=min_length,
441
+ do_sample=do_sample,
442
+ early_stopping=early_stopping,
443
+ temperature=temperature,
444
+ top_k=top_k,
445
+ top_p=top_p,
446
+ repetition_penalty=repetition_penalty,
447
+ no_repeat_ngram_size=no_repeat_ngram_size,
448
+ bad_words_ids=bad_words_ids,
449
+ pad_token_id=pad_token_id,
450
+ eos_token_id=eos_token_id,
451
+ batch_size=effective_batch_size,
452
+ num_return_sequences=num_return_sequences,
453
+ length_penalty=length_penalty,
454
+ num_beams=num_beams,
455
+ vocab_size=vocab_size,
456
+ encoder_outputs=encoder_outputs,
457
+ attention_mask=attention_mask,
458
+ use_cache=use_cache,
459
+ model_specific_kwargs=model_specific_kwargs,
460
+ )
461
+ else:
462
+ output = self._generate_no_beam_search(
463
+ input_ids,
464
+ cur_len=cur_len,
465
+ max_length=max_length,
466
+ min_length=min_length,
467
+ do_sample=do_sample,
468
+ temperature=temperature,
469
+ top_k=top_k,
470
+ top_p=top_p,
471
+ repetition_penalty=repetition_penalty,
472
+ no_repeat_ngram_size=no_repeat_ngram_size,
473
+ bad_words_ids=bad_words_ids,
474
+ pad_token_id=pad_token_id,
475
+ eos_token_id=eos_token_id,
476
+ batch_size=effective_batch_size,
477
+ encoder_outputs=encoder_outputs,
478
+ attention_mask=attention_mask,
479
+ use_cache=use_cache,
480
+ model_specific_kwargs=model_specific_kwargs,
481
+ )
482
+
483
+ return output
484
+
485
+ def _generate_no_beam_search(
486
+ self,
487
+ input_ids,
488
+ cur_len,
489
+ max_length,
490
+ min_length,
491
+ do_sample,
492
+ temperature,
493
+ top_k,
494
+ top_p,
495
+ repetition_penalty,
496
+ no_repeat_ngram_size,
497
+ bad_words_ids,
498
+ pad_token_id,
499
+ eos_token_id,
500
+ batch_size,
501
+ encoder_outputs,
502
+ attention_mask,
503
+ use_cache,
504
+ model_specific_kwargs,
505
+ ):
506
+ """ Generate sequences for each example without beam search (num_beams == 1).
507
+ All returned sequences are generated independently.
508
+ """
509
+ # length of generated sentences / unfinished sentences
510
+ unfinished_sents = input_ids.new(batch_size).fill_(1)
511
+ sent_lengths = input_ids.new(batch_size).fill_(max_length)
512
+
513
+ past = (encoder_outputs, None) if encoder_outputs is not None else None
514
+
515
+ while cur_len < max_length:
516
+ model_inputs = self.prepare_inputs_for_generation(
517
+ input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_specific_kwargs
518
+ )
519
+
520
+ outputs = self(**model_inputs)
521
+ next_token_logits = outputs[0][:, -1, :]
522
+
523
+ scores = self.postprocess_next_token_scores(
524
+ scores=next_token_logits,
525
+ input_ids=input_ids,
526
+ no_repeat_ngram_size=no_repeat_ngram_size,
527
+ bad_words_ids=bad_words_ids,
528
+ cur_len=cur_len,
529
+ min_length=min_length,
530
+ max_length=max_length,
531
+ eos_token_id=eos_token_id,
532
+ repetition_penalty=repetition_penalty,
533
+ batch_size=batch_size,
534
+ num_beams=1,
535
+ )
536
+
537
+ # if model has past, then set the past variable to speed up decoding
538
+ if self._use_cache(outputs, use_cache):
539
+ past = outputs[1]
540
+
541
+ if do_sample:
542
+ # Temperature (higher temperature => more likely to sample low probability tokens)
543
+ if temperature != 1.0:
544
+ scores = scores / temperature
545
+ # Top-p/top-k filtering
546
+ next_token_logscores = top_k_top_p_filtering(scores, top_k=top_k, top_p=top_p)
547
+ # Sample
548
+ probs = F.softmax(next_token_logscores, dim=-1)
549
+ next_token = torch.multinomial(probs, num_samples=1).squeeze(1)
550
+ else:
551
+ # Greedy decoding
552
+ next_token = torch.argmax(next_token_logits, dim=-1)
553
+
554
+ # update generations and finished sentences
555
+ if eos_token_id is not None:
556
+ # pad finished sentences if eos_token_id exist
557
+ tokens_to_add = next_token * unfinished_sents + (pad_token_id) * (1 - unfinished_sents)
558
+ else:
559
+ tokens_to_add = next_token
560
+
561
+ # add token and increase length by one
562
+ input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
563
+ cur_len = cur_len + 1
564
+
565
+ if eos_token_id is not None:
566
+ eos_in_sents = tokens_to_add == eos_token_id
567
+ # if sentence is unfinished and the token to add is eos, sent_lengths is filled with current length
568
+ is_sents_unfinished_and_token_to_add_is_eos = unfinished_sents.mul(eos_in_sents.long()).bool()
569
+ sent_lengths.masked_fill_(is_sents_unfinished_and_token_to_add_is_eos, cur_len)
570
+ # unfinished_sents is set to zero if eos in sentence
571
+ unfinished_sents.mul_((~eos_in_sents).long())
572
+
573
+ # stop when there is a </s> in each sentence, or if we exceed the maximum length
574
+ if unfinished_sents.max() == 0:
575
+ break
576
+
577
+ # extend attention_mask for new generated input if only decoder
578
+ if self.config.is_encoder_decoder is False:
579
+ attention_mask = torch.cat(
580
+ [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
581
+ )
582
+
583
+ return input_ids
584
+
585
+ def _generate_beam_search(
586
+ self,
587
+ input_ids,
588
+ cur_len,
589
+ max_length,
590
+ min_length,
591
+ do_sample,
592
+ early_stopping,
593
+ temperature,
594
+ top_k,
595
+ top_p,
596
+ repetition_penalty,
597
+ no_repeat_ngram_size,
598
+ bad_words_ids,
599
+ pad_token_id,
600
+ eos_token_id,
601
+ batch_size,
602
+ num_return_sequences,
603
+ length_penalty,
604
+ num_beams,
605
+ vocab_size,
606
+ encoder_outputs,
607
+ attention_mask,
608
+ use_cache,
609
+ model_specific_kwargs,
610
+ ):
611
+ """ Generate sequences for each example with beam search.
612
+ """
613
+
614
+ # generated hypotheses
615
+ generated_hyps = [
616
+ BeamHypotheses(num_beams, max_length, length_penalty, early_stopping=early_stopping)
617
+ for _ in range(batch_size)
618
+ ]
619
+
620
+ # scores for each sentence in the beam
621
+ beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
622
+
623
+ # for greedy decoding it is made sure that only tokens of the first beam are considered, to avoid sampling the exact same tokens num_beams times
624
+ if do_sample is False:
625
+ beam_scores[:, 1:] = -1e9
626
+ beam_scores = beam_scores.view(-1) # shape (batch_size * num_beams,)
627
+
628
+ # cache compute states
629
+ past = (encoder_outputs, None) if encoder_outputs is not None else None
630
+
631
+ # done sentences
632
+ done = [False for _ in range(batch_size)]
633
+
634
+ while cur_len < max_length:
635
+ model_inputs = self.prepare_inputs_for_generation(
636
+ input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_specific_kwargs
637
+ )
638
+ outputs = self(**model_inputs) # (batch_size * num_beams, cur_len, vocab_size)
639
+ next_token_logits = outputs[0][:, -1, :] # (batch_size * num_beams, vocab_size)
640
+
641
+ # if model has past, then set the past variable to speed up decoding
642
+ if self._use_cache(outputs, use_cache):
643
+ past = outputs[1]
644
+ if self.config.is_encoder_decoder and do_sample is False:
645
+ # TODO (PVP) still a bit hacky here - there might be a better solution
646
+ next_token_logits = self.adjust_logits_during_generation(
647
+ next_token_logits, cur_len=cur_len, max_length=max_length
648
+ )
649
+
650
+ scores = F.log_softmax(next_token_logits, dim=-1) # (batch_size * num_beams, vocab_size)
651
+
652
+ scores = self.postprocess_next_token_scores(
653
+ scores=scores,
654
+ input_ids=input_ids,
655
+ no_repeat_ngram_size=no_repeat_ngram_size,
656
+ bad_words_ids=bad_words_ids,
657
+ cur_len=cur_len,
658
+ min_length=min_length,
659
+ max_length=max_length,
660
+ eos_token_id=eos_token_id,
661
+ repetition_penalty=repetition_penalty,
662
+ batch_size=batch_size,
663
+ num_beams=num_beams,
664
+ )
665
+
666
+ assert scores.shape == (batch_size * num_beams, vocab_size), "Shapes of scores: {} != {}".format(
667
+ scores.shape, (batch_size * num_beams, vocab_size)
668
+ )
669
+
670
+ if do_sample:
671
+ _scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)
672
+ # Temperature
673
+ if temperature != 1.0:
674
+ _scores = _scores / temperature
675
+ # Top-p/top-k filtering
676
+ _scores = top_k_top_p_filtering(
677
+ _scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2
678
+ ) # (batch_size * num_beams, vocab_size)
679
+ # re-organize to group the beam together to sample from all beam_idxs
680
+ _scores = _scores.contiguous().view(
681
+ batch_size, num_beams * vocab_size
682
+ ) # (batch_size, num_beams * vocab_size)
683
+
684
+ # Sample 2 next tokens for each beam (so we have some spare tokens and match output of greedy beam search)
685
+ probs = F.softmax(_scores, dim=-1)
686
+ next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) # (batch_size, num_beams * 2)
687
+ # Compute next scores
688
+ next_scores = torch.gather(_scores, -1, next_tokens) # (batch_size, num_beams * 2)
689
+ # sort the sampled vector to make sure that the first num_beams samples are the best
690
+ next_scores, next_scores_indices = torch.sort(next_scores, descending=True, dim=1)
691
+ next_tokens = torch.gather(next_tokens, -1, next_scores_indices) # (batch_size, num_beams * 2)
692
+
693
+ else:
694
+ next_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)
695
+
696
+ # re-organize to group the beam together (we are keeping top hypothesis across beams)
697
+ next_scores = next_scores.view(
698
+ batch_size, num_beams * vocab_size
699
+ ) # (batch_size, num_beams * vocab_size)
700
+
701
+ next_scores, next_tokens = torch.topk(next_scores, 2 * num_beams, dim=1, largest=True, sorted=True)
702
+
703
+ assert next_scores.size() == next_tokens.size() == (batch_size, 2 * num_beams)
704
+
705
+ # next batch beam content
706
+ next_batch_beam = []
707
+
708
+ # for each sentence
709
+ for batch_idx in range(batch_size):
710
+
711
+ # if we are done with this sentence, add a pad token
712
+ if done[batch_idx]:
713
+ assert (
714
+ len(generated_hyps[batch_idx]) >= num_beams
715
+ ), "Batch can only be done if at least {} beams have been generated".format(num_beams)
716
+ assert (
717
+ eos_token_id is not None and pad_token_id is not None
718
+ ), "generated beams >= num_beams -> eos_token_id and pad_token have to be defined"
719
+ next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams) # pad the batch
720
+ continue
721
+
722
+ # next sentence beam content, this will get added to next_batch_beam
723
+ next_sent_beam = []
724
+
725
+ # next tokens for this sentence
726
+ for beam_token_rank, (beam_token_id, beam_token_score) in enumerate(
727
+ zip(next_tokens[batch_idx], next_scores[batch_idx])
728
+ ):
729
+ # get beam and token IDs
730
+ beam_id = beam_token_id // vocab_size
731
+ token_id = beam_token_id % vocab_size
732
+
733
+ effective_beam_id = batch_idx * num_beams + beam_id
734
+ # add to generated hypotheses if end of sentence
735
+ if (eos_token_id is not None) and (token_id.item() == eos_token_id):
736
+ # if beam_token does not belong to top num_beams tokens, it should not be added
737
+ is_beam_token_worse_than_top_num_beams = beam_token_rank >= num_beams
738
+ if is_beam_token_worse_than_top_num_beams:
739
+ continue
740
+ generated_hyps[batch_idx].add(
741
+ input_ids[effective_beam_id].clone(), beam_token_score.item(),
742
+ )
743
+ else:
744
+ # add next predicted token since it is not eos_token
745
+ next_sent_beam.append((beam_token_score, token_id, effective_beam_id))
746
+
747
+ # once the beam for next step is full, don't add more tokens to it.
748
+ if len(next_sent_beam) == num_beams:
749
+ break
750
+
751
+ # Check if we are done so that we can save a pad step if all(done)
752
+ done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done(
753
+ next_scores[batch_idx].max().item(), cur_len
754
+ )
755
+
756
+ # update next beam content
757
+ assert len(next_sent_beam) == num_beams, "Beam should always be full"
758
+ next_batch_beam.extend(next_sent_beam)
759
+ assert len(next_batch_beam) == num_beams * (batch_idx + 1), "We should have added num_beams each step"
760
+
761
+ # stop when we are done with each sentence
762
+ if all(done):
763
+ break
764
+
765
+ # sanity check / prepare next batch
766
+ assert len(next_batch_beam) == batch_size * num_beams
767
+ beam_scores = beam_scores.new([x[0] for x in next_batch_beam])
768
+ beam_tokens = input_ids.new([x[1] for x in next_batch_beam])
769
+ beam_idx = input_ids.new([x[2] for x in next_batch_beam])
770
+
771
+ # re-order batch and update current length
772
+ input_ids = input_ids[beam_idx, :]
773
+ input_ids = torch.cat([input_ids, beam_tokens.unsqueeze(1)], dim=-1)
774
+ cur_len = cur_len + 1
775
+
776
+ # re-order internal states
777
+ if past is not None:
778
+ past = self._reorder_cache(past, beam_idx)
779
+
780
+ # extend attention_mask for new generated input if only decoder
781
+ if self.config.is_encoder_decoder is False:
782
+ attention_mask = torch.cat(
783
+ [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
784
+ )
785
+
786
+ # finalize all open beam hypotheses and add to generated hypotheses
787
+ for batch_idx in range(batch_size):
788
+ if done[batch_idx]:
789
+ continue
790
+
791
+ # test that beam scores match previously calculated scores if not eos and batch_idx not done
792
+ if eos_token_id is not None and all(
793
+ (token_id % vocab_size).item() != eos_token_id for token_id in next_tokens[batch_idx]
794
+ ):
795
+ assert torch.all(
796
+ next_scores[batch_idx, :num_beams] == beam_scores.view(batch_size, num_beams)[batch_idx]
797
+ ), "If batch_idx is not done, final next scores: {} have to equal the accumulated beam_scores: {}".format(
798
+ next_scores[:, :num_beams][batch_idx], beam_scores.view(batch_size, num_beams)[batch_idx],
799
+ )
800
+
801
+ # need to add best num_beams hypotheses to generated hyps
802
+ for beam_id in range(num_beams):
803
+ effective_beam_id = batch_idx * num_beams + beam_id
804
+ final_score = beam_scores[effective_beam_id].item()
805
+ final_tokens = input_ids[effective_beam_id]
806
+ generated_hyps[batch_idx].add(final_tokens, final_score)
807
+
808
+ # depending on whether greedy generation is wanted or not define different output_batch_size and output_num_return_sequences_per_batch
809
+ output_batch_size = batch_size if do_sample else batch_size * num_return_sequences
810
+ output_num_return_sequences_per_batch = 1 if do_sample else num_return_sequences
811
+
812
+ # select the best hypotheses
813
+ sent_lengths = input_ids.new(output_batch_size)
814
+ best = []
815
+
816
+ # retrieve best hypotheses
817
+ for i, hypotheses in enumerate(generated_hyps):
818
+ sorted_hyps = sorted(hypotheses.beams, key=lambda x: x[0])
819
+ for j in range(output_num_return_sequences_per_batch):
820
+ effective_batch_idx = output_num_return_sequences_per_batch * i + j
821
+ best_hyp = sorted_hyps.pop()[1]
822
+ sent_lengths[effective_batch_idx] = len(best_hyp)
823
+ best.append(best_hyp)
824
+
825
+ # shorter batches are padded
826
+ if sent_lengths.min().item() != sent_lengths.max().item():
827
+ assert pad_token_id is not None, "`Pad_token_id` has to be defined"
828
+ sent_max_len = min(sent_lengths.max().item() + 1, max_length)
829
+ decoded = input_ids.new(output_batch_size, sent_max_len).fill_(pad_token_id)
830
+
831
+ # fill with hypothesis and eos_token_id if necessary
832
+ for i, hypo in enumerate(best):
833
+ decoded[i, : sent_lengths[i]] = hypo
834
+ if sent_lengths[i] < max_length:
835
+ decoded[i, sent_lengths[i]] = eos_token_id
836
+ else:
837
+ # none of the hypotheses have an eos_token
838
+ assert all(len(hypo) == max_length for hypo in best)
839
+ decoded = torch.stack(best).type(torch.long).to(next(self.parameters()).device)
840
+
841
+ return decoded
842
+
843
+ @staticmethod
844
+ def _reorder_cache(past: Tuple, beam_idx: Tensor) -> Tuple[Tensor]:
845
+ return tuple(layer_past.index_select(1, beam_idx) for layer_past in past)
846
+
847
+
848
+ def calc_banned_ngram_tokens(prev_input_ids: Tensor, num_hypos: int, no_repeat_ngram_size: int, cur_len: int) -> List[List[int]]:
849
+ """Copied from fairseq for no_repeat_ngram in beam_search"""
850
+ if cur_len + 1 < no_repeat_ngram_size:
851
+ # return no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
852
+ return [[] for _ in range(num_hypos)]
853
+ generated_ngrams = [{} for _ in range(num_hypos)]
854
+ for idx in range(num_hypos):
855
+ gen_tokens = prev_input_ids[idx].tolist()
856
+ generated_ngram = generated_ngrams[idx]
857
+ for ngram in zip(*[gen_tokens[i:] for i in range(no_repeat_ngram_size)]):
858
+ prev_ngram_tuple = tuple(ngram[:-1])
859
+ generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]]
860
+
861
+ def _get_generated_ngrams(hypo_idx):
862
+ # Before decoding the next token, prevent decoding of ngrams that have already appeared
863
+ start_idx = cur_len + 1 - no_repeat_ngram_size
864
+ ngram_idx = tuple(prev_input_ids[hypo_idx, start_idx:cur_len].tolist())
865
+ return generated_ngrams[hypo_idx].get(ngram_idx, [])
866
+
867
+ banned_tokens = [_get_generated_ngrams(hypo_idx) for hypo_idx in range(num_hypos)]
868
+ return banned_tokens
869
+
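An illustrative call: with no_repeat_ngram_size=2 and the generated prefix [5, 3, 5], the bigram (5, 3) has already occurred, so token 3 is banned after the trailing 5.

    import torch

    prev = torch.tensor([[5, 3, 5]])
    calc_banned_ngram_tokens(prev, num_hypos=1, no_repeat_ngram_size=2, cur_len=3)
    # -> [[3]]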
870
+
871
+ def calc_banned_bad_words_ids(prev_input_ids: Iterable[Iterable[int]], bad_words_ids: Iterable[Iterable[int]]) -> List[List[int]]:
872
+ banned_tokens = []
873
+
874
+ def _tokens_match(prev_tokens, tokens):
875
+ if len(tokens) == 0:
876
+ # if the bad word is a single token, its (empty) prefix always matches, so it is always banned
877
+ return True
878
+ if len(tokens) > len(prev_input_ids):
879
+ # if bad word tokens are longer than prev_input_ids they can't be equal
880
+ return False
881
+
882
+ if prev_tokens[-len(tokens) :] == tokens:
883
+ # if tokens match
884
+ return True
885
+ else:
886
+ return False
887
+
888
+ for prev_input_ids_slice in prev_input_ids:
889
+ banned_tokens_slice = []
890
+
891
+ for banned_token_seq in bad_words_ids:
892
+ assert len(banned_token_seq) > 0, "Banned words token sequences {} cannot have an empty list".format(
893
+ bad_words_ids
894
+ )
895
+
896
+ if _tokens_match(prev_input_ids_slice.tolist(), banned_token_seq[:-1]) is False:
897
+ # if tokens do not match continue
898
+ continue
899
+
900
+ banned_tokens_slice.append(banned_token_seq[-1])
901
+
902
+ banned_tokens.append(banned_tokens_slice)
903
+
904
+ return banned_tokens
905
+
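An illustrative call: a multi-token bad word is banned only when the generated prefix already ends with everything but its last token, while single-token bad words are always banned.

    import torch

    prev = torch.tensor([[7, 4], [7, 8]])
    calc_banned_bad_words_ids(prev, bad_words_ids=[[4, 5], [9]])
    # -> [[5, 9], [9]]   (only the first row ends with 4, so only it bans 5)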
906
+
907
+ def top_k_top_p_filtering(
908
+ logits: Tensor,
909
+ top_k: int = 0,
910
+ top_p: float = 1.0,
911
+ filter_value: float = -float("Inf"),
912
+ min_tokens_to_keep: int = 1,
913
+ ) -> Tensor:
914
+ """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
915
+ Args:
916
+ logits: logits distribution shape (batch size, vocabulary size)
917
+ if top_k > 0: keep only top k tokens with highest probability (top-k filtering).
918
+ if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
919
+ Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
920
+ Make sure we keep at least min_tokens_to_keep per batch example in the output
921
+ From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
922
+ """
923
+ if top_k > 0:
924
+ top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1)) # Safety check
925
+ # Remove all tokens with a probability less than the last token of the top-k
926
+ indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
927
+ logits[indices_to_remove] = filter_value
928
+
929
+ if top_p < 1.0:
930
+ sorted_logits, sorted_indices = torch.sort(logits, descending=True)
931
+ cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
932
+
933
+ # Remove tokens with cumulative probability above the threshold (tokens with 0 are kept)
934
+ sorted_indices_to_remove = cumulative_probs > top_p
935
+ if min_tokens_to_keep > 1:
936
+ # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
937
+ sorted_indices_to_remove[..., :min_tokens_to_keep] = 0
938
+ # Shift the indices to the right to keep also the first token above the threshold
939
+ sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
940
+ sorted_indices_to_remove[..., 0] = 0
941
+
942
+ # scatter sorted tensors to original indexing
943
+ indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
944
+ logits[indices_to_remove] = filter_value
945
+ return logits
946
+
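An illustrative call (note the function modifies `logits` in place and also returns them):

    import torch

    logits = torch.tensor([[2.0, 1.0, 0.5, 0.1]])
    top_k_top_p_filtering(logits, top_k=2)
    # logits -> [[2.0, 1.0, -inf, -inf]]; with top_p=0.9 instead, the smallest
    # set of tokens whose cumulative softmax mass reaches 0.9 would be kept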
947
+
948
+ class BeamHypotheses(object):
949
+ def __init__(self, num_beams, max_length, length_penalty, early_stopping):
950
+ """
951
+ Initialize n-best list of hypotheses.
952
+ """
953
+ self.max_length = max_length - 1 # ignoring bos_token
954
+ self.length_penalty = length_penalty
955
+ self.early_stopping = early_stopping
956
+ self.num_beams = num_beams
957
+ self.beams = []
958
+ self.worst_score = 1e9
959
+
960
+ def __len__(self):
961
+ """
962
+ Number of hypotheses in the list.
963
+ """
964
+ return len(self.beams)
965
+
966
+ def add(self, hyp, sum_logprobs):
967
+ """
968
+ Add a new hypothesis to the list.
969
+ """
970
+ score = sum_logprobs / len(hyp) ** self.length_penalty
971
+ if len(self) < self.num_beams or score > self.worst_score:
972
+ self.beams.append((score, hyp))
973
+ if len(self) > self.num_beams:
974
+ sorted_scores = sorted([(s, idx) for idx, (s, _) in enumerate(self.beams)])
975
+ del self.beams[sorted_scores[0][1]]
976
+ self.worst_score = sorted_scores[1][0]
977
+ else:
978
+ self.worst_score = min(score, self.worst_score)
979
+
980
+ def is_done(self, best_sum_logprobs, cur_len):
981
+ """
982
+ If there are enough hypotheses and none of the hypotheses being generated
983
+ can become better than the worst one in the heap, then we are done with this sentence.
984
+ """
985
+
986
+ if len(self) < self.num_beams:
987
+ return False
988
+ elif self.early_stopping:
989
+ return True
990
+ else:
991
+ cur_score = best_sum_logprobs / cur_len ** self.length_penalty
992
+ ret = self.worst_score >= cur_score
993
+ return ret
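For orientation, a small illustration of the bookkeeping this class performs during beam search; the token ids and log-probabilities below are invented for the example:

```python
import torch

# Track the 2 best finished hypotheses, with no length penalty.
hyps = BeamHypotheses(num_beams=2, max_length=10, length_penalty=1.0, early_stopping=False)

hyps.add(torch.tensor([5, 7, 2]), sum_logprobs=-1.2)      # first finished beam, score -0.40
hyps.add(torch.tensor([5, 9, 4, 2]), sum_logprobs=-1.0)   # second finished beam, score -0.25
hyps.add(torch.tensor([5, 3, 2]), sum_logprobs=-4.0)      # score -1.33, worse than the worst kept: rejected

assert len(hyps) == 2
# Once no in-flight beam can beat the current worst kept score, the sentence is done.
print(hyps.is_done(best_sum_logprobs=-5.0, cur_len=4))    # True
```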
RIS-DMMI/bert/modeling.py ADDED
@@ -0,0 +1,986 @@
+ # coding=utf-8
+ # Copyright 2018 The Google AI Language Team Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """The main BERT model and related functions."""
+
+ from __future__ import absolute_import
+ from __future__ import division
+ from __future__ import print_function
+
+ import collections
+ import copy
+ import json
+ import math
+ import re
+ import numpy as np
+ import six
+ import tensorflow as tf
+
+
+ class BertConfig(object):
+   """Configuration for `BertModel`."""
+
+   def __init__(self,
+                vocab_size,
+                hidden_size=768,
+                num_hidden_layers=12,
+                num_attention_heads=12,
+                intermediate_size=3072,
+                hidden_act="gelu",
+                hidden_dropout_prob=0.1,
+                attention_probs_dropout_prob=0.1,
+                max_position_embeddings=512,
+                type_vocab_size=16,
+                initializer_range=0.02):
+     """Constructs BertConfig.
+
+     Args:
+       vocab_size: Vocabulary size of `input_ids` in `BertModel`.
+       hidden_size: Size of the encoder layers and the pooler layer.
+       num_hidden_layers: Number of hidden layers in the Transformer encoder.
+       num_attention_heads: Number of attention heads for each attention layer in
+         the Transformer encoder.
+       intermediate_size: The size of the "intermediate" (i.e., feed-forward)
+         layer in the Transformer encoder.
+       hidden_act: The non-linear activation function (function or string) in the
+         encoder and pooler.
+       hidden_dropout_prob: The dropout probability for all fully connected
+         layers in the embeddings, encoder, and pooler.
+       attention_probs_dropout_prob: The dropout ratio for the attention
+         probabilities.
+       max_position_embeddings: The maximum sequence length that this model might
+         ever be used with. Typically set this to something large just in case
+         (e.g., 512 or 1024 or 2048).
+       type_vocab_size: The vocabulary size of the `token_type_ids` passed into
+         `BertModel`.
+       initializer_range: The stdev of the truncated_normal_initializer for
+         initializing all weight matrices.
+     """
+     self.vocab_size = vocab_size
+     self.hidden_size = hidden_size
+     self.num_hidden_layers = num_hidden_layers
+     self.num_attention_heads = num_attention_heads
+     self.hidden_act = hidden_act
+     self.intermediate_size = intermediate_size
+     self.hidden_dropout_prob = hidden_dropout_prob
+     self.attention_probs_dropout_prob = attention_probs_dropout_prob
+     self.max_position_embeddings = max_position_embeddings
+     self.type_vocab_size = type_vocab_size
+     self.initializer_range = initializer_range
+
+   @classmethod
+   def from_dict(cls, json_object):
+     """Constructs a `BertConfig` from a Python dictionary of parameters."""
+     config = BertConfig(vocab_size=None)
+     for (key, value) in six.iteritems(json_object):
+       config.__dict__[key] = value
+     return config
+
+   @classmethod
+   def from_json_file(cls, json_file):
+     """Constructs a `BertConfig` from a json file of parameters."""
+     with tf.gfile.GFile(json_file, "r") as reader:
+       text = reader.read()
+     return cls.from_dict(json.loads(text))
+
+   def to_dict(self):
+     """Serializes this instance to a Python dictionary."""
+     output = copy.deepcopy(self.__dict__)
+     return output
+
+   def to_json_string(self):
+     """Serializes this instance to a JSON string."""
+     return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
+
+
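A quick sketch of the serialization round-trip this config supports; the hyperparameter values are arbitrary:

```python
config = BertConfig(vocab_size=32000, hidden_size=256, num_hidden_layers=4,
                    num_attention_heads=4, intermediate_size=1024)

# Serialize to a dict and rebuild; the two configs carry identical fields.
as_dict = config.to_dict()
restored = BertConfig.from_dict(as_dict)
assert restored.to_json_string() == config.to_json_string()
```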
+ class BertModel(object):
+   """BERT model ("Bidirectional Encoder Representations from Transformers").
+
+   Example usage:
+
+   ```python
+   # Already been converted into WordPiece token ids
+   input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])
+   input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])
+   token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])
+
+   config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
+       num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
+
+   model = modeling.BertModel(config=config, is_training=True,
+       input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)
+
+   label_embeddings = tf.get_variable(...)
+   pooled_output = model.get_pooled_output()
+   logits = tf.matmul(pooled_output, label_embeddings)
+   ...
+   ```
+   """
+
+   def __init__(self,
+                config,
+                is_training,
+                input_ids,
+                input_mask=None,
+                token_type_ids=None,
+                use_one_hot_embeddings=False,
+                scope=None):
+     """Constructor for BertModel.
+
+     Args:
+       config: `BertConfig` instance.
+       is_training: bool. True for training mode, False for eval mode. Controls
+         whether dropout will be applied.
+       input_ids: int32 Tensor of shape [batch_size, seq_length].
+       input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].
+       token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
+       use_one_hot_embeddings: (optional) bool. Whether to use one-hot word
+         embeddings or tf.embedding_lookup() for the word embeddings.
+       scope: (optional) variable scope. Defaults to "bert".
+
+     Raises:
+       ValueError: The config is invalid or one of the input tensor shapes
+         is invalid.
+     """
+     config = copy.deepcopy(config)
+     if not is_training:
+       config.hidden_dropout_prob = 0.0
+       config.attention_probs_dropout_prob = 0.0
+
+     input_shape = get_shape_list(input_ids, expected_rank=2)
+     batch_size = input_shape[0]
+     seq_length = input_shape[1]
+
+     if input_mask is None:
+       input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)
+
+     if token_type_ids is None:
+       token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)
+
+     with tf.variable_scope(scope, default_name="bert"):
+       with tf.variable_scope("embeddings"):
+         # Perform embedding lookup on the word ids.
+         (self.embedding_output, self.embedding_table) = embedding_lookup(
+             input_ids=input_ids,
+             vocab_size=config.vocab_size,
+             embedding_size=config.hidden_size,
+             initializer_range=config.initializer_range,
+             word_embedding_name="word_embeddings",
+             use_one_hot_embeddings=use_one_hot_embeddings)
+
+         # Add positional embeddings and token type embeddings, then layer
+         # normalize and perform dropout.
+         self.embedding_output = embedding_postprocessor(
+             input_tensor=self.embedding_output,
+             use_token_type=True,
+             token_type_ids=token_type_ids,
+             token_type_vocab_size=config.type_vocab_size,
+             token_type_embedding_name="token_type_embeddings",
+             use_position_embeddings=True,
+             position_embedding_name="position_embeddings",
+             initializer_range=config.initializer_range,
+             max_position_embeddings=config.max_position_embeddings,
+             dropout_prob=config.hidden_dropout_prob)
+
+       with tf.variable_scope("encoder"):
+         # This converts a 2D mask of shape [batch_size, seq_length] to a 3D
+         # mask of shape [batch_size, seq_length, seq_length] which is used
+         # for the attention scores.
+         attention_mask = create_attention_mask_from_input_mask(
+             input_ids, input_mask)
+
+         # Run the stacked transformer.
+         # `sequence_output` shape = [batch_size, seq_length, hidden_size].
+         self.all_encoder_layers = transformer_model(
+             input_tensor=self.embedding_output,
+             attention_mask=attention_mask,
+             hidden_size=config.hidden_size,
+             num_hidden_layers=config.num_hidden_layers,
+             num_attention_heads=config.num_attention_heads,
+             intermediate_size=config.intermediate_size,
+             intermediate_act_fn=get_activation(config.hidden_act),
+             hidden_dropout_prob=config.hidden_dropout_prob,
+             attention_probs_dropout_prob=config.attention_probs_dropout_prob,
+             initializer_range=config.initializer_range,
+             do_return_all_layers=True)
+
+       self.sequence_output = self.all_encoder_layers[-1]
+       # The "pooler" converts the encoded sequence tensor of shape
+       # [batch_size, seq_length, hidden_size] to a tensor of shape
+       # [batch_size, hidden_size]. This is necessary for segment-level
+       # (or segment-pair-level) classification tasks where we need a fixed
+       # dimensional representation of the segment.
+       with tf.variable_scope("pooler"):
+         # We "pool" the model by simply taking the hidden state corresponding
+         # to the first token. We assume that this has been pre-trained.
+         first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
+         self.pooled_output = tf.layers.dense(
+             first_token_tensor,
+             config.hidden_size,
+             activation=tf.tanh,
+             kernel_initializer=create_initializer(config.initializer_range))
+
+   def get_pooled_output(self):
+     return self.pooled_output
+
+   def get_sequence_output(self):
+     """Gets final hidden layer of encoder.
+
+     Returns:
+       float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
+       to the final hidden layer of the transformer encoder.
+     """
+     return self.sequence_output
+
+   def get_all_encoder_layers(self):
+     return self.all_encoder_layers
+
+   def get_embedding_output(self):
+     """Gets output of the embedding lookup (i.e., input to the transformer).
+
+     Returns:
+       float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
+       to the output of the embedding layer, after summing the word
+       embeddings with the positional embeddings and the token type embeddings,
+       then performing layer normalization. This is the input to the transformer.
+     """
+     return self.embedding_output
+
+   def get_embedding_table(self):
+     return self.embedding_table
+
+
+ def gelu(x):
+   """Gaussian Error Linear Unit.
+
+   This is a smoother version of the ReLU.
+   Original paper: https://arxiv.org/abs/1606.08415
+
+   Args:
+     x: float Tensor to perform activation.
+
+   Returns:
+     `x` with the GELU activation applied.
+   """
+   cdf = 0.5 * (1.0 + tf.tanh(
+       (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
+   return x * cdf
+
+
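Note that this is the tanh approximation of GELU; the exact form is x * Φ(x), with Φ the standard normal CDF. A standard-library-only sanity check of how close the two are (an illustrative aside, not part of this file):

```python
import math

def gelu_exact(x):
    # Exact GELU: x * Phi(x), with Phi the standard normal CDF.
    return x * 0.5 * (1.0 + math.erf(x / math.sqrt(2.0)))

def gelu_tanh(x):
    # The tanh approximation used in modeling.py above.
    return x * 0.5 * (1.0 + math.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x ** 3)))

for v in (-2.0, -0.5, 0.0, 0.5, 2.0):
    assert abs(gelu_exact(v) - gelu_tanh(v)) < 1e-3
```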
+ def get_activation(activation_string):
+   """Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.
+
+   Args:
+     activation_string: String name of the activation function.
+
+   Returns:
+     A Python function corresponding to the activation function. If
+     `activation_string` is None, empty, or "linear", this will return None.
+     If `activation_string` is not a string, it will return `activation_string`.
+
+   Raises:
+     ValueError: The `activation_string` does not correspond to a known
+       activation.
+   """
+
+   # We assume that anything that's not a string is already an activation
+   # function, so we just return it.
+   if not isinstance(activation_string, six.string_types):
+     return activation_string
+
+   if not activation_string:
+     return None
+
+   act = activation_string.lower()
+   if act == "linear":
+     return None
+   elif act == "relu":
+     return tf.nn.relu
+   elif act == "gelu":
+     return gelu
+   elif act == "tanh":
+     return tf.tanh
+   else:
+     raise ValueError("Unsupported activation: %s" % act)
+
+
+ def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
+   """Compute the union of the current variables and checkpoint variables."""
+   assignment_map = {}
+   initialized_variable_names = {}
+
+   name_to_variable = collections.OrderedDict()
+   for var in tvars:
+     name = var.name
+     m = re.match("^(.*):\\d+$", name)
+     if m is not None:
+       name = m.group(1)
+     name_to_variable[name] = var
+
+   init_vars = tf.train.list_variables(init_checkpoint)
+
+   assignment_map = collections.OrderedDict()
+   for x in init_vars:
+     (name, var) = (x[0], x[1])
+     if name not in name_to_variable:
+       continue
+     assignment_map[name] = name
+     initialized_variable_names[name] = 1
+     initialized_variable_names[name + ":0"] = 1
+
+   return (assignment_map, initialized_variable_names)
+
+
+ def dropout(input_tensor, dropout_prob):
+   """Perform dropout.
+
+   Args:
+     input_tensor: float Tensor.
+     dropout_prob: Python float. The probability of dropping out a value (NOT of
+       *keeping* a dimension as in `tf.nn.dropout`).
+
+   Returns:
+     A version of `input_tensor` with dropout applied.
+   """
+   if dropout_prob is None or dropout_prob == 0.0:
+     return input_tensor
+
+   output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob)
+   return output
+
+
+ def layer_norm(input_tensor, name=None):
+   """Run layer normalization on the last dimension of the tensor."""
+   return tf.contrib.layers.layer_norm(
+       inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
+
+
+ def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
+   """Runs layer normalization followed by dropout."""
+   output_tensor = layer_norm(input_tensor, name)
+   output_tensor = dropout(output_tensor, dropout_prob)
+   return output_tensor
+
+
+ def create_initializer(initializer_range=0.02):
+   """Creates a `truncated_normal_initializer` with the given range."""
+   return tf.truncated_normal_initializer(stddev=initializer_range)
+
+
+ def embedding_lookup(input_ids,
+                      vocab_size,
+                      embedding_size=128,
+                      initializer_range=0.02,
+                      word_embedding_name="word_embeddings",
+                      use_one_hot_embeddings=False):
+   """Looks up word embeddings for an id tensor.
+
+   Args:
+     input_ids: int32 Tensor of shape [batch_size, seq_length] containing word
+       ids.
+     vocab_size: int. Size of the embedding vocabulary.
+     embedding_size: int. Width of the word embeddings.
+     initializer_range: float. Embedding initialization range.
+     word_embedding_name: string. Name of the embedding table.
+     use_one_hot_embeddings: bool. If True, use one-hot method for word
+       embeddings. If False, use `tf.gather()`.
+
+   Returns:
+     float Tensor of shape [batch_size, seq_length, embedding_size].
+   """
+   # This function assumes that the input is of shape [batch_size, seq_length,
+   # num_inputs].
+   #
+   # If the input is a 2D tensor of shape [batch_size, seq_length], we
+   # reshape to [batch_size, seq_length, 1].
+   if input_ids.shape.ndims == 2:
+     input_ids = tf.expand_dims(input_ids, axis=[-1])
+
+   embedding_table = tf.get_variable(
+       name=word_embedding_name,
+       shape=[vocab_size, embedding_size],
+       initializer=create_initializer(initializer_range))
+
+   flat_input_ids = tf.reshape(input_ids, [-1])
+   if use_one_hot_embeddings:
+     one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
+     output = tf.matmul(one_hot_input_ids, embedding_table)
+   else:
+     output = tf.gather(embedding_table, flat_input_ids)
+
+   input_shape = get_shape_list(input_ids)
+
+   output = tf.reshape(output,
+                       input_shape[0:-1] + [input_shape[-1] * embedding_size])
+   return (output, embedding_table)
+
+
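The `use_one_hot_embeddings` branch computes the same result as `tf.gather`, just expressed as a matmul (which was historically faster on TPUs). A small check of that equivalence, assuming the same TF 1.x runtime this file targets:

```python
import numpy as np
import tensorflow as tf

table = tf.constant(np.arange(12, dtype=np.float32).reshape(4, 3))  # vocab 4, width 3
ids = tf.constant([0, 2, 3])

gathered = tf.gather(table, ids)
one_hot = tf.matmul(tf.one_hot(ids, depth=4), table)

with tf.Session() as sess:
    a, b = sess.run([gathered, one_hot])
assert np.allclose(a, b)  # both pick rows 0, 2, 3 of the table
```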
+ def embedding_postprocessor(input_tensor,
+                             use_token_type=False,
+                             token_type_ids=None,
+                             token_type_vocab_size=16,
+                             token_type_embedding_name="token_type_embeddings",
+                             use_position_embeddings=True,
+                             position_embedding_name="position_embeddings",
+                             initializer_range=0.02,
+                             max_position_embeddings=512,
+                             dropout_prob=0.1):
+   """Performs various post-processing on a word embedding tensor.
+
+   Args:
+     input_tensor: float Tensor of shape [batch_size, seq_length,
+       embedding_size].
+     use_token_type: bool. Whether to add embeddings for `token_type_ids`.
+     token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
+       Must be specified if `use_token_type` is True.
+     token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
+     token_type_embedding_name: string. The name of the embedding table variable
+       for token type ids.
+     use_position_embeddings: bool. Whether to add position embeddings for the
+       position of each token in the sequence.
+     position_embedding_name: string. The name of the embedding table variable
+       for positional embeddings.
+     initializer_range: float. Range of the weight initialization.
+     max_position_embeddings: int. Maximum sequence length that might ever be
+       used with this model. This can be longer than the sequence length of
+       input_tensor, but cannot be shorter.
+     dropout_prob: float. Dropout probability applied to the final output tensor.
+
+   Returns:
+     float tensor with same shape as `input_tensor`.
+
+   Raises:
+     ValueError: One of the tensor shapes or input values is invalid.
+   """
+   input_shape = get_shape_list(input_tensor, expected_rank=3)
+   batch_size = input_shape[0]
+   seq_length = input_shape[1]
+   width = input_shape[2]
+
+   output = input_tensor
+
+   if use_token_type:
+     if token_type_ids is None:
+       raise ValueError("`token_type_ids` must be specified if "
+                        "`use_token_type` is True.")
+     token_type_table = tf.get_variable(
+         name=token_type_embedding_name,
+         shape=[token_type_vocab_size, width],
+         initializer=create_initializer(initializer_range))
+     # This vocab will be small so we always do one-hot here, since it is always
+     # faster for a small vocabulary.
+     flat_token_type_ids = tf.reshape(token_type_ids, [-1])
+     one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
+     token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
+     token_type_embeddings = tf.reshape(token_type_embeddings,
+                                        [batch_size, seq_length, width])
+     output += token_type_embeddings
+
+   if use_position_embeddings:
+     assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
+     with tf.control_dependencies([assert_op]):
+       full_position_embeddings = tf.get_variable(
+           name=position_embedding_name,
+           shape=[max_position_embeddings, width],
+           initializer=create_initializer(initializer_range))
+       # Since the position embedding table is a learned variable, we create it
+       # using a (long) sequence length `max_position_embeddings`. The actual
+       # sequence length might be shorter than this, for faster training of
+       # tasks that do not have long sequences.
+       #
+       # So `full_position_embeddings` is effectively an embedding table
+       # for position [0, 1, 2, ..., max_position_embeddings-1], and the current
+       # sequence has positions [0, 1, 2, ... seq_length-1], so we can just
+       # perform a slice.
+       position_embeddings = tf.slice(full_position_embeddings, [0, 0],
+                                      [seq_length, -1])
+       num_dims = len(output.shape.as_list())
+
+       # Only the last two dimensions are relevant (`seq_length` and `width`), so
+       # we broadcast among the first dimensions, which is typically just
+       # the batch size.
+       position_broadcast_shape = []
+       for _ in range(num_dims - 2):
+         position_broadcast_shape.append(1)
+       position_broadcast_shape.extend([seq_length, width])
+       position_embeddings = tf.reshape(position_embeddings,
+                                        position_broadcast_shape)
+       output += position_embeddings
+
+   output = layer_norm_and_dropout(output, dropout_prob)
+   return output
+
+
+ def create_attention_mask_from_input_mask(from_tensor, to_mask):
+   """Create 3D attention mask from a 2D tensor mask.
+
+   Args:
+     from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
+     to_mask: int32 Tensor of shape [batch_size, to_seq_length].
+
+   Returns:
+     float Tensor of shape [batch_size, from_seq_length, to_seq_length].
+   """
+   from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
+   batch_size = from_shape[0]
+   from_seq_length = from_shape[1]
+
+   to_shape = get_shape_list(to_mask, expected_rank=2)
+   to_seq_length = to_shape[1]
+
+   to_mask = tf.cast(
+       tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)
+
+   # We don't assume that `from_tensor` is a mask (although it could be). We
+   # don't actually care if we attend *from* padding tokens (only *to* padding
+   # tokens), so we create a tensor of all ones.
+   #
+   # `broadcast_ones` = [batch_size, from_seq_length, 1]
+   broadcast_ones = tf.ones(
+       shape=[batch_size, from_seq_length, 1], dtype=tf.float32)
+
+   # Here we broadcast along two dimensions to create the mask.
+   mask = broadcast_ones * to_mask
+
+   return mask
+
+
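The broadcast at the heart of this function, replayed with concrete NumPy values (batch of 1, sequence length 3, last position padded):

```python
import numpy as np

to_mask = np.array([[1, 1, 0]], dtype=np.float32)       # [batch, to_seq_length]
to_mask = to_mask.reshape(1, 1, 3)                      # [batch, 1, to_seq_length]
broadcast_ones = np.ones((1, 3, 1), dtype=np.float32)   # [batch, from_seq_length, 1]

mask = broadcast_ones * to_mask                         # [batch, from, to]
# Every query position may attend to tokens 0 and 1, never to padded token 2:
# [[[1. 1. 0.]
#   [1. 1. 0.]
#   [1. 1. 0.]]]
```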
+ def attention_layer(from_tensor,
+                     to_tensor,
+                     attention_mask=None,
+                     num_attention_heads=1,
+                     size_per_head=512,
+                     query_act=None,
+                     key_act=None,
+                     value_act=None,
+                     attention_probs_dropout_prob=0.0,
+                     initializer_range=0.02,
+                     do_return_2d_tensor=False,
+                     batch_size=None,
+                     from_seq_length=None,
+                     to_seq_length=None):
+   """Performs multi-headed attention from `from_tensor` to `to_tensor`.
+
+   This is an implementation of multi-headed attention based on "Attention
+   Is All You Need". If `from_tensor` and `to_tensor` are the same, then
+   this is self-attention. Each timestep in `from_tensor` attends to the
+   corresponding sequence in `to_tensor`, and returns a fixed-width vector.
+
+   This function first projects `from_tensor` into a "query" tensor and
+   `to_tensor` into "key" and "value" tensors. These are (effectively) a list
+   of tensors of length `num_attention_heads`, where each tensor is of shape
+   [batch_size, seq_length, size_per_head].
+
+   Then, the query and key tensors are dot-producted and scaled. These are
+   softmaxed to obtain attention probabilities. The value tensors are then
+   interpolated by these probabilities, then concatenated back to a single
+   tensor and returned.
+
+   In practice, the multi-headed attention is done with transposes and
+   reshapes rather than actual separate tensors.
+
+   Args:
+     from_tensor: float Tensor of shape [batch_size, from_seq_length,
+       from_width].
+     to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
+     attention_mask: (optional) int32 Tensor of shape [batch_size,
+       from_seq_length, to_seq_length]. The values should be 1 or 0. The
+       attention scores will effectively be set to -infinity for any positions in
+       the mask that are 0, and will be unchanged for positions that are 1.
+     num_attention_heads: int. Number of attention heads.
+     size_per_head: int. Size of each attention head.
+     query_act: (optional) Activation function for the query transform.
+     key_act: (optional) Activation function for the key transform.
+     value_act: (optional) Activation function for the value transform.
+     attention_probs_dropout_prob: (optional) float. Dropout probability of the
+       attention probabilities.
+     initializer_range: float. Range of the weight initializer.
+     do_return_2d_tensor: bool. If True, the output will be of shape [batch_size
+       * from_seq_length, num_attention_heads * size_per_head]. If False, the
+       output will be of shape [batch_size, from_seq_length, num_attention_heads
+       * size_per_head].
+     batch_size: (Optional) int. If the input is 2D, this might be the batch size
+       of the 3D version of the `from_tensor` and `to_tensor`.
+     from_seq_length: (Optional) If the input is 2D, this might be the seq length
+       of the 3D version of the `from_tensor`.
+     to_seq_length: (Optional) If the input is 2D, this might be the seq length
+       of the 3D version of the `to_tensor`.
+
+   Returns:
+     float Tensor of shape [batch_size, from_seq_length,
+       num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is
+       true, this will be of shape [batch_size * from_seq_length,
+       num_attention_heads * size_per_head]).
+
+   Raises:
+     ValueError: Any of the arguments or tensor shapes are invalid.
+   """
+
+   def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
+                            seq_length, width):
+     output_tensor = tf.reshape(
+         input_tensor, [batch_size, seq_length, num_attention_heads, width])
+
+     output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
+     return output_tensor
+
+   from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
+   to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
+
+   if len(from_shape) != len(to_shape):
+     raise ValueError(
+         "The rank of `from_tensor` must match the rank of `to_tensor`.")
+
+   if len(from_shape) == 3:
+     batch_size = from_shape[0]
+     from_seq_length = from_shape[1]
+     to_seq_length = to_shape[1]
+   elif len(from_shape) == 2:
+     if (batch_size is None or from_seq_length is None or to_seq_length is None):
+       raise ValueError(
+           "When passing in rank 2 tensors to attention_layer, the values "
+           "for `batch_size`, `from_seq_length`, and `to_seq_length` "
+           "must all be specified.")
+
+   # Scalar dimensions referenced here:
+   #   B = batch size (number of sequences)
+   #   F = `from_tensor` sequence length
+   #   T = `to_tensor` sequence length
+   #   N = `num_attention_heads`
+   #   H = `size_per_head`
+
+   from_tensor_2d = reshape_to_matrix(from_tensor)
+   to_tensor_2d = reshape_to_matrix(to_tensor)
+
+   # `query_layer` = [B*F, N*H]
+   query_layer = tf.layers.dense(
+       from_tensor_2d,
+       num_attention_heads * size_per_head,
+       activation=query_act,
+       name="query",
+       kernel_initializer=create_initializer(initializer_range))
+
+   # `key_layer` = [B*T, N*H]
+   key_layer = tf.layers.dense(
+       to_tensor_2d,
+       num_attention_heads * size_per_head,
+       activation=key_act,
+       name="key",
+       kernel_initializer=create_initializer(initializer_range))
+
+   # `value_layer` = [B*T, N*H]
+   value_layer = tf.layers.dense(
+       to_tensor_2d,
+       num_attention_heads * size_per_head,
+       activation=value_act,
+       name="value",
+       kernel_initializer=create_initializer(initializer_range))
+
+   # `query_layer` = [B, N, F, H]
+   query_layer = transpose_for_scores(query_layer, batch_size,
+                                      num_attention_heads, from_seq_length,
+                                      size_per_head)
+
+   # `key_layer` = [B, N, T, H]
+   key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,
+                                    to_seq_length, size_per_head)
+
+   # Take the dot product between "query" and "key" to get the raw
+   # attention scores.
+   # `attention_scores` = [B, N, F, T]
+   attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
+   attention_scores = tf.multiply(attention_scores,
+                                  1.0 / math.sqrt(float(size_per_head)))
+
+   if attention_mask is not None:
+     # `attention_mask` = [B, 1, F, T]
+     attention_mask = tf.expand_dims(attention_mask, axis=[1])
+
+     # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+     # masked positions, this operation will create a tensor which is 0.0 for
+     # positions we want to attend and -10000.0 for masked positions.
+     adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0
+
+     # Since we are adding it to the raw scores before the softmax, this is
+     # effectively the same as removing these entirely.
+     attention_scores += adder
+
+   # Normalize the attention scores to probabilities.
+   # `attention_probs` = [B, N, F, T]
+   attention_probs = tf.nn.softmax(attention_scores)
+
+   # This is actually dropping out entire tokens to attend to, which might
+   # seem a bit unusual, but is taken from the original Transformer paper.
+   attention_probs = dropout(attention_probs, attention_probs_dropout_prob)
+
+   # `value_layer` = [B, T, N, H]
+   value_layer = tf.reshape(
+       value_layer,
+       [batch_size, to_seq_length, num_attention_heads, size_per_head])
+
+   # `value_layer` = [B, N, T, H]
+   value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
+
+   # `context_layer` = [B, N, F, H]
+   context_layer = tf.matmul(attention_probs, value_layer)
+
+   # `context_layer` = [B, F, N, H]
+   context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
+
+   if do_return_2d_tensor:
+     # `context_layer` = [B*F, N*H]
+     context_layer = tf.reshape(
+         context_layer,
+         [batch_size * from_seq_length, num_attention_heads * size_per_head])
+   else:
+     # `context_layer` = [B, F, N*H]
+     context_layer = tf.reshape(
+         context_layer,
+         [batch_size, from_seq_length, num_attention_heads * size_per_head])
+
+   return context_layer
+
+
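In equation form, each head computes the standard scaled dot-product attention, with the -10000.0 addend standing in for -infinity on masked positions and \(d_k\) equal to `size_per_head`:

```latex
\mathrm{Attention}(Q, K, V) = \mathrm{softmax}\!\left(\frac{Q K^{\top}}{\sqrt{d_k}} + M\right) V,
\qquad
M_{ij} =
\begin{cases}
0 & \text{if position } j \text{ may be attended to} \\
-10000 & \text{if position } j \text{ is masked}
\end{cases}
```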
+ def transformer_model(input_tensor,
+                       attention_mask=None,
+                       hidden_size=768,
+                       num_hidden_layers=12,
+                       num_attention_heads=12,
+                       intermediate_size=3072,
+                       intermediate_act_fn=gelu,
+                       hidden_dropout_prob=0.1,
+                       attention_probs_dropout_prob=0.1,
+                       initializer_range=0.02,
+                       do_return_all_layers=False):
+   """Multi-headed, multi-layer Transformer from "Attention Is All You Need".
+
+   This is almost an exact implementation of the original Transformer encoder.
+
+   See the original paper:
+   https://arxiv.org/abs/1706.03762
+
+   Also see:
+   https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py
+
+   Args:
+     input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
+     attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
+       seq_length], with 1 for positions that can be attended to and 0 in
+       positions that should not be.
+     hidden_size: int. Hidden size of the Transformer.
+     num_hidden_layers: int. Number of layers (blocks) in the Transformer.
+     num_attention_heads: int. Number of attention heads in the Transformer.
+     intermediate_size: int. The size of the "intermediate" (a.k.a., feed
+       forward) layer.
+     intermediate_act_fn: function. The non-linear activation function to apply
+       to the output of the intermediate/feed-forward layer.
+     hidden_dropout_prob: float. Dropout probability for the hidden layers.
+     attention_probs_dropout_prob: float. Dropout probability of the attention
+       probabilities.
+     initializer_range: float. Range of the initializer (stddev of truncated
+       normal).
+     do_return_all_layers: Whether to also return all layers or just the final
+       layer.
+
+   Returns:
+     float Tensor of shape [batch_size, seq_length, hidden_size], the final
+     hidden layer of the Transformer.
+
+   Raises:
+     ValueError: A Tensor shape or parameter is invalid.
+   """
+   if hidden_size % num_attention_heads != 0:
+     raise ValueError(
+         "The hidden size (%d) is not a multiple of the number of attention "
+         "heads (%d)" % (hidden_size, num_attention_heads))
+
+   attention_head_size = int(hidden_size / num_attention_heads)
+   input_shape = get_shape_list(input_tensor, expected_rank=3)
+   batch_size = input_shape[0]
+   seq_length = input_shape[1]
+   input_width = input_shape[2]
+
+   # The Transformer adds a residual connection at every layer, so the width of
+   # the input needs to be the same as the hidden size.
+   if input_width != hidden_size:
+     raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
+                      (input_width, hidden_size))
+
+   # We keep the representation as a 2D tensor to avoid re-shaping it back and
+   # forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on
+   # the GPU/CPU but may not be free on the TPU, so we want to minimize them to
+   # help the optimizer.
+   prev_output = reshape_to_matrix(input_tensor)
+
+   all_layer_outputs = []
+   for layer_idx in range(num_hidden_layers):
+     with tf.variable_scope("layer_%d" % layer_idx):
+       layer_input = prev_output
+
+       with tf.variable_scope("attention"):
+         attention_heads = []
+         with tf.variable_scope("self"):
+           attention_head = attention_layer(
+               from_tensor=layer_input,
+               to_tensor=layer_input,
+               attention_mask=attention_mask,
+               num_attention_heads=num_attention_heads,
+               size_per_head=attention_head_size,
+               attention_probs_dropout_prob=attention_probs_dropout_prob,
+               initializer_range=initializer_range,
+               do_return_2d_tensor=True,
+               batch_size=batch_size,
+               from_seq_length=seq_length,
+               to_seq_length=seq_length)
+           attention_heads.append(attention_head)
+
+         attention_output = None
+         if len(attention_heads) == 1:
+           attention_output = attention_heads[0]
+         else:
+           # In the case where we have other sequences, we just concatenate
+           # them to the self-attention head before the projection.
+           attention_output = tf.concat(attention_heads, axis=-1)
+
+         # Run a linear projection of `hidden_size` then add a residual
+         # with `layer_input`.
+         with tf.variable_scope("output"):
+           attention_output = tf.layers.dense(
+               attention_output,
+               hidden_size,
+               kernel_initializer=create_initializer(initializer_range))
+           attention_output = dropout(attention_output, hidden_dropout_prob)
+           attention_output = layer_norm(attention_output + layer_input)
+
+       # The activation is only applied to the "intermediate" hidden layer.
+       with tf.variable_scope("intermediate"):
+         intermediate_output = tf.layers.dense(
+             attention_output,
+             intermediate_size,
+             activation=intermediate_act_fn,
+             kernel_initializer=create_initializer(initializer_range))
+
+       # Down-project back to `hidden_size` then add the residual.
+       with tf.variable_scope("output"):
+         layer_output = tf.layers.dense(
+             intermediate_output,
+             hidden_size,
+             kernel_initializer=create_initializer(initializer_range))
+         layer_output = dropout(layer_output, hidden_dropout_prob)
+         layer_output = layer_norm(layer_output + attention_output)
+         prev_output = layer_output
+         all_layer_outputs.append(layer_output)
+
+   if do_return_all_layers:
+     final_outputs = []
+     for layer_output in all_layer_outputs:
+       final_output = reshape_from_matrix(layer_output, input_shape)
+       final_outputs.append(final_output)
+     return final_outputs
+   else:
+     final_output = reshape_from_matrix(prev_output, input_shape)
+     return final_output
+
+
+ def get_shape_list(tensor, expected_rank=None, name=None):
+   """Returns a list of the shape of tensor, preferring static dimensions.
+
+   Args:
+     tensor: A tf.Tensor object to find the shape of.
+     expected_rank: (optional) int. The expected rank of `tensor`. If this is
+       specified and the `tensor` has a different rank, an exception will be
+       thrown.
+     name: Optional name of the tensor for the error message.
+
+   Returns:
+     A list of dimensions of the shape of tensor. All static dimensions will
+     be returned as python integers, and dynamic dimensions will be returned
+     as tf.Tensor scalars.
+   """
+   if name is None:
+     name = tensor.name
+
+   if expected_rank is not None:
+     assert_rank(tensor, expected_rank, name)
+
+   shape = tensor.shape.as_list()
+
+   non_static_indexes = []
+   for (index, dim) in enumerate(shape):
+     if dim is None:
+       non_static_indexes.append(index)
+
+   if not non_static_indexes:
+     return shape
+
+   dyn_shape = tf.shape(tensor)
+   for index in non_static_indexes:
+     shape[index] = dyn_shape[index]
+   return shape
+
+
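How the static/dynamic split behaves in practice, assuming a TF 1.x placeholder whose batch dimension is unknown at graph-construction time:

```python
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 128, 768])
shape = get_shape_list(x, expected_rank=3)
# shape[0] is a scalar tf.Tensor (resolved at run time);
# shape[1] and shape[2] are the Python ints 128 and 768.
```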
+ def reshape_to_matrix(input_tensor):
+   """Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
+   ndims = input_tensor.shape.ndims
+   if ndims < 2:
+     raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
+                      (input_tensor.shape))
+   if ndims == 2:
+     return input_tensor
+
+   width = input_tensor.shape[-1]
+   output_tensor = tf.reshape(input_tensor, [-1, width])
+   return output_tensor
+
+
+ def reshape_from_matrix(output_tensor, orig_shape_list):
+   """Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
+   if len(orig_shape_list) == 2:
+     return output_tensor
+
+   output_shape = get_shape_list(output_tensor)
+
+   orig_dims = orig_shape_list[0:-1]
+   width = output_shape[-1]
+
+   return tf.reshape(output_tensor, orig_dims + [width])
+
+
+ def assert_rank(tensor, expected_rank, name=None):
+   """Raises an exception if the tensor rank is not of the expected rank.
+
+   Args:
+     tensor: A tf.Tensor to check the rank of.
+     expected_rank: Python integer or list of integers, expected rank.
+     name: Optional name of the tensor for the error message.
+
+   Raises:
+     ValueError: If the expected shape doesn't match the actual shape.
+   """
+   if name is None:
+     name = tensor.name
+
+   expected_rank_dict = {}
+   if isinstance(expected_rank, six.integer_types):
+     expected_rank_dict[expected_rank] = True
+   else:
+     for x in expected_rank:
+       expected_rank_dict[x] = True
+
+   actual_rank = tensor.shape.ndims
+   if actual_rank not in expected_rank_dict:
+     scope_name = tf.get_variable_scope().name
+     raise ValueError(
+         "For the tensor `%s` in scope `%s`, the actual rank "
+         "`%d` (shape = %s) is not equal to the expected rank `%s`" %
+         (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
RIS-DMMI/bert/modeling_bert.py ADDED
@@ -0,0 +1,1569 @@
+ # coding=utf-8
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """PyTorch BERT model."""
+
+
+ import logging
+ import math
+ import os
+ import warnings
+
+ import torch
+ import torch.utils.checkpoint
+ from torch import nn
+ from torch.nn import CrossEntropyLoss, MSELoss
+
+ from .activations import gelu, gelu_new, swish
+ from .configuration_bert import BertConfig
+ from .file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_callable
+ from .modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
+
+
+ logger = logging.getLogger(__name__)
+
+ _TOKENIZER_FOR_DOC = "BertTokenizer"
+
+ BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
+     "bert-base-uncased",
+     "bert-large-uncased",
+     "bert-base-cased",
+     "bert-large-cased",
+     "bert-base-multilingual-uncased",
+     "bert-base-multilingual-cased",
+     "bert-base-chinese",
+     "bert-base-german-cased",
+     "bert-large-uncased-whole-word-masking",
+     "bert-large-cased-whole-word-masking",
+     "bert-large-uncased-whole-word-masking-finetuned-squad",
+     "bert-large-cased-whole-word-masking-finetuned-squad",
+     "bert-base-cased-finetuned-mrpc",
+     "bert-base-german-dbmdz-cased",
+     "bert-base-german-dbmdz-uncased",
+     "cl-tohoku/bert-base-japanese",
+     "cl-tohoku/bert-base-japanese-whole-word-masking",
+     "cl-tohoku/bert-base-japanese-char",
+     "cl-tohoku/bert-base-japanese-char-whole-word-masking",
+     "TurkuNLP/bert-base-finnish-cased-v1",
+     "TurkuNLP/bert-base-finnish-uncased-v1",
+     "wietsedv/bert-base-dutch-cased",
+     # See all BERT models at https://huggingface.co/models?filter=bert
+ ]
+
+
+ def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
+     """ Load tf checkpoints in a pytorch model.
+     """
+     try:
+         import re
+         import numpy as np
+         import tensorflow as tf
+     except ImportError:
+         logger.error(
+             "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
+             "https://www.tensorflow.org/install/ for installation instructions."
+         )
+         raise
+     tf_path = os.path.abspath(tf_checkpoint_path)
+     logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
+     # Load weights from TF model
+     init_vars = tf.train.list_variables(tf_path)
+     names = []
+     arrays = []
+     for name, shape in init_vars:
+         logger.info("Loading TF weight {} with shape {}".format(name, shape))
+         array = tf.train.load_variable(tf_path, name)
+         names.append(name)
+         arrays.append(array)
+
+     for name, array in zip(names, arrays):
+         name = name.split("/")
+         # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
+         # which are not required for using the pretrained model
+         if any(
+             n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
+             for n in name
+         ):
+             logger.info("Skipping {}".format("/".join(name)))
+             continue
+         pointer = model
+         for m_name in name:
+             if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
+                 scope_names = re.split(r"_(\d+)", m_name)
+             else:
+                 scope_names = [m_name]
+             if scope_names[0] == "kernel" or scope_names[0] == "gamma":
+                 pointer = getattr(pointer, "weight")
+             elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
+                 pointer = getattr(pointer, "bias")
+             elif scope_names[0] == "output_weights":
+                 pointer = getattr(pointer, "weight")
+             elif scope_names[0] == "squad":
+                 pointer = getattr(pointer, "classifier")
+             else:
+                 try:
+                     pointer = getattr(pointer, scope_names[0])
+                 except AttributeError:
+                     logger.info("Skipping {}".format("/".join(name)))
+                     continue
+             if len(scope_names) >= 2:
+                 num = int(scope_names[1])
+                 pointer = pointer[num]
+         if m_name[-11:] == "_embeddings":
+             pointer = getattr(pointer, "weight")
+         elif m_name == "kernel":
+             array = np.transpose(array)
+         try:
+             assert pointer.shape == array.shape
+         except AssertionError as e:
+             e.args += (pointer.shape, array.shape)
+             raise
+         logger.info("Initialize PyTorch weight {}".format(name))
+         pointer.data = torch.from_numpy(array)
+     return model
+
+
+ def mish(x):
+     return x * torch.tanh(nn.functional.softplus(x))
+
+
+ ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish, "gelu_new": gelu_new, "mish": mish}
+
+
+ BertLayerNorm = torch.nn.LayerNorm
+
+
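A small sketch of how this registry is consumed (mirroring what `BertIntermediate` below does with `config.hidden_act`); the input values are arbitrary:

```python
import torch

act = ACT2FN["mish"]             # look up the activation by the string stored in the config
x = torch.linspace(-3, 3, steps=5)
print(act(x))                    # mish(x) = x * tanh(softplus(x))
```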
+ class BertEmbeddings(nn.Module):
+     """Construct the embeddings from word, position and token_type embeddings."""
+
+     def __init__(self, config):
+         super().__init__()
+         self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+         self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+         self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
+
+         # self.LayerNorm is not snake-cased to stick with the TensorFlow model variable name and be able to load
+         # any TensorFlow checkpoint file
+         self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+         self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+     def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
+         if input_ids is not None:
+             input_shape = input_ids.size()
+         else:
+             input_shape = inputs_embeds.size()[:-1]
+
+         seq_length = input_shape[1]
+         device = input_ids.device if input_ids is not None else inputs_embeds.device
+         if position_ids is None:
+             position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
+             position_ids = position_ids.unsqueeze(0).expand(input_shape)
+         if token_type_ids is None:
+             token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
+         if inputs_embeds is None:
+             inputs_embeds = self.word_embeddings(input_ids)
+         position_embeddings = self.position_embeddings(position_ids)
+         token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+         embeddings = inputs_embeds + position_embeddings + token_type_embeddings
+         embeddings = self.LayerNorm(embeddings)
+         embeddings = self.dropout(embeddings)
+         return embeddings
+
+
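A minimal shape check for the module above, using a bare attribute container in place of a full `BertConfig` (only the fields the module actually reads are supplied; the sizes are arbitrary):

```python
import torch
from types import SimpleNamespace

config = SimpleNamespace(
    vocab_size=100, hidden_size=32, pad_token_id=0,
    max_position_embeddings=64, type_vocab_size=2,
    layer_norm_eps=1e-12, hidden_dropout_prob=0.1,
)
emb = BertEmbeddings(config)

input_ids = torch.randint(0, 100, (2, 7))   # [batch, seq_len]
out = emb(input_ids=input_ids)
assert out.shape == (2, 7, 32)              # [batch, seq_len, hidden]
```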
+ class BertSelfAttention(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+             raise ValueError(
+                 "The hidden size (%d) is not a multiple of the number of attention "
+                 "heads (%d)" % (config.hidden_size, config.num_attention_heads)
+             )
+
+         self.num_attention_heads = config.num_attention_heads
+         self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+         self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+         self.query = nn.Linear(config.hidden_size, self.all_head_size)
+         self.key = nn.Linear(config.hidden_size, self.all_head_size)
+         self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+         self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+     def transpose_for_scores(self, x):
+         new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+         x = x.view(*new_x_shape)
+         return x.permute(0, 2, 1, 3)
+
+     def forward(
+         self,
+         hidden_states,
+         attention_mask=None,
+         head_mask=None,
+         encoder_hidden_states=None,
+         encoder_attention_mask=None,
+         output_attentions=False,
+     ):
+         mixed_query_layer = self.query(hidden_states)
+
+         # If this is instantiated as a cross-attention module, the keys
+         # and values come from an encoder; the attention mask needs to be
+         # such that the encoder's padding tokens are not attended to.
+         if encoder_hidden_states is not None:
+             mixed_key_layer = self.key(encoder_hidden_states)
+             mixed_value_layer = self.value(encoder_hidden_states)
+             attention_mask = encoder_attention_mask
+         else:
+             mixed_key_layer = self.key(hidden_states)
+             mixed_value_layer = self.value(hidden_states)
+
+         query_layer = self.transpose_for_scores(mixed_query_layer)
+         key_layer = self.transpose_for_scores(mixed_key_layer)
+         value_layer = self.transpose_for_scores(mixed_value_layer)
+
+         # Take the dot product between "query" and "key" to get the raw attention scores.
+         attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+         attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+         if attention_mask is not None:
+             # Apply the attention mask (precomputed for all layers in the BertModel forward() function).
+             attention_scores = attention_scores + attention_mask
+
+         # Normalize the attention scores to probabilities.
+         attention_probs = nn.Softmax(dim=-1)(attention_scores)
+
+         # This is actually dropping out entire tokens to attend to, which might
+         # seem a bit unusual, but is taken from the original Transformer paper.
+         attention_probs = self.dropout(attention_probs)
+
+         # Mask heads if we want to
+         if head_mask is not None:
+             attention_probs = attention_probs * head_mask
+
+         context_layer = torch.matmul(attention_probs, value_layer)
+
+         context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+         new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+         context_layer = context_layer.view(*new_context_layer_shape)
+
+         outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+         return outputs
+
+
+ class BertSelfOutput(nn.Module):
267
+ def __init__(self, config):
268
+ super().__init__()
269
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
270
+ self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
271
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
272
+
273
+ def forward(self, hidden_states, input_tensor):
274
+ hidden_states = self.dense(hidden_states)
275
+ hidden_states = self.dropout(hidden_states)
276
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
277
+ return hidden_states
278
+
279
+
280
+ class BertAttention(nn.Module):
281
+ def __init__(self, config):
282
+ super().__init__()
283
+ self.self = BertSelfAttention(config)
284
+ self.output = BertSelfOutput(config)
285
+ self.pruned_heads = set()
286
+
287
+ def prune_heads(self, heads):
288
+ if len(heads) == 0:
289
+ return
290
+ heads, index = find_pruneable_heads_and_indices(
291
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
292
+ )
293
+
294
+ # Prune linear layers
295
+ self.self.query = prune_linear_layer(self.self.query, index)
296
+ self.self.key = prune_linear_layer(self.self.key, index)
297
+ self.self.value = prune_linear_layer(self.self.value, index)
298
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
299
+
300
+ # Update hyper params and store pruned heads
301
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
302
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
303
+ self.pruned_heads = self.pruned_heads.union(heads)
304
+
305
+ def forward(
306
+ self,
307
+ hidden_states,
308
+ attention_mask=None,
309
+ head_mask=None,
310
+ encoder_hidden_states=None,
311
+ encoder_attention_mask=None,
312
+ output_attentions=False,
313
+ ):
314
+ self_outputs = self.self(
315
+ hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions,
316
+ )
317
+ attention_output = self.output(self_outputs[0], hidden_states)
318
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
319
+ return outputs
320
+
321
+
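# --- Editor's note: an illustrative sketch (not part of the original commit)
# of BertAttention.prune_heads, assuming BertConfig and BertModel from this
# file are in scope. Pruning slices the Q/K/V projections (and output.dense on
# dim=1), so hidden_size is preserved while the head count shrinks.
config = BertConfig(vocab_size=100, hidden_size=64, num_hidden_layers=2,
                    num_attention_heads=4, intermediate_size=128)
model = BertModel(config)
model._prune_heads({0: [0, 2]})                      # drop heads 0 and 2 of the first layer
attn = model.encoder.layer[0].attention.self
assert attn.num_attention_heads == 2
assert attn.all_head_size == 2 * attn.attention_head_size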
322
+ class BertIntermediate(nn.Module):
323
+ def __init__(self, config):
324
+ super().__init__()
325
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
326
+ if isinstance(config.hidden_act, str):
327
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
328
+ else:
329
+ self.intermediate_act_fn = config.hidden_act
330
+
331
+ def forward(self, hidden_states):
332
+ hidden_states = self.dense(hidden_states)
333
+ hidden_states = self.intermediate_act_fn(hidden_states)
334
+ return hidden_states
335
+
336
+
337
+ class BertOutput(nn.Module):
338
+ def __init__(self, config):
339
+ super().__init__()
340
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
341
+ self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
342
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
343
+
344
+ def forward(self, hidden_states, input_tensor):
345
+ hidden_states = self.dense(hidden_states)
346
+ hidden_states = self.dropout(hidden_states)
347
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
348
+ return hidden_states
349
+
350
+
351
+ class BertLayer(nn.Module):
352
+ def __init__(self, config):
353
+ super().__init__()
354
+ self.attention = BertAttention(config)
355
+ self.is_decoder = config.is_decoder
356
+ if self.is_decoder:
357
+ self.crossattention = BertAttention(config)
358
+ self.intermediate = BertIntermediate(config)
359
+ self.output = BertOutput(config)
360
+
361
+ def forward(
362
+ self,
363
+ hidden_states,
364
+ attention_mask=None,
365
+ head_mask=None,
366
+ encoder_hidden_states=None,
367
+ encoder_attention_mask=None,
368
+ output_attentions=False,
369
+ ):
370
+ self_attention_outputs = self.attention(
371
+ hidden_states, attention_mask, head_mask, output_attentions=output_attentions,
372
+ )
373
+ attention_output = self_attention_outputs[0]
374
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
375
+
376
+ if self.is_decoder and encoder_hidden_states is not None:
377
+ cross_attention_outputs = self.crossattention(
378
+ attention_output,
379
+ attention_mask,
380
+ head_mask,
381
+ encoder_hidden_states,
382
+ encoder_attention_mask,
383
+ output_attentions,
384
+ )
385
+ attention_output = cross_attention_outputs[0]
386
+ outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights
387
+
388
+ intermediate_output = self.intermediate(attention_output)
389
+ layer_output = self.output(intermediate_output, attention_output)
390
+ outputs = (layer_output,) + outputs
391
+ return outputs
392
+
393
+
394
+ class BertEncoder(nn.Module):
395
+ def __init__(self, config):
396
+ super().__init__()
397
+ self.config = config
398
+ self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
399
+
400
+ def forward(
401
+ self,
402
+ hidden_states,
403
+ attention_mask=None,
404
+ head_mask=None,
405
+ encoder_hidden_states=None,
406
+ encoder_attention_mask=None,
407
+ output_attentions=False,
408
+ output_hidden_states=False,
409
+ ):
410
+ all_hidden_states = ()
411
+ all_attentions = ()
412
+ for i, layer_module in enumerate(self.layer):
413
+ if output_hidden_states:
414
+ all_hidden_states = all_hidden_states + (hidden_states,)
415
+
416
+ if getattr(self.config, "gradient_checkpointing", False):
417
+
418
+ def create_custom_forward(module):
419
+ def custom_forward(*inputs):
420
+ return module(*inputs, output_attentions)
421
+
422
+ return custom_forward
423
+
424
+ layer_outputs = torch.utils.checkpoint.checkpoint(
425
+ create_custom_forward(layer_module),
426
+ hidden_states,
427
+ attention_mask,
428
+ head_mask[i],
429
+ encoder_hidden_states,
430
+ encoder_attention_mask,
431
+ )
432
+ else:
433
+ layer_outputs = layer_module(
434
+ hidden_states,
435
+ attention_mask,
436
+ head_mask[i],
437
+ encoder_hidden_states,
438
+ encoder_attention_mask,
439
+ output_attentions,
440
+ )
441
+ hidden_states = layer_outputs[0]
442
+
443
+ if output_attentions:
444
+ all_attentions = all_attentions + (layer_outputs[1],)
445
+
446
+ # Add last layer
447
+ if output_hidden_states:
448
+ all_hidden_states = all_hidden_states + (hidden_states,)
449
+
450
+ outputs = (hidden_states,)
451
+ if output_hidden_states:
452
+ outputs = outputs + (all_hidden_states,)
453
+ if output_attentions:
454
+ outputs = outputs + (all_attentions,)
455
+ return outputs # last-layer hidden state, (all hidden states), (all attentions)
456
+
457
+
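# --- Editor's note: a sketch (not part of the original commit) of the
# gradient-checkpointing branch above. The flag is read via getattr in
# BertEncoder.forward; enabling it recomputes each layer's activations during
# backward, trading compute for memory. Assumes BertConfig/BertModel from this file.
config = BertConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2,
                    num_attention_heads=2, intermediate_size=64)
config.gradient_checkpointing = True
model = BertModel(config)
model.train()                                        # checkpointing only matters when gradients flow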
458
+ class BertPooler(nn.Module):
459
+ def __init__(self, config):
460
+ super().__init__()
461
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
462
+ self.activation = nn.Tanh()
463
+
464
+ def forward(self, hidden_states):
465
+ # We "pool" the model by simply taking the hidden state corresponding
466
+ # to the first token.
467
+ first_token_tensor = hidden_states[:, 0]
468
+ pooled_output = self.dense(first_token_tensor)
469
+ pooled_output = self.activation(pooled_output)
470
+ return pooled_output
471
+
472
+
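# --- Editor's note: BertPooler above summarizes a sequence by its first
# ([CLS]) position only; as the BertModel docstring later notes, mean-pooling
# the token states is often a better sentence summary. A minimal sketch (not
# part of the original commit):
import torch

hidden = torch.randn(2, 8, 32)                       # (batch, seq_len, hidden_size)
mask = torch.ones(2, 8)
mask[1, 5:] = 0                                      # 1 = real token, 0 = padding
mask = mask.unsqueeze(-1)                            # (batch, seq_len, 1)
mean_pooled = (hidden * mask).sum(1) / mask.sum(1).clamp(min=1e-9)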
473
+ class BertPredictionHeadTransform(nn.Module):
474
+ def __init__(self, config):
475
+ super().__init__()
476
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
477
+ if isinstance(config.hidden_act, str):
478
+ self.transform_act_fn = ACT2FN[config.hidden_act]
479
+ else:
480
+ self.transform_act_fn = config.hidden_act
481
+ self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
482
+
483
+ def forward(self, hidden_states):
484
+ hidden_states = self.dense(hidden_states)
485
+ hidden_states = self.transform_act_fn(hidden_states)
486
+ hidden_states = self.LayerNorm(hidden_states)
487
+ return hidden_states
488
+
489
+
490
+ class BertLMPredictionHead(nn.Module):
491
+ def __init__(self, config):
492
+ super().__init__()
493
+ self.transform = BertPredictionHeadTransform(config)
494
+
495
+ # The output weights are the same as the input embeddings, but there is
496
+ # an output-only bias for each token.
497
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
498
+
499
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
500
+
501
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
502
+ self.decoder.bias = self.bias
503
+
504
+ def forward(self, hidden_states):
505
+ hidden_states = self.transform(hidden_states)
506
+ hidden_states = self.decoder(hidden_states)
507
+ return hidden_states
508
+
509
+
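# --- Editor's note: a sketch (not part of the original commit) of the
# input/output embedding tying mentioned above. In the library the wiring is
# done by PreTrainedModel.tie_weights through get_input_embeddings /
# get_output_embeddings; conceptually it is one shared weight matrix:
import torch.nn as nn

vocab_size, hidden_size = 100, 32
word_embeddings = nn.Embedding(vocab_size, hidden_size)
decoder = nn.Linear(hidden_size, vocab_size, bias=False)
decoder.weight = word_embeddings.weight              # tie: LM logits reuse the embedding matrix
assert decoder.weight.data_ptr() == word_embeddings.weight.data_ptr()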
510
+ class BertOnlyMLMHead(nn.Module):
511
+ def __init__(self, config):
512
+ super().__init__()
513
+ self.predictions = BertLMPredictionHead(config)
514
+
515
+ def forward(self, sequence_output):
516
+ prediction_scores = self.predictions(sequence_output)
517
+ return prediction_scores
518
+
519
+
520
+ class BertOnlyNSPHead(nn.Module):
521
+ def __init__(self, config):
522
+ super().__init__()
523
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
524
+
525
+ def forward(self, pooled_output):
526
+ seq_relationship_score = self.seq_relationship(pooled_output)
527
+ return seq_relationship_score
528
+
529
+
530
+ class BertPreTrainingHeads(nn.Module):
531
+ def __init__(self, config):
532
+ super().__init__()
533
+ self.predictions = BertLMPredictionHead(config)
534
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
535
+
536
+ def forward(self, sequence_output, pooled_output):
537
+ prediction_scores = self.predictions(sequence_output)
538
+ seq_relationship_score = self.seq_relationship(pooled_output)
539
+ return prediction_scores, seq_relationship_score
540
+
541
+
542
+ class BertPreTrainedModel(PreTrainedModel):
543
+ """ An abstract class to handle weights initialization and
544
+ a simple interface for downloading and loading pretrained models.
545
+ """
546
+
547
+ config_class = BertConfig
548
+ load_tf_weights = load_tf_weights_in_bert
549
+ base_model_prefix = "bert"
550
+
551
+ def _init_weights(self, module):
552
+ """ Initialize the weights """
553
+ if isinstance(module, (nn.Linear, nn.Embedding)):
554
+ # Slightly different from the TF version which uses truncated_normal for initialization
555
+ # cf https://github.com/pytorch/pytorch/pull/5617
556
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
557
+ elif isinstance(module, BertLayerNorm):
558
+ module.bias.data.zero_()
559
+ module.weight.data.fill_(1.0)
560
+ if isinstance(module, nn.Linear) and module.bias is not None:
561
+ module.bias.data.zero_()
562
+
563
+
564
+ BERT_START_DOCSTRING = r"""
565
+ This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
566
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
567
+ usage and behavior.
568
+
569
+ Parameters:
570
+ config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
571
+ Initializing with a config file does not load the weights associated with the model, only the configuration.
572
+ Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
573
+ """
574
+
575
+ BERT_INPUTS_DOCSTRING = r"""
576
+ Args:
577
+ input_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`):
578
+ Indices of input sequence tokens in the vocabulary.
579
+
580
+ Indices can be obtained using :class:`transformers.BertTokenizer`.
581
+ See :func:`transformers.PreTrainedTokenizer.encode` and
582
+ :func:`transformers.PreTrainedTokenizer.__call__` for details.
583
+
584
+ `What are input IDs? <../glossary.html#input-ids>`__
585
+ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`):
586
+ Mask to avoid performing attention on padding token indices.
587
+ Mask values selected in ``[0, 1]``:
588
+ ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
589
+
590
+ `What are attention masks? <../glossary.html#attention-mask>`__
591
+ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`):
592
+ Segment token indices to indicate first and second portions of the inputs.
593
+ Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
594
+ corresponds to a `sentence B` token
595
+
596
+ `What are token type IDs? <../glossary.html#token-type-ids>`_
597
+ position_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`):
598
+ Indices of positions of each input sequence tokens in the position embeddings.
599
+ Selected in the range ``[0, config.max_position_embeddings - 1]``.
600
+
601
+ `What are position IDs? <../glossary.html#position-ids>`_
602
+ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
603
+ Mask to nullify selected heads of the self-attention modules.
604
+ Mask values selected in ``[0, 1]``:
605
+ :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
606
+ inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
607
+ Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
608
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
609
+ than the model's internal embedding lookup matrix.
610
+ encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
611
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
612
+ if the model is configured as a decoder.
613
+ encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
614
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask
615
+ is used in the cross-attention if the model is configured as a decoder.
616
+ Mask values selected in ``[0, 1]``:
617
+ ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
618
+ output_attentions (:obj:`bool`, `optional`, defaults to :obj:`None`):
619
+ If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under returned tensors for more detail.
620
+ """
621
+
622
+
623
+ @add_start_docstrings(
624
+ "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
625
+ BERT_START_DOCSTRING,
626
+ )
627
+ class BertModel(BertPreTrainedModel):
628
+ """
629
+
630
+ The model can behave as an encoder (with only self-attention) as well
631
+ as a decoder, in which case a layer of cross-attention is added between
632
+ the self-attention layers, following the architecture described in `Attention is all you need`_ by Ashish Vaswani,
633
+ Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
634
+
635
+ To behave as a decoder, the model needs to be initialized with the
636
+ :obj:`is_decoder` argument of the configuration set to :obj:`True`; an
637
+ :obj:`encoder_hidden_states` is expected as an input to the forward pass.
638
+
639
+ .. _`Attention is all you need`:
640
+ https://arxiv.org/abs/1706.03762
641
+
642
+ """
643
+
644
+ def __init__(self, config):
645
+ super().__init__(config)
646
+ self.config = config
647
+
648
+ self.embeddings = BertEmbeddings(config)
649
+ self.encoder = BertEncoder(config)
650
+ self.pooler = BertPooler(config)
651
+
652
+ self.init_weights()
653
+
654
+ def get_input_embeddings(self):
655
+ return self.embeddings.word_embeddings
656
+
657
+ def set_input_embeddings(self, value):
658
+ self.embeddings.word_embeddings = value
659
+
660
+ def _prune_heads(self, heads_to_prune):
661
+ """ Prunes heads of the model.
662
+ heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
663
+ See base class PreTrainedModel
664
+ """
665
+ for layer, heads in heads_to_prune.items():
666
+ self.encoder.layer[layer].attention.prune_heads(heads)
667
+
668
+ @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
669
+ @add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="bert-base-uncased")
670
+ def forward(
671
+ self,
672
+ input_ids=None,
673
+ attention_mask=None,
674
+ token_type_ids=None,
675
+ position_ids=None,
676
+ head_mask=None,
677
+ inputs_embeds=None,
678
+ encoder_hidden_states=None,
679
+ encoder_attention_mask=None,
680
+ output_attentions=None,
681
+ output_hidden_states=None,
682
+ ):
683
+ r"""
684
+ Return:
685
+ :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
686
+ last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
687
+ Sequence of hidden-states at the output of the last layer of the model.
688
+ pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`):
689
+ Last layer hidden-state of the first token of the sequence (classification token)
690
+ further processed by a Linear layer and a Tanh activation function. The Linear
691
+ layer weights are trained from the next sentence prediction (classification)
692
+ objective during pre-training.
693
+
694
+ This output is usually *not* a good summary
695
+ of the semantic content of the input; you're often better off averaging or pooling
696
+ the sequence of hidden-states for the whole input sequence.
697
+ hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
698
+ Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
699
+ of shape :obj:`(batch_size, sequence_length, hidden_size)`.
700
+
701
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
702
+ attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
703
+ Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
704
+ :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
705
+
706
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
707
+ heads.
708
+ """
709
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
710
+ output_hidden_states = (
711
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
712
+ )
713
+
714
+ if input_ids is not None and inputs_embeds is not None:
715
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
716
+ elif input_ids is not None:
717
+ input_shape = input_ids.size()
718
+ elif inputs_embeds is not None:
719
+ input_shape = inputs_embeds.size()[:-1]
720
+ else:
721
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
722
+
723
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
724
+
725
+ if attention_mask is None:
726
+ attention_mask = torch.ones(input_shape, device=device)
727
+ if token_type_ids is None:
728
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
729
+
730
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
731
+ # ourselves, in which case we just need to make it broadcastable to all heads.
732
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
733
+
734
+ # If a 2D or 3D attention mask is provided for the cross-attention,
735
+ # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
736
+ if self.config.is_decoder and encoder_hidden_states is not None:
737
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
738
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
739
+ if encoder_attention_mask is None:
740
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
741
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
742
+ else:
743
+ encoder_extended_attention_mask = None
744
+
745
+ # Prepare head mask if needed
746
+ # 1.0 in head_mask indicate we keep the head
747
+ # attention_probs has shape bsz x n_heads x N x N
748
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
749
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
750
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
751
+
752
+ embedding_output = self.embeddings(
753
+ input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
754
+ )
755
+ encoder_outputs = self.encoder(
756
+ embedding_output,
757
+ attention_mask=extended_attention_mask,
758
+ head_mask=head_mask,
759
+ encoder_hidden_states=encoder_hidden_states,
760
+ encoder_attention_mask=encoder_extended_attention_mask,
761
+ output_attentions=output_attentions,
762
+ output_hidden_states=output_hidden_states,
763
+ )
764
+ sequence_output = encoder_outputs[0]
765
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
766
+
767
+ outputs = (sequence_output, pooled_output,) + encoder_outputs[
768
+ 1:
769
+ ] # add hidden_states and attentions if they are here
770
+ return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
771
+
772
+
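# --- Editor's note: a minimal usage sketch (not part of the original commit)
# for the tuple interface above, assuming a 'bert-base-uncased' checkpoint is
# available to from_pretrained. The token ids below are the bert-base-uncased
# ids for "[CLS] hello world [SEP]".
import torch

model = BertModel.from_pretrained('bert-base-uncased')
input_ids = torch.tensor([[101, 7592, 2088, 102]])
attention_mask = torch.ones_like(input_ids)
sequence_output, pooled_output = model(input_ids, attention_mask=attention_mask)[:2]
# sequence_output: (1, 4, 768); pooled_output: (1, 768)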
773
+ @add_start_docstrings(
774
+ """Bert Model with two heads on top as done during the pre-training: a `masked language modeling` head and
775
+ a `next sentence prediction (classification)` head. """,
776
+ BERT_START_DOCSTRING,
777
+ )
778
+ class BertForPreTraining(BertPreTrainedModel):
779
+ def __init__(self, config):
780
+ super().__init__(config)
781
+
782
+ self.bert = BertModel(config)
783
+ self.cls = BertPreTrainingHeads(config)
784
+
785
+ self.init_weights()
786
+
787
+ def get_output_embeddings(self):
788
+ return self.cls.predictions.decoder
789
+
790
+ @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
791
+ def forward(
792
+ self,
793
+ input_ids=None,
794
+ attention_mask=None,
795
+ token_type_ids=None,
796
+ position_ids=None,
797
+ head_mask=None,
798
+ inputs_embeds=None,
799
+ labels=None,
800
+ next_sentence_label=None,
801
+ output_attentions=None,
802
+ output_hidden_states=None,
803
+ **kwargs
804
+ ):
805
+ r"""
806
+ labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`, defaults to :obj:`None`):
807
+ Labels for computing the masked language modeling loss.
808
+ Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
809
+ Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels
810
+ in ``[0, ..., config.vocab_size]``
811
+ next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
812
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see :obj:`input_ids` docstring)
813
+ Indices should be in ``[0, 1]``.
814
+ ``0`` indicates sequence B is a continuation of sequence A,
815
+ ``1`` indicates sequence B is a random sequence.
816
+ kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
817
+ Used to hide legacy arguments that have been deprecated.
818
+
819
+ Returns:
820
+ :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
821
+ loss (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
822
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
823
+ prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
824
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
825
+ seq_relationship_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
826
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False
827
+ continuation before SoftMax).
828
+ hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
829
+ Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
830
+ of shape :obj:`(batch_size, sequence_length, hidden_size)`.
831
+
832
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
833
+ attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
834
+ Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
835
+ :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
836
+
837
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
838
+ heads.
839
+
840
+
841
+ Examples::
842
+
843
+ >>> from transformers import BertTokenizer, BertForPreTraining
844
+ >>> import torch
845
+
846
+ >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
847
+ >>> model = BertForPreTraining.from_pretrained('bert-base-uncased')
848
+
849
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
850
+ >>> outputs = model(**inputs)
851
+
852
+ >>> prediction_scores, seq_relationship_scores = outputs[:2]
853
+
854
+ """
855
+ if "masked_lm_labels" in kwargs:
856
+ warnings.warn(
857
+ "The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.",
858
+ DeprecationWarning,
859
+ )
860
+ labels = kwargs.pop("masked_lm_labels")
861
+ assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
862
+
863
+ outputs = self.bert(
864
+ input_ids,
865
+ attention_mask=attention_mask,
866
+ token_type_ids=token_type_ids,
867
+ position_ids=position_ids,
868
+ head_mask=head_mask,
869
+ inputs_embeds=inputs_embeds,
870
+ output_attentions=output_attentions,
871
+ output_hidden_states=output_hidden_states,
872
+ )
873
+
874
+ sequence_output, pooled_output = outputs[:2]
875
+ prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
876
+
877
+ outputs = (prediction_scores, seq_relationship_score,) + outputs[
878
+ 2:
879
+ ] # add hidden states and attention if they are here
880
+
881
+ if labels is not None and next_sentence_label is not None:
882
+ loss_fct = CrossEntropyLoss()
883
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
884
+ next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
885
+ total_loss = masked_lm_loss + next_sentence_loss
886
+ outputs = (total_loss,) + outputs
887
+
888
+ return outputs # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)
889
+
890
+
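# --- Editor's note: a sketch (not part of the original commit) of how the
# -100 sentinel in `labels` interacts with CrossEntropyLoss above: only the
# supervised (masked) positions contribute to the MLM term of the loss.
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 100
prediction_scores = torch.randn(1, 6, vocab_size)
labels = torch.full((1, 6), -100, dtype=torch.long)  # -100 = CrossEntropyLoss's default ignore_index
labels[0, 2] = 42                                    # supervise only the masked position
masked_lm_loss = CrossEntropyLoss()(prediction_scores.view(-1, vocab_size), labels.view(-1))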
891
+ @add_start_docstrings(
892
+ """Bert Model with a `language modeling` head on top for CLM fine-tuning. """, BERT_START_DOCSTRING
893
+ )
894
+ class BertLMHeadModel(BertPreTrainedModel):
895
+ def __init__(self, config):
896
+ super().__init__(config)
897
+ assert config.is_decoder, "If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True`."
898
+
899
+ self.bert = BertModel(config)
900
+ self.cls = BertOnlyMLMHead(config)
901
+
902
+ self.init_weights()
903
+
904
+ def get_output_embeddings(self):
905
+ return self.cls.predictions.decoder
906
+
907
+ @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
908
+ def forward(
909
+ self,
910
+ input_ids=None,
911
+ attention_mask=None,
912
+ token_type_ids=None,
913
+ position_ids=None,
914
+ head_mask=None,
915
+ inputs_embeds=None,
916
+ labels=None,
917
+ encoder_hidden_states=None,
918
+ encoder_attention_mask=None,
919
+ output_attentions=None,
920
+ output_hidden_states=None,
921
+ **kwargs
922
+ ):
923
+ r"""
924
+ labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
925
+ Labels for computing the left-to-right language modeling loss (next word prediction).
926
+ Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
927
+ Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels
928
+ in ``[0, ..., config.vocab_size]``
929
+ kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
930
+ Used to hide legacy arguments that have been deprecated.
931
+
932
+ Returns:
933
+ :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
934
+ ltr_lm_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
935
+ Next token prediction loss.
936
+ prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
937
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
938
+ hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
939
+ Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
940
+ of shape :obj:`(batch_size, sequence_length, hidden_size)`.
941
+
942
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
943
+ attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
944
+ Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
945
+ :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
946
+
947
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
948
+ heads.
949
+
950
+ Example::
951
+
952
+ >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
953
+ >>> import torch
954
+
955
+ >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
956
+ >>> config = BertConfig.from_pretrained("bert-base-cased")
957
+ >>> config.is_decoder = True
958
+ >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
959
+
960
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
961
+ >>> outputs = model(**inputs)
962
+
963
+ >>> last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
964
+ """
965
+
966
+ outputs = self.bert(
967
+ input_ids,
968
+ attention_mask=attention_mask,
969
+ token_type_ids=token_type_ids,
970
+ position_ids=position_ids,
971
+ head_mask=head_mask,
972
+ inputs_embeds=inputs_embeds,
973
+ encoder_hidden_states=encoder_hidden_states,
974
+ encoder_attention_mask=encoder_attention_mask,
975
+ output_attentions=output_attentions,
976
+ output_hidden_states=output_hidden_states,
977
+ )
978
+
979
+ sequence_output = outputs[0]
980
+ prediction_scores = self.cls(sequence_output)
981
+
982
+ outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here
983
+
984
+ if labels is not None:
985
+ # we are doing next-token prediction; shift prediction scores and input ids by one
986
+ prediction_scores = prediction_scores[:, :-1, :].contiguous()
987
+ labels = labels[:, 1:].contiguous()
988
+ loss_fct = CrossEntropyLoss()
989
+ ltr_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
990
+ outputs = (ltr_lm_loss,) + outputs
991
+
992
+ return outputs # (ltr_lm_loss), prediction_scores, (hidden_states), (attentions)
993
+
994
+ def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
995
+ input_shape = input_ids.shape
996
+
997
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
998
+ if attention_mask is None:
999
+ attention_mask = input_ids.new_ones(input_shape)
1000
+
1001
+ return {"input_ids": input_ids, "attention_mask": attention_mask}
1002
+
1003
+
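# --- Editor's note: a sketch (not part of the original commit) of the
# shift-by-one alignment used for the causal LM loss above: the score at
# position t is trained against the token at position t + 1.
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 100
prediction_scores = torch.randn(1, 5, vocab_size)
input_labels = torch.randint(0, vocab_size, (1, 5))
shifted_scores = prediction_scores[:, :-1, :].contiguous()   # predictions for steps 0..3
shifted_labels = input_labels[:, 1:].contiguous()            # targets are tokens 1..4
ltr_lm_loss = CrossEntropyLoss()(shifted_scores.view(-1, vocab_size), shifted_labels.view(-1))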
1004
+ @add_start_docstrings("""Bert Model with a `language modeling` head on top. """, BERT_START_DOCSTRING)
1005
+ class BertForMaskedLM(BertPreTrainedModel):
1006
+ def __init__(self, config):
1007
+ super().__init__(config)
1008
+ assert (
1009
+ not config.is_decoder
1010
+ ), "If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention."
1011
+
1012
+ self.bert = BertModel(config)
1013
+ self.cls = BertOnlyMLMHead(config)
1014
+
1015
+ self.init_weights()
1016
+
1017
+ def get_output_embeddings(self):
1018
+ return self.cls.predictions.decoder
1019
+
1020
+ @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
1021
+ @add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="bert-base-uncased")
1022
+ def forward(
1023
+ self,
1024
+ input_ids=None,
1025
+ attention_mask=None,
1026
+ token_type_ids=None,
1027
+ position_ids=None,
1028
+ head_mask=None,
1029
+ inputs_embeds=None,
1030
+ labels=None,
1031
+ encoder_hidden_states=None,
1032
+ encoder_attention_mask=None,
1033
+ output_attentions=None,
1034
+ output_hidden_states=None,
1035
+ **kwargs
1036
+ ):
1037
+ r"""
1038
+ labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
1039
+ Labels for computing the masked language modeling loss.
1040
+ Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
1041
+ Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels
1042
+ in ``[0, ..., config.vocab_size]``
1043
+ kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
1044
+ Used to hide legacy arguments that have been deprecated.
1045
+
1046
+ Returns:
1047
+ :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
1048
+ masked_lm_loss (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
1049
+ Masked language modeling loss.
1050
+ prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
1051
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
1052
+ hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
1053
+ Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
1054
+ of shape :obj:`(batch_size, sequence_length, hidden_size)`.
1055
+
1056
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
1057
+ attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
1058
+ Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
1059
+ :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
1060
+
1061
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
1062
+ heads.
1063
+ """
1064
+ if "masked_lm_labels" in kwargs:
1065
+ warnings.warn(
1066
+ "The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.",
1067
+ DeprecationWarning,
1068
+ )
1069
+ labels = kwargs.pop("masked_lm_labels")
1070
+ assert "lm_labels" not in kwargs, "Use `BertWithLMHead` for autoregressive language modeling task."
1071
+ assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
1072
+
1073
+ outputs = self.bert(
1074
+ input_ids,
1075
+ attention_mask=attention_mask,
1076
+ token_type_ids=token_type_ids,
1077
+ position_ids=position_ids,
1078
+ head_mask=head_mask,
1079
+ inputs_embeds=inputs_embeds,
1080
+ encoder_hidden_states=encoder_hidden_states,
1081
+ encoder_attention_mask=encoder_attention_mask,
1082
+ output_attentions=output_attentions,
1083
+ output_hidden_states=output_hidden_states,
1084
+ )
1085
+
1086
+ sequence_output = outputs[0]
1087
+ prediction_scores = self.cls(sequence_output)
1088
+
1089
+ outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here
1090
+
1091
+ if labels is not None:
1092
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
1093
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1094
+ outputs = (masked_lm_loss,) + outputs
1095
+
1096
+ return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)
1097
+
1098
+ def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
1099
+ input_shape = input_ids.shape
1100
+ effective_batch_size = input_shape[0]
1101
+
1102
+ # add a dummy token
1103
+ assert self.config.pad_token_id is not None, "The PAD token should be defined for generation"
1104
+ attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
1105
+ dummy_token = torch.full(
1106
+ (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
1107
+ )
1108
+ input_ids = torch.cat([input_ids, dummy_token], dim=1)
1109
+
1110
+ return {"input_ids": input_ids, "attention_mask": attention_mask}
1111
+
1112
+
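# --- Editor's note: a fill-mask sketch (not part of the original commit),
# assuming BertTokenizer from this repo's tokenization_bert is in scope and a
# 'bert-base-uncased' checkpoint is available.
import torch

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForMaskedLM.from_pretrained('bert-base-uncased')
inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
prediction_scores = model(**inputs)[0]               # (1, seq_len, vocab_size)
mask_pos = (inputs["input_ids"][0] == tokenizer.mask_token_id).nonzero()[0]
predicted_id = prediction_scores[0, mask_pos].argmax(-1)
print(tokenizer.convert_ids_to_tokens([predicted_id.item()]))   # expected: ['paris']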
1113
+ @add_start_docstrings(
1114
+ """Bert Model with a `next sentence prediction (classification)` head on top. """, BERT_START_DOCSTRING,
1115
+ )
1116
+ class BertForNextSentencePrediction(BertPreTrainedModel):
1117
+ def __init__(self, config):
1118
+ super().__init__(config)
1119
+
1120
+ self.bert = BertModel(config)
1121
+ self.cls = BertOnlyNSPHead(config)
1122
+
1123
+ self.init_weights()
1124
+
1125
+ @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
1126
+ def forward(
1127
+ self,
1128
+ input_ids=None,
1129
+ attention_mask=None,
1130
+ token_type_ids=None,
1131
+ position_ids=None,
1132
+ head_mask=None,
1133
+ inputs_embeds=None,
1134
+ next_sentence_label=None,
1135
+ output_attentions=None,
1136
+ output_hidden_states=None,
1137
+ ):
1138
+ r"""
1139
+ next_sentence_label (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
1140
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)
1141
+ Indices should be in ``[0, 1]``.
1142
+ ``0`` indicates sequence B is a continuation of sequence A,
1143
+ ``1`` indicates sequence B is a random sequence.
1144
+
1145
+ Returns:
1146
+ :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
1147
+ loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`next_sentence_label` is provided):
1148
+ Next sequence prediction (classification) loss.
1149
+ seq_relationship_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
1150
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).
1151
+ hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
1152
+ Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
1153
+ of shape :obj:`(batch_size, sequence_length, hidden_size)`.
1154
+
1155
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
1156
+ attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
1157
+ Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
1158
+ :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
1159
+
1160
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
1161
+ heads.
1162
+
1163
+ Examples::
1164
+
1165
+ >>> from transformers import BertTokenizer, BertForNextSentencePrediction
1166
+ >>> import torch
1167
+
1168
+ >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
1169
+ >>> model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased')
1170
+
1171
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
1172
+ >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
1173
+ >>> encoding = tokenizer(prompt, next_sentence, return_tensors='pt')
1174
+
1175
+ >>> loss, logits = model(**encoding, next_sentence_label=torch.LongTensor([1]))
1176
+ >>> assert logits[0, 0] < logits[0, 1] # next sentence was random
1177
+ """
1178
+
1179
+ outputs = self.bert(
1180
+ input_ids,
1181
+ attention_mask=attention_mask,
1182
+ token_type_ids=token_type_ids,
1183
+ position_ids=position_ids,
1184
+ head_mask=head_mask,
1185
+ inputs_embeds=inputs_embeds,
1186
+ output_attentions=output_attentions,
1187
+ output_hidden_states=output_hidden_states,
1188
+ )
1189
+
1190
+ pooled_output = outputs[1]
1191
+
1192
+ seq_relationship_score = self.cls(pooled_output)
1193
+
1194
+ outputs = (seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here
1195
+ if next_sentence_label is not None:
1196
+ loss_fct = CrossEntropyLoss()
1197
+ next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
1198
+ outputs = (next_sentence_loss,) + outputs
1199
+
1200
+ return outputs # (next_sentence_loss), seq_relationship_score, (hidden_states), (attentions)
1201
+
1202
+
1203
+ @add_start_docstrings(
1204
+ """Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of
1205
+ the pooled output) e.g. for GLUE tasks. """,
1206
+ BERT_START_DOCSTRING,
1207
+ )
1208
+ class BertForSequenceClassification(BertPreTrainedModel):
1209
+ def __init__(self, config):
1210
+ super().__init__(config)
1211
+ self.num_labels = config.num_labels
1212
+
1213
+ self.bert = BertModel(config)
1214
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1215
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1216
+
1217
+ self.init_weights()
1218
+
1219
+ @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
1220
+ @add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="bert-base-uncased")
1221
+ def forward(
1222
+ self,
1223
+ input_ids=None,
1224
+ attention_mask=None,
1225
+ token_type_ids=None,
1226
+ position_ids=None,
1227
+ head_mask=None,
1228
+ inputs_embeds=None,
1229
+ labels=None,
1230
+ output_attentions=None,
1231
+ output_hidden_states=None,
1232
+ ):
1233
+ r"""
1234
+ labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
1235
+ Labels for computing the sequence classification/regression loss.
1236
+ Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
1237
+ If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
1238
+ If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1239
+
1240
+ Returns:
1241
+ :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
1242
+ loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
1243
+ Classification (or regression if config.num_labels==1) loss.
1244
+ logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
1245
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
1246
+ hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
1247
+ Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
1248
+ of shape :obj:`(batch_size, sequence_length, hidden_size)`.
1249
+
1250
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
1251
+ attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
1252
+ Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
1253
+ :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
1254
+
1255
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
1256
+ heads.
1257
+ """
1258
+
1259
+ outputs = self.bert(
1260
+ input_ids,
1261
+ attention_mask=attention_mask,
1262
+ token_type_ids=token_type_ids,
1263
+ position_ids=position_ids,
1264
+ head_mask=head_mask,
1265
+ inputs_embeds=inputs_embeds,
1266
+ output_attentions=output_attentions,
1267
+ output_hidden_states=output_hidden_states,
1268
+ )
1269
+
1270
+ pooled_output = outputs[1]
1271
+
1272
+ pooled_output = self.dropout(pooled_output)
1273
+ logits = self.classifier(pooled_output)
1274
+
1275
+ outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
1276
+
1277
+ if labels is not None:
1278
+ if self.num_labels == 1:
1279
+ # We are doing regression
1280
+ loss_fct = MSELoss()
1281
+ loss = loss_fct(logits.view(-1), labels.view(-1))
1282
+ else:
1283
+ loss_fct = CrossEntropyLoss()
1284
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1285
+ outputs = (loss,) + outputs
1286
+
1287
+ return outputs # (loss), logits, (hidden_states), (attentions)
1288
+
1289
+
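# --- Editor's note: a sketch (not part of the original commit) of the loss
# switch above: num_labels == 1 selects MSE regression, any other value
# selects cross-entropy classification.
import torch
from torch.nn import CrossEntropyLoss, MSELoss

logits_cls = torch.randn(4, 3)                       # 3-way classification
labels_cls = torch.tensor([0, 2, 1, 2])
cls_loss = CrossEntropyLoss()(logits_cls.view(-1, 3), labels_cls.view(-1))

logits_reg = torch.randn(4, 1)                       # num_labels == 1 -> regression
labels_reg = torch.randn(4)
reg_loss = MSELoss()(logits_reg.view(-1), labels_reg.view(-1))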
1290
+ @add_start_docstrings(
1291
+ """Bert Model with a multiple choice classification head on top (a linear layer on top of
1292
+ the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
1293
+ BERT_START_DOCSTRING,
1294
+ )
1295
+ class BertForMultipleChoice(BertPreTrainedModel):
1296
+ def __init__(self, config):
1297
+ super().__init__(config)
1298
+
1299
+ self.bert = BertModel(config)
1300
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1301
+ self.classifier = nn.Linear(config.hidden_size, 1)
1302
+
1303
+ self.init_weights()
1304
+
1305
+ @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format("(batch_size, num_choices, sequence_length)"))
1306
+ @add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="bert-base-uncased")
1307
+ def forward(
1308
+ self,
1309
+ input_ids=None,
1310
+ attention_mask=None,
1311
+ token_type_ids=None,
1312
+ position_ids=None,
1313
+ head_mask=None,
1314
+ inputs_embeds=None,
1315
+ labels=None,
1316
+ output_attentions=None,
1317
+ output_hidden_states=None,
1318
+ ):
1319
+ r"""
1320
+ labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
1321
+ Labels for computing the multiple choice classification loss.
1322
+ Indices should be in ``[0, ..., num_choices-1]`` where `num_choices` is the size of the second dimension
1323
+ of the input tensors. (see `input_ids` above)
1324
+
1325
+ Returns:
1326
+ :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
1327
+ loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when :obj:`labels` is provided):
1328
+ Classification loss.
1329
+ classification_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
1330
+ `num_choices` is the second dimension of the input tensors. (see `input_ids` above).
1331
+
1332
+ Classification scores (before SoftMax).
1333
+ hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
1334
+ Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
1335
+ of shape :obj:`(batch_size, sequence_length, hidden_size)`.
1336
+
1337
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
1338
+ attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
1339
+ Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
1340
+ :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
1341
+
1342
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
1343
+ heads.
1344
+ """
1345
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1346
+
1347
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1348
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1349
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
1350
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
1351
+ inputs_embeds = (
1352
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1353
+ if inputs_embeds is not None
1354
+ else None
1355
+ )
1356
+
1357
+ outputs = self.bert(
1358
+ input_ids,
1359
+ attention_mask=attention_mask,
1360
+ token_type_ids=token_type_ids,
1361
+ position_ids=position_ids,
1362
+ head_mask=head_mask,
1363
+ inputs_embeds=inputs_embeds,
1364
+ output_attentions=output_attentions,
1365
+ output_hidden_states=output_hidden_states,
1366
+ )
1367
+
1368
+ pooled_output = outputs[1]
1369
+
1370
+ pooled_output = self.dropout(pooled_output)
1371
+ logits = self.classifier(pooled_output)
1372
+ reshaped_logits = logits.view(-1, num_choices)
1373
+
1374
+ outputs = (reshaped_logits,) + outputs[2:] # add hidden states and attention if they are here
1375
+
1376
+ if labels is not None:
1377
+ loss_fct = CrossEntropyLoss()
1378
+ loss = loss_fct(reshaped_logits, labels)
1379
+ outputs = (loss,) + outputs
1380
+
1381
+ return outputs # (loss), reshaped_logits, (hidden_states), (attentions)
1382
+
1383
+
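# --- Editor's note: a shape-only sketch (not part of the original commit) of
# the flatten/unflatten trick above: choices are folded into the batch
# dimension for the encoder, then the per-choice scores are reshaped back
# before the softmax over choices.
import torch

batch, num_choices, seq_len = 2, 4, 8
input_ids = torch.randint(0, 100, (batch, num_choices, seq_len))
flat_input_ids = input_ids.view(-1, input_ids.size(-1))    # (batch * num_choices, seq_len)
logits = torch.randn(flat_input_ids.size(0), 1)            # one score per (example, choice)
reshaped_logits = logits.view(-1, num_choices)             # (batch, num_choices)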
1384
+ @add_start_docstrings(
1385
+ """Bert Model with a token classification head on top (a linear layer on top of
1386
+ the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
1387
+ BERT_START_DOCSTRING,
1388
+ )
1389
+ class BertForTokenClassification(BertPreTrainedModel):
1390
+ def __init__(self, config):
1391
+ super().__init__(config)
1392
+ self.num_labels = config.num_labels
1393
+
1394
+ self.bert = BertModel(config)
1395
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
1396
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1397
+
1398
+ self.init_weights()
1399
+
1400
+ @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
1401
+ @add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="bert-base-uncased")
1402
+ def forward(
1403
+ self,
1404
+ input_ids=None,
1405
+ attention_mask=None,
1406
+ token_type_ids=None,
1407
+ position_ids=None,
1408
+ head_mask=None,
1409
+ inputs_embeds=None,
1410
+ labels=None,
1411
+ output_attentions=None,
1412
+ output_hidden_states=None,
1413
+ ):
1414
+ r"""
1415
+ labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
1416
+ Labels for computing the token classification loss.
1417
+ Indices should be in ``[0, ..., config.num_labels - 1]``.
1418
+
1419
+ Returns:
1420
+ :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
1421
+ loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided) :
1422
+ Classification loss.
1423
+ scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`)
1424
+ Classification scores (before SoftMax).
1425
+ hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
1426
+ Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
1427
+ of shape :obj:`(batch_size, sequence_length, hidden_size)`.
1428
+
1429
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
1430
+ attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
1431
+ Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
1432
+ :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
1433
+
1434
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
1435
+ heads.
1436
+ """
1437
+
1438
+ outputs = self.bert(
1439
+ input_ids,
1440
+ attention_mask=attention_mask,
1441
+ token_type_ids=token_type_ids,
1442
+ position_ids=position_ids,
1443
+ head_mask=head_mask,
1444
+ inputs_embeds=inputs_embeds,
1445
+ output_attentions=output_attentions,
1446
+ output_hidden_states=output_hidden_states,
1447
+ )
1448
+
1449
+ sequence_output = outputs[0]
1450
+
1451
+ sequence_output = self.dropout(sequence_output)
1452
+ logits = self.classifier(sequence_output)
1453
+
1454
+ outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
1455
+ if labels is not None:
1456
+ loss_fct = CrossEntropyLoss()
1457
+ # Only keep active parts of the loss
1458
+ if attention_mask is not None:
1459
+ active_loss = attention_mask.view(-1) == 1
1460
+ active_logits = logits.view(-1, self.num_labels)
1461
+ active_labels = torch.where(
1462
+ active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
1463
+ )
1464
+ loss = loss_fct(active_logits, active_labels)
1465
+ else:
1466
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1467
+ outputs = (loss,) + outputs
1468
+
1469
+ return outputs # (loss), scores, (hidden_states), (attentions)
1470
+
1471
+
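# --- Editor's note: a sketch (not part of the original commit) of the
# attention-mask-gated loss above: padded positions are rewritten to
# ignore_index (-100) so they do not contribute to the token-classification loss.
import torch
from torch.nn import CrossEntropyLoss

num_labels, seq_len = 3, 5
logits = torch.randn(1, seq_len, num_labels)
labels = torch.randint(0, num_labels, (1, seq_len))
attention_mask = torch.tensor([[1, 1, 1, 0, 0]])     # last two tokens are padding

loss_fct = CrossEntropyLoss()
active_loss = attention_mask.view(-1) == 1
active_labels = torch.where(active_loss, labels.view(-1),
                            torch.tensor(loss_fct.ignore_index).type_as(labels))
loss = loss_fct(logits.view(-1, num_labels), active_labels)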
1472
+ @add_start_docstrings(
1473
+ """Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1474
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """,
1475
+ BERT_START_DOCSTRING,
1476
+ )
1477
+ class BertForQuestionAnswering(BertPreTrainedModel):
+     def __init__(self, config):
+         super().__init__(config)
+         self.num_labels = config.num_labels
+ 
+         self.bert = BertModel(config)
+         self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+ 
+         self.init_weights()
+ 
+     @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
+     @add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="bert-base-uncased")
+     def forward(
+         self,
+         input_ids=None,
+         attention_mask=None,
+         token_type_ids=None,
+         position_ids=None,
+         head_mask=None,
+         inputs_embeds=None,
+         start_positions=None,
+         end_positions=None,
+         output_attentions=None,
+         output_hidden_states=None,
+     ):
+         r"""
+         start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
+             Labels for the position (index) of the start of the labelled span, used for computing the token classification loss.
+             Positions are clamped to the length of the sequence (`sequence_length`).
+             Positions outside of the sequence are not taken into account for computing the loss.
+         end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
+             Labels for the position (index) of the end of the labelled span, used for computing the token classification loss.
+             Positions are clamped to the length of the sequence (`sequence_length`).
+             Positions outside of the sequence are not taken into account for computing the loss.
+ 
+     Returns:
+         :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
+         loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
+             Total span extraction loss, i.e. the sum of the cross-entropy losses for the start and end positions.
+         start_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
+             Span-start scores (before SoftMax).
+         end_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
+             Span-end scores (before SoftMax).
+         hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
+             Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
+             of shape :obj:`(batch_size, sequence_length, hidden_size)`.
+ 
+             Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+         attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
+             Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
+             :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
+ 
+             Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+             heads.
+         """
+ 
+         outputs = self.bert(
+             input_ids,
+             attention_mask=attention_mask,
+             token_type_ids=token_type_ids,
+             position_ids=position_ids,
+             head_mask=head_mask,
+             inputs_embeds=inputs_embeds,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+         )
+ 
+         sequence_output = outputs[0]
+ 
+         logits = self.qa_outputs(sequence_output)
+         start_logits, end_logits = logits.split(1, dim=-1)
+         start_logits = start_logits.squeeze(-1)
+         end_logits = end_logits.squeeze(-1)
+ 
+         outputs = (start_logits, end_logits,) + outputs[2:]
+         if start_positions is not None and end_positions is not None:
+             # If we are on multi-GPU, the batch split can add a dimension; squeeze it away
+             if len(start_positions.size()) > 1:
+                 start_positions = start_positions.squeeze(-1)
+             if len(end_positions.size()) > 1:
+                 end_positions = end_positions.squeeze(-1)
+             # Sometimes the start/end positions fall outside our model inputs; we ignore these terms
+             ignored_index = start_logits.size(1)
+             start_positions.clamp_(0, ignored_index)
+             end_positions.clamp_(0, ignored_index)
+ 
+             loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+             start_loss = loss_fct(start_logits, start_positions)
+             end_loss = loss_fct(end_logits, end_positions)
+             total_loss = (start_loss + end_loss) / 2
+             outputs = (total_loss,) + outputs
+ 
+         return outputs  # (loss), start_logits, end_logits, (hidden_states), (attentions)
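
As a usage note (editorial, not part of the diff): the forward pass above returns per-token start and end scores, from which an answer span can be decoded. A minimal, hypothetical sketch, assuming a `transformers`-style tokenizer; `decode_best_span` and `max_len` are illustrative names:

```python
# Hedged sketch: decoding a span from the (start_logits, end_logits) pair
# returned by BertForQuestionAnswering.forward. Assumes batch size 1 and a
# tokenizer with the usual decode() API; not part of this repository's code.
import torch

def decode_best_span(start_logits, end_logits, input_ids, tokenizer, max_len=30):
    # Score every (start, end) pair, then mask out end < start and overly long spans.
    scores = start_logits[0][:, None] + end_logits[0][None, :]
    seq_len = scores.size(0)
    valid = torch.triu(torch.ones(seq_len, seq_len, dtype=torch.bool))          # end >= start
    valid &= torch.tril(torch.ones(seq_len, seq_len, dtype=torch.bool), max_len)  # end - start <= max_len
    scores[~valid] = float("-inf")
    start, end = divmod(int(scores.argmax()), seq_len)
    return tokenizer.decode(input_ids[0][start : end + 1])
```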
RIS-DMMI/bert/modeling_test.py ADDED
@@ -0,0 +1,277 @@
+ # coding=utf-8
+ # Copyright 2018 The Google AI Language Team Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from __future__ import absolute_import
+ from __future__ import division
+ from __future__ import print_function
+ 
+ import collections
+ import json
+ import random
+ import re
+ 
+ import modeling
+ import six
+ import tensorflow as tf
+ 
+ 
+ class BertModelTest(tf.test.TestCase):
+ 
+   class BertModelTester(object):
+ 
+     def __init__(self,
+                  parent,
+                  batch_size=13,
+                  seq_length=7,
+                  is_training=True,
+                  use_input_mask=True,
+                  use_token_type_ids=True,
+                  vocab_size=99,
+                  hidden_size=32,
+                  num_hidden_layers=5,
+                  num_attention_heads=4,
+                  intermediate_size=37,
+                  hidden_act="gelu",
+                  hidden_dropout_prob=0.1,
+                  attention_probs_dropout_prob=0.1,
+                  max_position_embeddings=512,
+                  type_vocab_size=16,
+                  initializer_range=0.02,
+                  scope=None):
+       self.parent = parent
+       self.batch_size = batch_size
+       self.seq_length = seq_length
+       self.is_training = is_training
+       self.use_input_mask = use_input_mask
+       self.use_token_type_ids = use_token_type_ids
+       self.vocab_size = vocab_size
+       self.hidden_size = hidden_size
+       self.num_hidden_layers = num_hidden_layers
+       self.num_attention_heads = num_attention_heads
+       self.intermediate_size = intermediate_size
+       self.hidden_act = hidden_act
+       self.hidden_dropout_prob = hidden_dropout_prob
+       self.attention_probs_dropout_prob = attention_probs_dropout_prob
+       self.max_position_embeddings = max_position_embeddings
+       self.type_vocab_size = type_vocab_size
+       self.initializer_range = initializer_range
+       self.scope = scope
+ 
+     def create_model(self):
+       input_ids = BertModelTest.ids_tensor([self.batch_size, self.seq_length],
+                                            self.vocab_size)
+ 
+       input_mask = None
+       if self.use_input_mask:
+         input_mask = BertModelTest.ids_tensor(
+             [self.batch_size, self.seq_length], vocab_size=2)
+ 
+       token_type_ids = None
+       if self.use_token_type_ids:
+         token_type_ids = BertModelTest.ids_tensor(
+             [self.batch_size, self.seq_length], self.type_vocab_size)
+ 
+       config = modeling.BertConfig(
+           vocab_size=self.vocab_size,
+           hidden_size=self.hidden_size,
+           num_hidden_layers=self.num_hidden_layers,
+           num_attention_heads=self.num_attention_heads,
+           intermediate_size=self.intermediate_size,
+           hidden_act=self.hidden_act,
+           hidden_dropout_prob=self.hidden_dropout_prob,
+           attention_probs_dropout_prob=self.attention_probs_dropout_prob,
+           max_position_embeddings=self.max_position_embeddings,
+           type_vocab_size=self.type_vocab_size,
+           initializer_range=self.initializer_range)
+ 
+       model = modeling.BertModel(
+           config=config,
+           is_training=self.is_training,
+           input_ids=input_ids,
+           input_mask=input_mask,
+           token_type_ids=token_type_ids,
+           scope=self.scope)
+ 
+       outputs = {
+           "embedding_output": model.get_embedding_output(),
+           "sequence_output": model.get_sequence_output(),
+           "pooled_output": model.get_pooled_output(),
+           "all_encoder_layers": model.get_all_encoder_layers(),
+       }
+       return outputs
+ 
+     def check_output(self, result):
+       self.parent.assertAllEqual(
+           result["embedding_output"].shape,
+           [self.batch_size, self.seq_length, self.hidden_size])
+ 
+       self.parent.assertAllEqual(
+           result["sequence_output"].shape,
+           [self.batch_size, self.seq_length, self.hidden_size])
+ 
+       self.parent.assertAllEqual(result["pooled_output"].shape,
+                                  [self.batch_size, self.hidden_size])
+ 
+   def test_default(self):
+     self.run_tester(BertModelTest.BertModelTester(self))
+ 
+   def test_config_to_json_string(self):
+     config = modeling.BertConfig(vocab_size=99, hidden_size=37)
+     obj = json.loads(config.to_json_string())
+     self.assertEqual(obj["vocab_size"], 99)
+     self.assertEqual(obj["hidden_size"], 37)
+ 
+   def run_tester(self, tester):
+     with self.test_session() as sess:
+       ops = tester.create_model()
+       init_op = tf.group(tf.global_variables_initializer(),
+                          tf.local_variables_initializer())
+       sess.run(init_op)
+       output_result = sess.run(ops)
+       tester.check_output(output_result)
+ 
+       self.assert_all_tensors_reachable(sess, [init_op, ops])
+ 
+   @classmethod
+   def ids_tensor(cls, shape, vocab_size, rng=None, name=None):
+     """Creates a random int32 tensor of the given shape with values in the vocab range."""
+     if rng is None:
+       rng = random.Random()
+ 
+     total_dims = 1
+     for dim in shape:
+       total_dims *= dim
+ 
+     values = []
+     for _ in range(total_dims):
+       values.append(rng.randint(0, vocab_size - 1))
+ 
+     return tf.constant(value=values, dtype=tf.int32, shape=shape, name=name)
+ 
+   def assert_all_tensors_reachable(self, sess, outputs):
+     """Checks that all the tensors in the graph are reachable from outputs."""
+     graph = sess.graph
+ 
+     ignore_strings = [
+         "^.*/assert_less_equal/.*$",
+         "^.*/dilation_rate$",
+         "^.*/Tensordot/concat$",
+         "^.*/Tensordot/concat/axis$",
+         "^testing/.*$",
+     ]
+ 
+     ignore_regexes = [re.compile(x) for x in ignore_strings]
+ 
+     unreachable = self.get_unreachable_ops(graph, outputs)
+     filtered_unreachable = []
+     for x in unreachable:
+       do_ignore = False
+       for r in ignore_regexes:
+         m = r.match(x.name)
+         if m is not None:
+           do_ignore = True
+       if do_ignore:
+         continue
+       filtered_unreachable.append(x)
+     unreachable = filtered_unreachable
+ 
+     self.assertEqual(
+         len(unreachable), 0, "The following ops are unreachable: %s" %
+         (" ".join([x.name for x in unreachable])))
+ 
+   @classmethod
+   def get_unreachable_ops(cls, graph, outputs):
+     """Finds all of the tensors in graph that are unreachable from outputs."""
+     outputs = cls.flatten_recursive(outputs)
+     output_to_op = collections.defaultdict(list)
+     op_to_all = collections.defaultdict(list)
+     assign_out_to_in = collections.defaultdict(list)
+ 
+     for op in graph.get_operations():
+       for x in op.inputs:
+         op_to_all[op.name].append(x.name)
+       for y in op.outputs:
+         output_to_op[y.name].append(op.name)
+         op_to_all[op.name].append(y.name)
+       if str(op.type) == "Assign":
+         for y in op.outputs:
+           for x in op.inputs:
+             assign_out_to_in[y.name].append(x.name)
+ 
+     assign_groups = collections.defaultdict(list)
+     for out_name in assign_out_to_in.keys():
+       name_group = assign_out_to_in[out_name]
+       for n1 in name_group:
+         assign_groups[n1].append(out_name)
+         for n2 in name_group:
+           if n1 != n2:
+             assign_groups[n1].append(n2)
+ 
+     seen_tensors = {}
+     stack = [x.name for x in outputs]
+     while stack:
+       name = stack.pop()
+       if name in seen_tensors:
+         continue
+       seen_tensors[name] = True
+ 
+       if name in output_to_op:
+         for op_name in output_to_op[name]:
+           if op_name in op_to_all:
+             for input_name in op_to_all[op_name]:
+               if input_name not in stack:
+                 stack.append(input_name)
+ 
+       expanded_names = []
+       if name in assign_groups:
+         for assign_name in assign_groups[name]:
+           expanded_names.append(assign_name)
+ 
+       for expanded_name in expanded_names:
+         if expanded_name not in stack:
+           stack.append(expanded_name)
+ 
+     unreachable_ops = []
+     for op in graph.get_operations():
+       is_unreachable = False
+       all_names = [x.name for x in op.inputs] + [x.name for x in op.outputs]
+       for name in all_names:
+         if name not in seen_tensors:
+           is_unreachable = True
+       if is_unreachable:
+         unreachable_ops.append(op)
+     return unreachable_ops
+ 
+   @classmethod
+   def flatten_recursive(cls, item):
+     """Flattens a (potentially nested) tuple/dictionary/list into a flat list."""
+     output = []
+     if isinstance(item, list):
+       output.extend(item)
+     elif isinstance(item, tuple):
+       output.extend(list(item))
+     elif isinstance(item, dict):
+       for (_, v) in six.iteritems(item):
+         output.append(v)
+     else:
+       return [item]
+ 
+     flat_output = []
+     for x in output:
+       flat_output.extend(cls.flatten_recursive(x))
+     return flat_output
+ 
+ 
+ if __name__ == "__main__":
+   tf.test.main()
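
A hedged sketch (editorial, not part of the diff) of exercising the tester outside of `tf.test.main()`, assuming a TensorFlow 1.x environment with the sibling `modeling.py` importable:

```python
# Hedged sketch: driving BertModelTester directly in a TF 1.x session.
# Assumes TensorFlow 1.x APIs (tf.Session, global_variables_initializer).
import tensorflow as tf
import modeling_test

tester = modeling_test.BertModelTest.BertModelTester(parent=None)
with tf.Session() as sess:
    ops = tester.create_model()
    sess.run(tf.group(tf.global_variables_initializer(),
                      tf.local_variables_initializer()))
    outputs = sess.run(ops)
    print(outputs["pooled_output"].shape)  # (13, 32) with the default tester config
```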
RIS-DMMI/bert/modeling_utils.py ADDED
@@ -0,0 +1,1268 @@
+ # coding=utf-8
+ # Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ 
+ import inspect
+ import logging
+ import os
+ from typing import Callable, Dict, List, Optional, Tuple
+ 
+ import torch
+ from torch import Tensor, device, dtype, nn
+ from torch.nn import CrossEntropyLoss
+ from torch.nn import functional as F
+ 
+ from .activations import get_activation
+ from .configuration_utils import PretrainedConfig
+ from .file_utils import (
+     DUMMY_INPUTS,
+     TF2_WEIGHTS_NAME,
+     TF_WEIGHTS_NAME,
+     WEIGHTS_NAME,
+     cached_path,
+     hf_bucket_url,
+     is_remote_url,
+ )
+ from .generation_utils import GenerationMixin
+ 
+ 
+ logger = logging.getLogger(__name__)
+ 
+ 
+ try:
+     from torch.nn import Identity
+ except ImportError:
+     # Older PyTorch compatibility
+     class Identity(nn.Module):
+         r"""A placeholder identity operator that is argument-insensitive.
+         """
+ 
+         def __init__(self, *args, **kwargs):
+             super().__init__()
+ 
+         def forward(self, input):
+             return input
+ 
+ 
+ def find_pruneable_heads_and_indices(
+     heads: List, n_heads: int, head_size: int, already_pruned_heads: set
+ ) -> Tuple[set, "torch.LongTensor"]:
+     mask = torch.ones(n_heads, head_size)
+     heads = set(heads) - already_pruned_heads  # Convert to set and remove already pruned heads
+     for head in heads:
+         # Compute how many pruned heads are before the head and move the index accordingly
+         head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
+         mask[head] = 0
+     mask = mask.view(-1).contiguous().eq(1)
+     index: torch.LongTensor = torch.arange(len(mask))[mask].long()
+     return heads, index
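
To illustrate the helper just above (a sketch, not part of the diff): pruning heads 1 and 3 of a 4-head layer with 8 dimensions per head leaves a flat index over the two surviving heads:

```python
# Hedged sketch of find_pruneable_heads_and_indices; numbers are illustrative.
import torch

n_heads, head_size = 4, 8
heads, index = find_pruneable_heads_and_indices(
    heads=[1, 3], n_heads=n_heads, head_size=head_size, already_pruned_heads=set()
)
print(heads)        # {1, 3} -- the heads to remove
print(index.shape)  # torch.Size([16]) -- 2 surviving heads * head_size flat indices
# `index` can then be used to slice the (n_heads * head_size)-wide projection layers.
```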
+ 
+ 
+ class ModuleUtilsMixin:
+     """
+     A few utilities for torch.nn.Modules, to be used as a mixin.
+     """
+ 
+     def num_parameters(self, only_trainable: bool = False) -> int:
+         """
+         Get the number of (optionally, trainable) parameters in the module.
+         """
+         params = filter(lambda x: x.requires_grad, self.parameters()) if only_trainable else self.parameters()
+         return sum(p.numel() for p in params)
+ 
+     @staticmethod
+     def _hook_rss_memory_pre_forward(module, *args, **kwargs):
+         try:
+             import psutil
+         except ImportError:
+             raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")
+ 
+         process = psutil.Process(os.getpid())
+         mem = process.memory_info()
+         module.mem_rss_pre_forward = mem.rss
+         return None
+ 
+     @staticmethod
+     def _hook_rss_memory_post_forward(module, *args, **kwargs):
+         try:
+             import psutil
+         except ImportError:
+             raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")
+ 
+         process = psutil.Process(os.getpid())
+         mem = process.memory_info()
+         module.mem_rss_post_forward = mem.rss
+         mem_rss_diff = module.mem_rss_post_forward - module.mem_rss_pre_forward
+         module.mem_rss_diff = mem_rss_diff + (module.mem_rss_diff if hasattr(module, "mem_rss_diff") else 0)
+         return None
+ 
+     def add_memory_hooks(self):
+         """ Add a memory hook before and after each sub-module forward pass to record the increase in memory consumption.
+             The increase is stored in a `mem_rss_diff` attribute for each module and can be reset to zero with `model.reset_memory_hooks_state()`.
+         """
+         for module in self.modules():
+             module.register_forward_pre_hook(self._hook_rss_memory_pre_forward)
+             module.register_forward_hook(self._hook_rss_memory_post_forward)
+         self.reset_memory_hooks_state()
+ 
+     def reset_memory_hooks_state(self):
+         for module in self.modules():
+             module.mem_rss_diff = 0
+             module.mem_rss_post_forward = 0
+             module.mem_rss_pre_forward = 0
+ 
+     @property
+     def device(self) -> device:
+         """
+         Get the torch.device of the module, assuming that the whole module sits on a single device.
+         """
+         try:
+             return next(self.parameters()).device
+         except StopIteration:
+             # For nn.DataParallel compatibility in PyTorch 1.5
+ 
+             def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
+                 tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
+                 return tuples
+ 
+             gen = self._named_members(get_members_fn=find_tensor_attributes)
+             first_tuple = next(gen)
+             return first_tuple[1].device
+ 
+     @property
+     def dtype(self) -> dtype:
+         """
+         Get the torch.dtype of the module, assuming that the whole module has a single dtype.
+         """
+         try:
+             return next(self.parameters()).dtype
+         except StopIteration:
+             # For nn.DataParallel compatibility in PyTorch 1.5
+ 
+             def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
+                 tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
+                 return tuples
+ 
+             gen = self._named_members(get_members_fn=find_tensor_attributes)
+             first_tuple = next(gen)
+             return first_tuple[1].dtype
+ 
+     def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor:
+         """type: torch.Tensor -> torch.Tensor"""
+         if encoder_attention_mask.dim() == 3:
+             encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
+         if encoder_attention_mask.dim() == 2:
+             encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
+         # T5 has a mask that can compare sequence ids; we could simulate this here with the transposition below
+         # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
+         # /transformer/transformer_layers.py#L270
+         # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
+         #                                    encoder_extended_attention_mask.transpose(-1, -2))
+         encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
+ 
+         if self.dtype == torch.float16:
+             encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e4
+         elif self.dtype == torch.float32:
+             encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e9
+         else:
+             raise ValueError(
+                 "{} not recognized. `dtype` should be set to either `torch.float32` or `torch.float16`".format(
+                     self.dtype
+                 )
+             )
+ 
+         return encoder_extended_attention_mask
+ 
+     def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple, device: device) -> Tensor:
+         """Makes a broadcastable attention mask (and a causal mask for decoders) so that future and masked tokens are ignored.
+ 
+         Arguments:
+             attention_mask: torch.Tensor with 1 indicating tokens to ATTEND to
+             input_shape: tuple, shape of input_ids
+             device: torch.device, usually self.device
+ 
+         Returns:
+             torch.Tensor with dtype of attention_mask.dtype
+         """
+         # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+         # ourselves, in which case we just need to make it broadcastable to all heads.
+         if attention_mask.dim() == 3:
+             extended_attention_mask = attention_mask[:, None, :, :]
+         elif attention_mask.dim() == 2:
+             # Provided a padding mask of dimensions [batch_size, seq_length]
+             # - if the model is a decoder, apply a causal mask in addition to the padding mask
+             # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
+             if self.config.is_decoder:
+                 batch_size, seq_length = input_shape
+                 seq_ids = torch.arange(seq_length, device=device)
+                 causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
+                 # causal and attention masks must have the same type with pytorch version < 1.3
+                 causal_mask = causal_mask.to(attention_mask.dtype)
+                 extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
+             else:
+                 extended_attention_mask = attention_mask[:, None, None, :]
+         else:
+             raise ValueError(
+                 "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
+                     input_shape, attention_mask.shape
+                 )
+             )
+ 
+         # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+         # masked positions, this operation will create a tensor which is 0.0 for
+         # positions we want to attend and -10000.0 for masked positions.
+         # Since we are adding it to the raw scores before the softmax, this is
+         # effectively the same as removing these entirely.
+         extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
+         extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
+         return extended_attention_mask
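
For intuition about the additive mask this method returns, here is a minimal sketch of the encoder case (editorial; illustrative shapes only):

```python
# Hedged sketch: the [batch, 1, 1, seq] additive mask is 0 where we attend
# and -10000 where we do not, so the softmax assigns ~0 probability to padding.
import torch

attention_mask = torch.tensor([[1, 1, 1, 0]])  # one padded position
extended = (1.0 - attention_mask[:, None, None, :].float()) * -10000.0
scores = torch.randn(1, 2, 4, 4)               # (batch, heads, seq, seq) raw attention scores
probs = (scores + extended).softmax(dim=-1)    # last key column gets ~0 weight everywhere
```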
+ 
+     def get_head_mask(self, head_mask: Tensor, num_hidden_layers: int, is_attention_chunked: bool = False) -> Tensor:
+         """
+         Prepare the head mask if needed. A 1.0 in head_mask indicates that we keep the head;
+         attention_probs has shape bsz x n_heads x N x N.
+         Arguments:
+             head_mask: torch.Tensor or None: has shape [num_heads] or [num_hidden_layers x num_heads]
+             num_hidden_layers: int
+         Returns:
+             Tensor of shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+             or a list with [None] for each layer
+         """
+         if head_mask is not None:
+             head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers)
+             if is_attention_chunked is True:
+                 head_mask = head_mask.unsqueeze(-1)
+         else:
+             head_mask = [None] * num_hidden_layers
+ 
+         return head_mask
+ 
+     def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers):
+         """-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]"""
+         if head_mask.dim() == 1:
+             head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
+             head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1)
+         elif head_mask.dim() == 2:
+             head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify a head_mask for each layer
+         assert head_mask.dim() == 5, f"head_mask.dim != 5, instead {head_mask.dim()}"
+         head_mask = head_mask.to(dtype=self.dtype)  # switch to float if needed + fp16 compatibility
+         return head_mask
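
A small sketch (editorial) of the broadcast performed by `_convert_head_mask_to_5d` for a 1-D mask, with illustrative sizes:

```python
# Hedged sketch: a per-head mask broadcast to the 5-D shape consumed per layer.
import torch

num_hidden_layers, num_heads = 12, 12
head_mask = torch.ones(num_heads)
head_mask[0] = 0.0  # silence head 0 in every layer
mask_5d = head_mask[None, None, :, None, None].expand(num_hidden_layers, -1, -1, -1, -1)
print(mask_5d.shape)  # torch.Size([12, 1, 12, 1, 1]) -> broadcasts over batch and seq dims
```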
+ 
+ 
+ class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin):
+     r""" Base class for all models.
+ 
+         :class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods for loading/downloading/saving models,
+         as well as a few methods common to all models for (i) resizing the input embeddings and (ii) pruning heads in the self-attention layers.
+ 
+         Class attributes (overridden by derived classes):
+             - ``config_class``: a class derived from :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
+             - ``load_tf_weights``: a python ``method`` for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments:
+ 
+                 - ``model``: an instance of the relevant subclass of :class:`~transformers.PreTrainedModel`,
+                 - ``config``: an instance of the relevant subclass of :class:`~transformers.PretrainedConfig`,
+                 - ``path``: a path (string) to the TensorFlow checkpoint.
+ 
+             - ``base_model_prefix``: a string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.
+     """
+     config_class = None
+     base_model_prefix = ""
+ 
+     @property
+     def dummy_inputs(self):
+         """ Dummy inputs to do a forward pass in the network.
+ 
+         Returns:
+             torch.Tensor with dummy inputs
+         """
+         return {"input_ids": torch.tensor(DUMMY_INPUTS)}
+ 
+     def __init__(self, config, *inputs, **kwargs):
+         super().__init__()
+         if not isinstance(config, PretrainedConfig):
+             raise ValueError(
+                 "Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
+                 "To create a model from a pretrained model use "
+                 "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
+                     self.__class__.__name__, self.__class__.__name__
+                 )
+             )
+         # Save config in model
+         self.config = config
+ 
+     @property
+     def base_model(self):
+         return getattr(self, self.base_model_prefix, self)
+ 
+     def get_input_embeddings(self):
+         """
+         Returns the model's input embeddings.
+ 
+         Returns:
+             :obj:`nn.Module`:
+                 A torch module mapping vocabulary to hidden states.
+         """
+         base_model = getattr(self, self.base_model_prefix, self)
+         if base_model is not self:
+             return base_model.get_input_embeddings()
+         else:
+             raise NotImplementedError
+ 
+     def set_input_embeddings(self, value: nn.Module):
+         """
+         Set the model's input embeddings.
+ 
+         Args:
+             value (:obj:`nn.Module`):
+                 A module mapping vocabulary to hidden states.
+         """
+         base_model = getattr(self, self.base_model_prefix, self)
+         if base_model is not self:
+             base_model.set_input_embeddings(value)
+         else:
+             raise NotImplementedError
+ 
+     def get_output_embeddings(self):
+         """
+         Returns the model's output embeddings.
+ 
+         Returns:
+             :obj:`nn.Module`:
+                 A torch module mapping hidden states to vocabulary.
+         """
+         return None  # Overwrite for models with output embeddings
+ 
+     def tie_weights(self):
+         """
+         Tie the weights between the input embeddings and the output embeddings.
+         If the `torchscript` flag is set in the configuration, TorchScript can't handle parameter sharing, so we clone
+         the weights instead.
+         """
+         output_embeddings = self.get_output_embeddings()
+         if output_embeddings is not None:
+             self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())
+ 
+     def _tie_or_clone_weights(self, output_embeddings, input_embeddings):
+         """ Tie or clone module weights depending on whether we are using TorchScript or not.
+         """
+         if self.config.torchscript:
+             output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone())
+         else:
+             output_embeddings.weight = input_embeddings.weight
+ 
+         if getattr(output_embeddings, "bias", None) is not None:
+             output_embeddings.bias.data = torch.nn.functional.pad(
+                 output_embeddings.bias.data,
+                 (0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0],),
+                 "constant",
+                 0,
+             )
+         if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
+             output_embeddings.out_features = input_embeddings.num_embeddings
+ 
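The tying above boils down to sharing one `Parameter` between the input embedding and the LM head; a minimal editorial sketch with illustrative sizes:

```python
# Hedged sketch of the weight-tying idea used by _tie_or_clone_weights.
import torch.nn as nn

vocab_size, hidden_size = 1000, 64
embeddings = nn.Embedding(vocab_size, hidden_size)
lm_head = nn.Linear(hidden_size, vocab_size, bias=False)
lm_head.weight = embeddings.weight  # same Parameter object: one storage, shared gradients
assert lm_head.weight.data_ptr() == embeddings.weight.data_ptr()
```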
+     def resize_token_embeddings(self, new_num_tokens: Optional[int] = None):
+         """ Resize the input token embeddings matrix of the model if new_num_tokens != config.vocab_size.
+         Takes care of tying weights afterwards if the model class has a `tie_weights()` method.
+ 
+         Arguments:
+ 
+             new_num_tokens: (`optional`) int:
+                 New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end.
+                 If not provided or None: does nothing and just returns a pointer to the input tokens ``torch.nn.Embedding`` module of the model.
+ 
+         Return: ``torch.nn.Embedding``
+             Pointer to the input tokens Embedding module of the model.
+         """
+         base_model = getattr(self, self.base_model_prefix, self)  # get the base model if needed
+         model_embeds = base_model._resize_token_embeddings(new_num_tokens)
+         if new_num_tokens is None:
+             return model_embeds
+ 
+         # Update base model and current model config
+         self.config.vocab_size = new_num_tokens
+         base_model.vocab_size = new_num_tokens
+ 
+         # Tie weights again if needed
+         self.tie_weights()
+ 
+         return model_embeds
+ 
+     def _resize_token_embeddings(self, new_num_tokens):
+         old_embeddings = self.get_input_embeddings()
+         new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
+         self.set_input_embeddings(new_embeddings)
+         return self.get_input_embeddings()
+ 
+     def _get_resized_embeddings(
+         self, old_embeddings: torch.nn.Embedding, new_num_tokens: Optional[int] = None
+     ) -> torch.nn.Embedding:
+         """ Build a resized Embedding module from a provided token Embedding module.
+         Increasing the size will add newly initialized vectors at the end;
+         reducing the size will remove vectors from the end.
+ 
+         Args:
+             old_embeddings: ``torch.nn.Embedding``
+                 Old embeddings to be resized.
+             new_num_tokens: (`optional`) int
+                 New number of tokens in the embedding matrix.
+                 Increasing the size will add newly initialized vectors at the end;
+                 reducing the size will remove vectors from the end.
+                 If not provided or None: return the provided token Embedding module.
+         Return: ``torch.nn.Embedding``
+             Pointer to the resized Embedding module, or the old Embedding module if new_num_tokens is None.
+         """
+         if new_num_tokens is None:
+             return old_embeddings
+ 
+         old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
+         if old_num_tokens == new_num_tokens:
+             return old_embeddings
+ 
+         # Build new embeddings
+         new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim)
+         new_embeddings.to(old_embeddings.weight.device)
+ 
+         # initialize all new embeddings (in particular the added tokens)
+         self._init_weights(new_embeddings)
+ 
+         # Copy token embeddings from the previous weights
+         num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
+         new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :]
+ 
+         return new_embeddings
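
Typical call site, sketched under the assumption of a `transformers`-style model/tokenizer pair (editorial; `tokenizer`, `model`, and the added tokens are hypothetical):

```python
# Hedged sketch: growing the vocabulary after registering new special tokens.
num_added = tokenizer.add_tokens(["[obj]", "[ref]"])   # hypothetical new tokens
embeddings = model.resize_token_embeddings(len(tokenizer))
print(embeddings.weight.shape)  # (old_vocab_size + num_added, hidden_size)
```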
+ 
+     def init_weights(self):
+         """ Initialize and prune weights if needed. """
+         # Initialize weights
+         self.apply(self._init_weights)
+ 
+         # Prune heads if needed
+         if self.config.pruned_heads:
+             self.prune_heads(self.config.pruned_heads)
+ 
+         # Tie weights if needed
+         self.tie_weights()
+ 
+     def prune_heads(self, heads_to_prune: Dict):
+         """ Prunes heads of the base model.
+ 
+         Arguments:
+ 
+             heads_to_prune: dict with keys being selected layer indices (`int`) and associated values being the list of heads to prune in said layer (list of `int`).
+                 E.g. {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
+         """
+         # save the new set of pruned heads as the union of previously stored pruned heads and newly pruned heads
+         for layer, heads in heads_to_prune.items():
+             union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)
+             self.config.pruned_heads[layer] = list(union_heads)  # Unfortunately we have to store it as a list for JSON
+ 
+         self.base_model._prune_heads(heads_to_prune)
+ 
+     def save_pretrained(self, save_directory):
+         """ Save a model and its configuration file to a directory, so that it
+         can be re-loaded using the :func:`~transformers.PreTrainedModel.from_pretrained` class method.
+ 
+         Arguments:
+             save_directory: directory to which to save.
+         """
+         if os.path.isfile(save_directory):
+             logger.error("Provided path ({}) should be a directory, not a file".format(save_directory))
+             return
+         os.makedirs(save_directory, exist_ok=True)
+ 
+         # Only save the model itself if we are using distributed training
+         model_to_save = self.module if hasattr(self, "module") else self
+ 
+         # Attach architecture to the config
+         model_to_save.config.architectures = [model_to_save.__class__.__name__]
+ 
+         # If we save using the predefined names, we can load using `from_pretrained`
+         output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
+ 
+         if getattr(self.config, "xla_device", False):
+             import torch_xla.core.xla_model as xm
+ 
+             if xm.is_master_ordinal():
+                 # Save configuration file
+                 model_to_save.config.save_pretrained(save_directory)
+             # xm.save takes care of saving only from master
+             xm.save(model_to_save.state_dict(), output_model_file)
+         else:
+             model_to_save.config.save_pretrained(save_directory)
+             torch.save(model_to_save.state_dict(), output_model_file)
+ 
+         logger.info("Model weights saved in {}".format(output_model_file))
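
A hedged round-trip sketch (editorial) using the predefined file names written by `save_pretrained`, assuming `model` is an instance of a `PreTrainedModel` subclass:

```python
# Hedged sketch: save config.json + pytorch_model.bin, then reload from disk.
model.save_pretrained("./checkpoints/my_bert")
reloaded = model.__class__.from_pretrained("./checkpoints/my_bert")
reloaded.eval()  # from_pretrained already calls eval(); shown here for emphasis
```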
+ 
+     @classmethod
+     def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
+         r"""Instantiate a pretrained pytorch model from a pre-trained model configuration.
+ 
+         The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated).
+         To train the model, you should first set it back in training mode with ``model.train()``.
+ 
+         The warning ``Weights from XXX not initialized from pretrained model`` means that the weights of XXX do not come pre-trained with the rest of the model.
+         It is up to you to train those weights with a downstream fine-tuning task.
+ 
+         The warning ``Weights from XXX not used in YYY`` means that the layer XXX is not used by YYY, therefore those weights are discarded.
+ 
+         Parameters:
+             pretrained_model_name_or_path: either:
+                 - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
+                 - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
+                 - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
+                 - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as the ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint to a PyTorch model with the provided conversion scripts and loading the PyTorch model afterwards.
+                 - None if you are both providing the configuration and state dictionary (resp. with keyword arguments ``config`` and ``state_dict``).
+ 
+             model_args: (`optional`) Sequence of positional arguments:
+                 All remaining positional arguments will be passed to the underlying model's ``__init__`` method.
+ 
+             config: (`optional`) one of:
+                 - an instance of a class derived from :class:`~transformers.PretrainedConfig`, or
+                 - a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained()`
+ 
+                 Configuration for the model to use instead of an automatically loaded configuration. The configuration can be automatically loaded when:
+                 - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
+                 - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
+                 - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
+ 
+             state_dict: (`optional`) dict:
+                 an optional state dictionary for the model to use instead of a state dictionary loaded from a saved weights file.
+                 This option can be used if you want to create a model from a pretrained configuration but load your own weights.
+                 In this case though, you should check whether using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
+ 
+             cache_dir: (`optional`) string:
+                 Path to a directory in which a downloaded pre-trained model
+                 configuration should be cached if the standard cache should not be used.
+ 
+             force_download: (`optional`) boolean, default False:
+                 Force a (re-)download of the model weights and configuration files, overriding the cached versions if they exist.
+ 
+             resume_download: (`optional`) boolean, default False:
+                 Do not delete an incompletely received file. Attempt to resume the download if such a file exists.
+ 
+             proxies: (`optional`) dict, default None:
+                 A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
+                 The proxies are used on each request.
+ 
+             output_loading_info: (`optional`) boolean:
+                 Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
+ 
+             kwargs: (`optional`) Remaining dictionary of keyword arguments:
+                 Can be used to update the configuration object (after it has been loaded) and to initiate the model (e.g. ``output_attention=True``). Behaves differently depending on whether a `config` is provided or automatically loaded:
+ 
+                 - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done).
+                 - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
+ 
+         Examples::
+ 
+             # For example purposes. Not runnable.
+             model = BertModel.from_pretrained('bert-base-uncased')    # Download model and configuration from S3 and cache.
+             model = BertModel.from_pretrained('./test/saved_model/')  # E.g. model was saved using `save_pretrained('./test/saved_model/')`
+             model = BertModel.from_pretrained('bert-base-uncased', output_attention=True)  # Update configuration during loading
+             assert model.config.output_attention == True
+             # Loading from a TF checkpoint file instead of a PyTorch model (slower)
+             config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
+             model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)
+ 
+         """
+         config = kwargs.pop("config", None)
+         state_dict = kwargs.pop("state_dict", None)
+         cache_dir = kwargs.pop("cache_dir", None)
+         from_tf = kwargs.pop("from_tf", False)
+         force_download = kwargs.pop("force_download", False)
+         resume_download = kwargs.pop("resume_download", False)
+         proxies = kwargs.pop("proxies", None)
+         output_loading_info = kwargs.pop("output_loading_info", False)
+         local_files_only = kwargs.pop("local_files_only", False)
+         use_cdn = kwargs.pop("use_cdn", True)
+ 
+         # Load the config if we were not given a configuration
+         if not isinstance(config, PretrainedConfig):
+             config_path = config if config is not None else pretrained_model_name_or_path
+             config, model_kwargs = cls.config_class.from_pretrained(
+                 config_path,
+                 *model_args,
+                 cache_dir=cache_dir,
+                 return_unused_kwargs=True,
+                 force_download=force_download,
+                 resume_download=resume_download,
+                 proxies=proxies,
+                 local_files_only=local_files_only,
+                 **kwargs,
+             )
+         else:
+             model_kwargs = kwargs
+ 
+         # Load model
+         if pretrained_model_name_or_path is not None:
+             if os.path.isdir(pretrained_model_name_or_path):
+                 if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")):
+                     # Load from a TF 1.0 checkpoint
+                     archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
+                 elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
+                     # Load from a TF 2.0 checkpoint
+                     archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
+                 elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
+                     # Load from a PyTorch checkpoint
+                     archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
+                 else:
+                     raise EnvironmentError(
+                         "Error no file named {} found in directory {} or `from_tf` set to False".format(
+                             [WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + ".index"],
+                             pretrained_model_name_or_path,
+                         )
+                     )
+             elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
+                 archive_file = pretrained_model_name_or_path
+             elif os.path.isfile(pretrained_model_name_or_path + ".index"):
+                 assert (
+                     from_tf
+                 ), "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format(
+                     pretrained_model_name_or_path + ".index"
+                 )
+                 archive_file = pretrained_model_name_or_path + ".index"
+             else:
+                 archive_file = hf_bucket_url(
+                     pretrained_model_name_or_path,
+                     filename=(TF2_WEIGHTS_NAME if from_tf else WEIGHTS_NAME),
+                     use_cdn=use_cdn,
+                 )
+ 
+             try:
+                 # Load from URL or cache if already cached
+                 resolved_archive_file = cached_path(
+                     archive_file,
+                     cache_dir=cache_dir,
+                     force_download=force_download,
+                     proxies=proxies,
+                     resume_download=resume_download,
+                     local_files_only=local_files_only,
+                 )
+                 if resolved_archive_file is None:
+                     raise EnvironmentError
+             except EnvironmentError:
+                 msg = (
+                     f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
+                     f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
+                     f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME}.\n\n"
+                 )
+                 raise EnvironmentError(msg)
+ 
+             if resolved_archive_file == archive_file:
+                 logger.info("loading weights file {}".format(archive_file))
+             else:
+                 logger.info("loading weights file {} from cache at {}".format(archive_file, resolved_archive_file))
+         else:
+             resolved_archive_file = None
+ 
+         # Instantiate the model.
+         model = cls(config, *model_args, **model_kwargs)
+ 
+         if state_dict is None and not from_tf:
+             try:
+                 state_dict = torch.load(resolved_archive_file, map_location="cpu")
+             except Exception:
+                 raise OSError(
+                     "Unable to load weights from pytorch checkpoint file. "
+                     "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. "
+                 )
+ 
+         missing_keys = []
+         unexpected_keys = []
+         error_msgs = []
+ 
+         if from_tf:
+             if resolved_archive_file.endswith(".index"):
+                 # Load from a TensorFlow 1.X checkpoint - provided by the original authors
+                 model = cls.load_tf_weights(model, config, resolved_archive_file[:-6])  # Remove the '.index'
+             else:
+                 # Load from our TensorFlow 2.0 checkpoints
+                 try:
+                     from transformers import load_tf2_checkpoint_in_pytorch_model
+ 
+                     model = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True)
+                 except ImportError:
+                     logger.error(
+                         "Loading a TensorFlow model in PyTorch requires both PyTorch and TensorFlow to be installed. Please see "
+                         "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
+                     )
+                     raise
+         else:
+             # Convert the old format to the new format if needed from a PyTorch state_dict
+             old_keys = []
+             new_keys = []
+             for key in state_dict.keys():
+                 new_key = None
+                 if "gamma" in key:
+                     new_key = key.replace("gamma", "weight")
+                 if "beta" in key:
+                     new_key = key.replace("beta", "bias")
+                 if new_key:
+                     old_keys.append(key)
+                     new_keys.append(new_key)
+             for old_key, new_key in zip(old_keys, new_keys):
+                 state_dict[new_key] = state_dict.pop(old_key)
+ 
+             # copy state_dict so _load_from_state_dict can modify it
+             metadata = getattr(state_dict, "_metadata", None)
+             state_dict = state_dict.copy()
+             if metadata is not None:
+                 state_dict._metadata = metadata
+ 
+             ##############################################################################################
+             # Print out the state_dict's contents: keys
+             '''
+             for key, _ in state_dict.items():
+                 print(key)
+             '''
+             ##############################################################################################
+ 
+             # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants,
+             # so we need to apply the function recursively.
+             def load(module: nn.Module, prefix=""):
+                 local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
+                 module._load_from_state_dict(
+                     state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs,
+                 )
+                 for name, child in module._modules.items():
+                     if child is not None:
+                         load(child, prefix + name + ".")
+ 
+             # Make sure we are able to load base models as well as derived models (with heads)
+             start_prefix = ""
+             model_to_load = model
+             has_prefix_module = any(s.startswith(cls.base_model_prefix) for s in state_dict.keys())
+             if not hasattr(model, cls.base_model_prefix) and has_prefix_module:
+                 start_prefix = cls.base_model_prefix + "."
+             if hasattr(model, cls.base_model_prefix) and not has_prefix_module:
+                 model_to_load = getattr(model, cls.base_model_prefix)
+ 
+             load(model_to_load, prefix=start_prefix)
+ 
+             if model.__class__.__name__ != model_to_load.__class__.__name__:
+                 base_model_state_dict = model_to_load.state_dict().keys()
+                 head_model_state_dict_without_base_prefix = [
+                     key.split(cls.base_model_prefix + ".")[-1] for key in model.state_dict().keys()
+                 ]
+ 
+                 missing_keys.extend(head_model_state_dict_without_base_prefix - base_model_state_dict)
+ 
+             if len(unexpected_keys) > 0:
+                 logger.warning(
+                     f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when "
+                     f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
+                     f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task "
+                     f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPretraining model).\n"
+                     f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
+                     f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
+                 )
+             else:
+                 logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")
+             if len(missing_keys) > 0:
+                 logger.warning(
+                     f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
+                     f"and are newly initialized: {missing_keys}\n"
+                     f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
+                 )
+             else:
+                 logger.info(
+                     f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n"
+                     f"If your task is similar to the task the model of the checkpoint was trained on, "
+                     f"you can already use {model.__class__.__name__} for predictions without further training."
+                 )
+             if len(error_msgs) > 0:
+                 raise RuntimeError(
+                     "Error(s) in loading state_dict for {}:\n\t{}".format(
+                         model.__class__.__name__, "\n\t".join(error_msgs)
+                     )
+                 )
+         model.tie_weights()  # make sure token embedding weights are still tied if needed
+ 
+         # Set the model in evaluation mode to deactivate DropOut modules by default
+         model.eval()
+ 
+         if output_loading_info:
+             loading_info = {
+                 "missing_keys": missing_keys,
+                 "unexpected_keys": unexpected_keys,
+                 "error_msgs": error_msgs,
+             }
+             return model, loading_info
+ 
+         if hasattr(config, "xla_device") and config.xla_device:
+             import torch_xla.core.xla_model as xm
+ 
+             model = xm.send_cpu_data_to_device(model, xm.xla_device())
+             model.to(xm.xla_device())
+ 
+         return model
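
A sketch (editorial) of inspecting the loading bookkeeping returned above, assuming `BertModel` from this package is importable and the checkpoint is reachable:

```python
# Hedged sketch: output_loading_info=True returns (model, info) instead of model.
model, info = BertModel.from_pretrained("bert-base-uncased", output_loading_info=True)
print(info["missing_keys"])     # weights that stayed randomly initialized
print(info["unexpected_keys"])  # checkpoint weights the architecture did not use
```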
+ 
+ 
+ class Conv1D(nn.Module):
+     def __init__(self, nf, nx):
+         """ Conv1D layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).
+         Basically works like a Linear layer, but the weights are transposed.
+         """
+         super().__init__()
+         self.nf = nf
+         w = torch.empty(nx, nf)
+         nn.init.normal_(w, std=0.02)
+         self.weight = nn.Parameter(w)
+         self.bias = nn.Parameter(torch.zeros(nf))
+ 
+     def forward(self, x):
+         size_out = x.size()[:-1] + (self.nf,)
+         x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
+         x = x.view(*size_out)
+         return x
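
To see why `Conv1D` is "a Linear layer with transposed weights", a small equivalence check (editorial; illustrative sizes, and assumes the `Conv1D` class above is in scope):

```python
# Hedged sketch: Conv1D(nf, nx) matches Linear(nx, nf) once the weight is transposed.
import torch

nx, nf = 8, 16
conv = Conv1D(nf, nx)                        # weight shape (nx, nf)
linear = torch.nn.Linear(nx, nf)             # weight shape (nf, nx)
linear.weight.data = conv.weight.data.t()
linear.bias.data = conv.bias.data
x = torch.randn(2, 4, nx)
assert torch.allclose(conv(x), linear(x), atol=1e-6)
```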
832
+
833
+
834
+ class PoolerStartLogits(nn.Module):
835
+ """ Compute SQuAD start_logits from sequence hidden states. """
836
+
837
+ def __init__(self, config):
838
+ super().__init__()
839
+ self.dense = nn.Linear(config.hidden_size, 1)
840
+
841
+ def forward(self, hidden_states, p_mask=None):
842
+ """ Args:
843
+ **p_mask**: (`optional`) ``torch.FloatTensor`` of shape `(batch_size, seq_len)`
844
+ invalid position mask such as query and special symbols (PAD, SEP, CLS)
845
+ 1.0 means token should be masked.
846
+ """
847
+ x = self.dense(hidden_states).squeeze(-1)
848
+
849
+ if p_mask is not None:
850
+ if next(self.parameters()).dtype == torch.float16:
851
+ x = x * (1 - p_mask) - 65500 * p_mask
852
+ else:
853
+ x = x * (1 - p_mask) - 1e30 * p_mask
854
+
855
+ return x
856
+
857
+
858
+ class PoolerEndLogits(nn.Module):
859
+ """ Compute SQuAD end_logits from sequence hidden states and start token hidden state.
860
+ """
861
+
862
+ def __init__(self, config):
863
+ super().__init__()
864
+ self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
865
+ self.activation = nn.Tanh()
866
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
867
+ self.dense_1 = nn.Linear(config.hidden_size, 1)
868
+
869
+ def forward(self, hidden_states, start_states=None, start_positions=None, p_mask=None):
870
+ """ Args:
871
+ One of ``start_states``, ``start_positions`` should be not None.
872
+ If both are set, ``start_positions`` overrides ``start_states``.
873
+
874
+ **start_states**: ``torch.LongTensor`` of shape identical to hidden_states
875
+ hidden states of the first tokens for the labeled span.
876
+ **start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
877
+ position of the first token for the labeled span:
878
+ **p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, seq_len)``
879
+ Mask of invalid position such as query and special symbols (PAD, SEP, CLS)
880
+ 1.0 means token should be masked.
881
+ """
882
+ assert (
883
+ start_states is not None or start_positions is not None
884
+ ), "One of start_states, start_positions should be not None"
885
+ if start_positions is not None:
886
+ slen, hsz = hidden_states.shape[-2:]
887
+ start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
888
+ start_states = hidden_states.gather(-2, start_positions) # shape (bsz, 1, hsz)
889
+ start_states = start_states.expand(-1, slen, -1) # shape (bsz, slen, hsz)
890
+
891
+ x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1))
892
+ x = self.activation(x)
893
+ x = self.LayerNorm(x)
894
+ x = self.dense_1(x).squeeze(-1)
895
+
896
+ if p_mask is not None:
897
+ if next(self.parameters()).dtype == torch.float16:
898
+ x = x * (1 - p_mask) - 65500 * p_mask
899
+ else:
900
+ x = x * (1 - p_mask) - 1e30 * p_mask
901
+
902
+ return x
903
+
904
+
905
+ class PoolerAnswerClass(nn.Module):
906
+ """ Compute SQuAD 2.0 answer class from classification and start tokens hidden states. """
907
+
908
+ def __init__(self, config):
909
+ super().__init__()
910
+ self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
911
+ self.activation = nn.Tanh()
912
+ self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)
913
+
914
+ def forward(self, hidden_states, start_states=None, start_positions=None, cls_index=None):
915
+ """
916
+ Args:
917
+ One of ``start_states``, ``start_positions`` should be not None.
918
+ If both are set, ``start_positions`` overrides ``start_states``.
919
+
920
+ **start_states**: ``torch.LongTensor`` of shape identical to ``hidden_states``.
921
+ hidden states of the first tokens for the labeled span.
922
+ **start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
923
+ position of the first token for the labeled span.
924
+ **cls_index**: torch.LongTensor of shape ``(batch_size,)``
925
+ position of the CLS token. If None, take the last token.
926
+
927
+ note(Original repo):
928
+ no dependency on end_feature so that we can obtain one single `cls_logits`
929
+ for each sample
930
+ """
931
+ hsz = hidden_states.shape[-1]
932
+ assert (
933
+ start_states is not None or start_positions is not None
934
+ ), "One of start_states, start_positions should be not None"
935
+ if start_positions is not None:
936
+ start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
937
+ start_states = hidden_states.gather(-2, start_positions).squeeze(-2) # shape (bsz, hsz)
938
+
939
+ if cls_index is not None:
940
+ cls_index = cls_index[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
941
+ cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, hsz)
942
+ else:
943
+ cls_token_state = hidden_states[:, -1, :] # shape (bsz, hsz)
944
+
945
+ x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1))
946
+ x = self.activation(x)
947
+ x = self.dense_1(x).squeeze(-1)
948
+
949
+ return x
950
+
951
+
952
+ class SQuADHead(nn.Module):
953
+ r""" A SQuAD head inspired by XLNet.
954
+
955
+ Parameters:
956
+ config (:class:`~transformers.XLNetConfig`): Model configuration class with all the parameters of the model.
957
+
958
+ Inputs:
959
+ **hidden_states**: ``torch.FloatTensor`` of shape ``(batch_size, seq_len, hidden_size)``
960
+ hidden states of sequence tokens
961
+ **start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
962
+ position of the first token for the labeled span.
963
+ **end_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
964
+ position of the last token for the labeled span.
965
+ **cls_index**: torch.LongTensor of shape ``(batch_size,)``
966
+ position of the CLS token. If None, take the last token.
967
+ **is_impossible**: ``torch.LongTensor`` of shape ``(batch_size,)``
968
+ Whether the question has a possible answer in the paragraph or not.
969
+ **p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, seq_len)``
970
+ Mask of invalid positions such as query and special symbols (PAD, SEP, CLS).
971
+ 1.0 means token should be masked.
972
+
973
+ Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
974
+ **loss**: (`optional`, returned if both ``start_positions`` and ``end_positions`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
975
+ Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.
976
+ **start_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
977
+ ``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``
978
+ Log probabilities for the top config.start_n_top start token possibilities (beam-search).
979
+ **start_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
980
+ ``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``
981
+ Indices for the top config.start_n_top start token possibilities (beam-search).
982
+ **end_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
983
+ ``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
984
+ Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
985
+ **end_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
986
+ ``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
987
+ Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
988
+ **cls_logits**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
989
+ ``torch.FloatTensor`` of shape ``(batch_size,)``
990
+ Log probabilities for the ``is_impossible`` label of the answers.
991
+ """
992
+
993
+ def __init__(self, config):
994
+ super().__init__()
995
+ self.start_n_top = config.start_n_top
996
+ self.end_n_top = config.end_n_top
997
+
998
+ self.start_logits = PoolerStartLogits(config)
999
+ self.end_logits = PoolerEndLogits(config)
1000
+ self.answer_class = PoolerAnswerClass(config)
1001
+
1002
+ def forward(
1003
+ self, hidden_states, start_positions=None, end_positions=None, cls_index=None, is_impossible=None, p_mask=None,
1004
+ ):
1005
+ outputs = ()
1006
+
1007
+ start_logits = self.start_logits(hidden_states, p_mask=p_mask)
1008
+
1009
+ if start_positions is not None and end_positions is not None:
1010
+ # If we are on multi-GPU, let's remove the dimension added by batch splitting
1011
+ for x in (start_positions, end_positions, cls_index, is_impossible):
1012
+ if x is not None and x.dim() > 1:
1013
+ x.squeeze_(-1)
1014
+
1015
+ # during training, compute the end logits based on the ground truth of the start position
1016
+ end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)
1017
+
1018
+ loss_fct = CrossEntropyLoss()
1019
+ start_loss = loss_fct(start_logits, start_positions)
1020
+ end_loss = loss_fct(end_logits, end_positions)
1021
+ total_loss = (start_loss + end_loss) / 2
1022
+
1023
+ if cls_index is not None and is_impossible is not None:
1024
+ # Predict answerability from the representation of CLS and START
1025
+ cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
1026
+ loss_fct_cls = nn.BCEWithLogitsLoss()
1027
+ cls_loss = loss_fct_cls(cls_logits, is_impossible)
1028
+
1029
+ # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
1030
+ total_loss += cls_loss * 0.5
1031
+
1032
+ outputs = (total_loss,) + outputs
1033
+
1034
+ else:
1035
+ # during inference, compute the end logits based on beam search
1036
+ bsz, slen, hsz = hidden_states.size()
1037
+ start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen)
1038
+
1039
+ start_top_log_probs, start_top_index = torch.topk(
1040
+ start_log_probs, self.start_n_top, dim=-1
1041
+ ) # shape (bsz, start_n_top)
1042
+ start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz)
1043
+ start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz)
1044
+ start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz)
1045
+
1046
+ hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(
1047
+ start_states
1048
+ ) # shape (bsz, slen, start_n_top, hsz)
1049
+ p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
1050
+ end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
1051
+ end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top)
1052
+
1053
+ end_top_log_probs, end_top_index = torch.topk(
1054
+ end_log_probs, self.end_n_top, dim=1
1055
+ ) # shape (bsz, end_n_top, start_n_top)
1056
+ end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
1057
+ end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)
1058
+
1059
+ start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs)
1060
+ cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index)
1061
+
1062
+ outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits,) + outputs
1063
+
1064
+ # return start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits
1065
+ # or (if labels are provided) (total_loss,)
1066
+ return outputs
1067
+
1068
+
1069
+ class SequenceSummary(nn.Module):
1070
+ r""" Compute a single vector summary of a sequence hidden states according to various possibilities:
1071
+ Args of the config class:
1072
+ summary_type:
1073
+ - 'last' => [default] take the last token hidden state (like XLNet)
1074
+ - 'first' => take the first token hidden state (like Bert)
1075
+ - 'mean' => take the mean of all tokens hidden states
1076
+ - 'cls_index' => supply a Tensor of classification token position (GPT/GPT-2)
1077
+ - 'attn' => Not implemented now, use multi-head attention
1078
+ summary_use_proj: Add a projection after the vector extraction
1079
+ summary_proj_to_labels: If True, the projection outputs to config.num_labels classes (otherwise to hidden_size). Default: False.
1080
+ summary_activation: 'tanh' or another supported activation string => add that activation to the output; None => no activation. Default: None.
1081
+ summary_first_dropout: Add a dropout before the projection and activation
1082
+ summary_last_dropout: Add a dropout after the projection and activation
1083
+ """
1084
+
1085
+ def __init__(self, config: PretrainedConfig):
1086
+ super().__init__()
1087
+
1088
+ self.summary_type = getattr(config, "summary_type", "last")
1089
+ if self.summary_type == "attn":
1090
+ # We should use a standard multi-head attention module with absolute positional embedding for that.
1091
+ # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
1092
+ # We can probably just use the multi-head attention module of PyTorch >=1.1.0
1093
+ raise NotImplementedError
1094
+
1095
+ self.summary = Identity()
1096
+ if hasattr(config, "summary_use_proj") and config.summary_use_proj:
1097
+ if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
1098
+ num_classes = config.num_labels
1099
+ else:
1100
+ num_classes = config.hidden_size
1101
+ self.summary = nn.Linear(config.hidden_size, num_classes)
1102
+
1103
+ activation_string = getattr(config, "summary_activation", None)
1104
+ self.activation: Callable = (get_activation(activation_string) if activation_string else Identity())
1105
+
1106
+ self.first_dropout = Identity()
1107
+ if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0:
1108
+ self.first_dropout = nn.Dropout(config.summary_first_dropout)
1109
+
1110
+ self.last_dropout = Identity()
1111
+ if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0:
1112
+ self.last_dropout = nn.Dropout(config.summary_last_dropout)
1113
+
1114
+ def forward(self, hidden_states, cls_index=None):
1115
+ """ hidden_states: float Tensor in shape [bsz, ..., seq_len, hidden_size], the hidden-states of the last layer.
1116
+ cls_index: [optional] position of the classification token if summary_type == 'cls_index',
1117
+ shape (bsz,) or more generally (bsz, ...) where ... are optional leading dimensions of hidden_states.
1118
+ if summary_type == 'cls_index' and cls_index is None:
1119
+ we take the last token of the sequence as classification token
1120
+ """
1121
+ if self.summary_type == "last":
1122
+ output = hidden_states[:, -1]
1123
+ elif self.summary_type == "first":
1124
+ output = hidden_states[:, 0]
1125
+ elif self.summary_type == "mean":
1126
+ output = hidden_states.mean(dim=1)
1127
+ elif self.summary_type == "cls_index":
1128
+ if cls_index is None:
1129
+ cls_index = torch.full_like(hidden_states[..., :1, :], hidden_states.shape[-2] - 1, dtype=torch.long,)
1130
+ else:
1131
+ cls_index = cls_index.unsqueeze(-1).unsqueeze(-1)
1132
+ cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),))
1133
+ # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
1134
+ output = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, hidden_size)
1135
+ elif self.summary_type == "attn":
1136
+ raise NotImplementedError
1137
+
1138
+ output = self.first_dropout(output)
1139
+ output = self.summary(output)
1140
+ output = self.activation(output)
1141
+ output = self.last_dropout(output)
1142
+
1143
+ return output
1144
+
1145
+
1146
+ def prune_linear_layer(layer, index, dim=0):
1147
+ """ Prune a linear layer (a model parameter) to keep only entries in index.
1148
+ Return the pruned layer as a new layer with requires_grad=True.
1149
+ Used to remove heads.
1150
+ """
1151
+ index = index.to(layer.weight.device)
1152
+ W = layer.weight.index_select(dim, index).clone().detach()
1153
+ if layer.bias is not None:
1154
+ if dim == 1:
1155
+ b = layer.bias.clone().detach()
1156
+ else:
1157
+ b = layer.bias[index].clone().detach()
1158
+ new_size = list(layer.weight.size())
1159
+ new_size[dim] = len(index)
1160
+ new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
1161
+ new_layer.weight.requires_grad = False
1162
+ new_layer.weight.copy_(W.contiguous())
1163
+ new_layer.weight.requires_grad = True
1164
+ if layer.bias is not None:
1165
+ new_layer.bias.requires_grad = False
1166
+ new_layer.bias.copy_(b.contiguous())
1167
+ new_layer.bias.requires_grad = True
1168
+ return new_layer
1169
+
1170
+
1171
+ def prune_conv1d_layer(layer, index, dim=1):
1172
+ """ Prune a Conv1D layer (a model parameter) to keep only entries in index.
1173
+ A Conv1D works as a Linear layer (see e.g. BERT) but the weights are transposed.
1174
+ Return the pruned layer as a new layer with requires_grad=True.
1175
+ Used to remove heads.
1176
+ """
1177
+ index = index.to(layer.weight.device)
1178
+ W = layer.weight.index_select(dim, index).clone().detach()
1179
+ if dim == 0:
1180
+ b = layer.bias.clone().detach()
1181
+ else:
1182
+ b = layer.bias[index].clone().detach()
1183
+ new_size = list(layer.weight.size())
1184
+ new_size[dim] = len(index)
1185
+ new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device)
1186
+ new_layer.weight.requires_grad = False
1187
+ new_layer.weight.copy_(W.contiguous())
1188
+ new_layer.weight.requires_grad = True
1189
+ new_layer.bias.requires_grad = False
1190
+ new_layer.bias.copy_(b.contiguous())
1191
+ new_layer.bias.requires_grad = True
1192
+ return new_layer
1193
+
1194
+
1195
+ def prune_layer(layer, index, dim=None):
1196
+ """ Prune a Conv1D or nn.Linear layer (a model parameter) to keep only entries in index.
1197
+ Return the pruned layer as a new layer with requires_grad=True.
1198
+ Used to remove heads.
1199
+ """
1200
+ if isinstance(layer, nn.Linear):
1201
+ return prune_linear_layer(layer, index, dim=0 if dim is None else dim)
1202
+ elif isinstance(layer, Conv1D):
1203
+ return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim)
1204
+ else:
1205
+ raise ValueError("Can't prune layer of class {}".format(layer.__class__))
1206
+
1207
+
1208
+ def apply_chunking_to_forward(
1209
+ chunk_size: int, chunk_dim: int, forward_fn: Callable[..., torch.Tensor], *input_tensors
1210
+ ) -> torch.Tensor:
1211
+ """
1212
+ This function chunks the `input_tensors` into smaller input tensor parts of size `chunk_size` over the dimension `chunk_dim`.
1213
+ It then applies a layer `forward_fn` to each chunk independently to save memory.
1214
+ If the `forward_fn` is independent across the `chunk_dim` this function will yield the
1215
+ same result as not applying it.
1216
+
1217
+ Args:
1218
+ chunk_size: int - the chunk size of a chunked tensor. `num_chunks` = `len(input_tensors[0]) / chunk_size`
1219
+ chunk_dim: int - the dimension over which the input_tensors should be chunked
1220
+ forward_fn: fn - the forward fn of the model
1221
+ input_tensors: tuple(torch.Tensor) - the input tensors of `forward_fn` which are chunked
1222
+ Returns:
1223
+ a Tensor with the same shape that forward_fn would have given if applied
1224
+
1225
+
1226
+ Examples::
1227
+
1228
+ # rename the usual forward() fn to forward_chunk()
1229
+ def forward_chunk(self, hidden_states):
1230
+ hidden_states = self.decoder(hidden_states)
1231
+ return hidden_states
1232
+
1233
+ # implement a chunked forward function
1234
+ def forward(self, hidden_states):
1235
+ return apply_chunking_to_forward(self.chunk_size_lm_head, self.seq_len_dim, self.forward_chunk, hidden_states)
1236
+ """
1237
+
1238
+ assert len(input_tensors) > 0, "{} has to be a tuple/list of tensors".format(input_tensors)
1239
+ tensor_shape = input_tensors[0].shape
1240
+ assert all(
1241
+ input_tensor.shape == tensor_shape for input_tensor in input_tensors
1242
+ ), "All input tensors have to be of the same shape"
1243
+
1244
+ # inspect.signature exists since Python 3.5 and is a Python method -> no problem with backward compatibility
1245
+ num_args_in_forward_chunk_fn = len(inspect.signature(forward_fn).parameters)
1246
+ assert num_args_in_forward_chunk_fn == len(
1247
+ input_tensors
1248
+ ), "forward_chunk_fn expects {} arguments, but only {} input tensors are given".format(
1249
+ num_args_in_forward_chunk_fn, len(input_tensors)
1250
+ )
1251
+
1252
+ if chunk_size > 0:
1253
+ assert (
1254
+ input_tensors[0].shape[chunk_dim] % chunk_size == 0
1255
+ ), "The dimension to be chunked {} has to be a multiple of the chunk size {}".format(
1256
+ input_tensors[0].shape[chunk_dim], chunk_size
1257
+ )
1258
+
1259
+ num_chunks = input_tensors[0].shape[chunk_dim] // chunk_size
1260
+
1261
+ # chunk input tensor into tuples
1262
+ input_tensors_chunks = tuple(input_tensor.chunk(num_chunks, dim=chunk_dim) for input_tensor in input_tensors)
1263
+ # apply forward fn to every tuple
1264
+ output_chunks = tuple(forward_fn(*input_tensors_chunk) for input_tensors_chunk in zip(*input_tensors_chunks))
1265
+ # concatenate output at same dimension
1266
+ return torch.cat(output_chunks, dim=chunk_dim)
1267
+
1268
+ return forward_fn(*input_tensors)
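The chunking helper above only saves memory when `forward_fn` acts independently on each position along `chunk_dim`; in that case the chunked and unchunked outputs coincide. A minimal sketch of that equivalence, assuming `apply_chunking_to_forward` is importable from this `modeling_utils` module:

```python
import torch
import torch.nn as nn

# Assumption: this file is importable as `modeling_utils`.
from modeling_utils import apply_chunking_to_forward

# A per-position feed-forward layer: independent across the sequence dimension.
dense = nn.Linear(16, 16)

def forward_chunk(hidden_states):
    return dense(hidden_states)

hidden = torch.randn(2, 8, 16)  # (batch, seq_len, hidden)

# Apply in chunks of 4 positions along dim=1, then all at once.
chunked = apply_chunking_to_forward(4, 1, forward_chunk, hidden)
full = forward_chunk(hidden)

assert torch.allclose(chunked, full, atol=1e-6)  # identical up to float error
```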
RIS-DMMI/bert/multilingual.md ADDED
@@ -0,0 +1,303 @@
1
+ ## Models
2
+
3
+ There are two multilingual models currently available. We do not plan to release
4
+ more single-language models, but we may release `BERT-Large` versions of these
5
+ two in the future:
6
+
7
+ * **[`BERT-Base, Multilingual Cased (New, recommended)`](https://storage.googleapis.com/bert_models/2018_11_23/multi_cased_L-12_H-768_A-12.zip)**:
8
+ 104 languages, 12-layer, 768-hidden, 12-heads, 110M parameters
9
+ * **[`BERT-Base, Multilingual Uncased (Orig, not recommended)`](https://storage.googleapis.com/bert_models/2018_11_03/multilingual_L-12_H-768_A-12.zip)**:
10
+ 102 languages, 12-layer, 768-hidden, 12-heads, 110M parameters
11
+ * **[`BERT-Base, Chinese`](https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip)**:
12
+ Chinese Simplified and Traditional, 12-layer, 768-hidden, 12-heads, 110M
13
+ parameters
14
+
15
+ **The `Multilingual Cased (New)` model also fixes normalization issues in many
16
+ languages, so it is recommended in languages with non-Latin alphabets (and is
17
+ often better for most languages with Latin alphabets). When using this model,
18
+ make sure to pass `--do_lower_case=false` to `run_pretraining.py` and other
19
+ scripts.**
20
+
21
+ See the [list of languages](#list-of-languages) that the Multilingual model
22
+ supports. The Multilingual model does include Chinese (and English), but if your
23
+ fine-tuning data is Chinese-only, then the Chinese model will likely produce
24
+ better results.
25
+
26
+ ## Results
27
+
28
+ To evaluate these systems, we use the
29
+ [XNLI dataset](https://github.com/facebookresearch/XNLI), which is a
30
+ version of [MultiNLI](https://www.nyu.edu/projects/bowman/multinli/) where the
31
+ dev and test sets have been translated (by humans) into 15 languages. Note that
32
+ the training set was *machine* translated (we used the translations provided by
33
+ XNLI, not Google NMT). For clarity, we only report on 6 languages below:
34
+
35
+ <!-- mdformat off(no table) -->
36
+
37
+ | System | English | Chinese | Spanish | German | Arabic | Urdu |
38
+ | --------------------------------- | -------- | -------- | -------- | -------- | -------- | -------- |
39
+ | XNLI Baseline - Translate Train | 73.7 | 67.0 | 68.8 | 66.5 | 65.8 | 56.6 |
40
+ | XNLI Baseline - Translate Test | 73.7 | 68.3 | 70.7 | 68.7 | 66.8 | 59.3 |
41
+ | BERT - Translate Train Cased | **81.9** | **76.6** | **77.8** | **75.9** | **70.7** | 61.6 |
42
+ | BERT - Translate Train Uncased | 81.4 | 74.2 | 77.3 | 75.2 | 70.5 | 61.7 |
43
+ | BERT - Translate Test Uncased | 81.4 | 70.1 | 74.9 | 74.4 | 70.4 | **62.1** |
44
+ | BERT - Zero Shot Uncased | 81.4 | 63.8 | 74.3 | 70.5 | 62.1 | 58.3 |
45
+
46
+ <!-- mdformat on -->
47
+
48
+ The first two rows are baselines from the XNLI paper and the last three rows are
49
+ our results with BERT.
50
+
51
+ **Translate Train** means that the MultiNLI training set was machine translated
52
+ from English into the foreign language. So training and evaluation were both
53
+ done in the foreign language. Unfortunately, training was done on
54
+ machine-translated data, so it is impossible to quantify how much of the lower
55
+ accuracy (compared to English) is due to the quality of the machine translation
56
+ vs. the quality of the pre-trained model.
57
+
58
+ **Translate Test** means that the XNLI test set was machine translated from the
59
+ foreign language into English. So training and evaluation were both done on
60
+ English. However, test evaluation was done on machine-translated English, so the
61
+ accuracy depends on the quality of the machine translation system.
62
+
63
+ **Zero Shot** means that the Multilingual BERT system was fine-tuned on English
64
+ MultiNLI, and then evaluated on the foreign language XNLI test. In this case,
65
+ machine translation was not involved at all in either the pre-training or
66
+ fine-tuning.
67
+
68
+ Note that the English result is worse than the 84.2 MultiNLI baseline because
69
+ this training used Multilingual BERT rather than English-only BERT. This implies
70
+ that for high-resource languages, the Multilingual model is somewhat worse than
71
+ a single-language model. However, it is not feasible for us to train and
72
+ maintain dozens of single-language models. Therefore, if your goal is to maximize
73
+ performance with a language other than English or Chinese, you might find it
74
+ beneficial to run pre-training for additional steps starting from our
75
+ Multilingual model on data from your language of interest.
76
+
77
+ Here is a comparison of training Chinese models with the Multilingual
78
+ `BERT-Base` and Chinese-only `BERT-Base`:
79
+
80
+ System | Chinese
81
+ ----------------------- | -------
82
+ XNLI Baseline | 67.0
83
+ BERT Multilingual Model | 74.2
84
+ BERT Chinese-only Model | 77.2
85
+
86
+ Similar to English, the single-language model does 3% better than the
87
+ Multilingual model.
88
+
89
+ ## Fine-tuning Example
90
+
91
+ The multilingual model does **not** require any special consideration or API
92
+ changes. We did update the implementation of `BasicTokenizer` in
93
+ `tokenization.py` to support Chinese character tokenization, so please update if
94
+ you forked it. However, we did not change the tokenization API.
95
+
96
+ To test the new models, we did modify `run_classifier.py` to add support for the
97
+ [XNLI dataset](https://github.com/facebookresearch/XNLI). This is a 15-language
98
+ version of MultiNLI where the dev/test sets have been human-translated, and the
99
+ training set has been machine-translated.
100
+
101
+ To run the fine-tuning code, please download the
102
+ [XNLI dev/test set](https://www.nyu.edu/projects/bowman/xnli/XNLI-1.0.zip) and the
103
+ [XNLI machine-translated training set](https://www.nyu.edu/projects/bowman/xnli/XNLI-MT-1.0.zip)
104
+ and then unpack both .zip files into some directory `$XNLI_DIR`.
105
+
106
+ To run fine-tuning on XNLI. The language is hard-coded into `run_classifier.py`
107
+ (Chinese by default), so please modify `XnliProcessor` if you want to run on
108
+ another language.
109
+
110
+ This is a large dataset, so training will take a few hours on a GPU
111
+ (or about 30 minutes on a Cloud TPU). To run an experiment quickly for
112
+ debugging, just set `num_train_epochs` to a small value like `0.1`.
113
+
114
+ ```shell
115
+ export BERT_BASE_DIR=/path/to/bert/chinese_L-12_H-768_A-12 # or multilingual_L-12_H-768_A-12
116
+ export XNLI_DIR=/path/to/xnli
117
+
118
+ python run_classifier.py \
119
+ --task_name=XNLI \
120
+ --do_train=true \
121
+ --do_eval=true \
122
+ --data_dir=$XNLI_DIR \
123
+ --vocab_file=$BERT_BASE_DIR/vocab.txt \
124
+ --bert_config_file=$BERT_BASE_DIR/bert_config.json \
125
+ --init_checkpoint=$BERT_BASE_DIR/bert_model.ckpt \
126
+ --max_seq_length=128 \
127
+ --train_batch_size=32 \
128
+ --learning_rate=5e-5 \
129
+ --num_train_epochs=2.0 \
130
+ --output_dir=/tmp/xnli_output/
131
+ ```
132
+
133
+ With the Chinese-only model, the results should look something like this:
134
+
135
+ ```
136
+ ***** Eval results *****
137
+ eval_accuracy = 0.774116
138
+ eval_loss = 0.83554
139
+ global_step = 24543
140
+ loss = 0.74603
141
+ ```
142
+
143
+ ## Details
144
+
145
+ ### Data Source and Sampling
146
+
147
+ The languages chosen were the
148
+ [top 100 languages with the largest Wikipedias](https://meta.wikimedia.org/wiki/List_of_Wikipedias).
149
+ The entire Wikipedia dump for each language (excluding user and talk pages) was
150
+ taken as the training data for each language.
151
+
152
+ However, the size of the Wikipedia for a given language varies greatly, and
153
+ therefore low-resource languages may be "under-represented" in terms of the
154
+ neural network model (under the assumption that languages are "competing" for
155
+ limited model capacity to some extent). At the same time, we also don't want
156
+ to overfit the model by performing thousands of epochs over a tiny Wikipedia
157
+ for a particular language.
158
+
159
+ To balance these two factors, we performed exponentially smoothed weighting of
160
+ the data during pre-training data creation (and WordPiece vocab creation). In
161
+ other words, let's say that the probability of a language is *P(L)*, e.g.,
162
+ *P(English) = 0.21* means that after concatenating all of the Wikipedias
163
+ together, 21% of our data is English. We exponentiate each probability by some
164
+ factor *S* and then re-normalize, and sample from that distribution. In our case
165
+ we use *S=0.7*. So, high-resource languages like English will be under-sampled,
166
+ and low-resource languages like Icelandic will be over-sampled. E.g., in the
167
+ original distribution English would be sampled 1000x more than Icelandic, but
168
+ after smoothing it's only sampled 100x more.
169
+
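As a concrete illustration of this smoothing, here is a small sketch; the raw probabilities below are made-up stand-ins, and only the exponent *S=0.7* comes from the text:

```python
# Illustrative sketch of the exponentially smoothed sampling described above.
# The raw language probabilities are invented for demonstration; only the
# smoothing exponent S = 0.7 is taken from the text.
raw = {"english": 0.21, "german": 0.05, "icelandic": 0.00021}
S = 0.7

smoothed = {lang: p ** S for lang, p in raw.items()}
total = sum(smoothed.values())
sampling = {lang: p / total for lang, p in smoothed.items()}

ratio = sampling["english"] / sampling["icelandic"]
print(round(ratio))  # ~126: English drops from 1000x Icelandic to roughly 100x
```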
170
+ ### Tokenization
171
+
172
+ For tokenization, we use a 110k shared WordPiece vocabulary. The word counts are
173
+ weighted the same way as the data, so low-resource languages are upweighted by
174
+ some factor. We intentionally do *not* use any marker to denote the input
175
+ language (so that zero-shot training can work).
176
+
177
+ Because Chinese (and Japanese Kanji and Korean Hanja) does not have whitespace
178
+ characters, we add spaces around every character in the
179
+ [CJK Unicode range](https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_\(Unicode_block\))
180
+ before applying WordPiece. This means that Chinese is effectively
181
+ character-tokenized. Note that the CJK Unicode block only includes
182
+ Chinese-origin characters and does *not* include Hangul Korean or
183
+ Katakana/Hiragana Japanese, which are tokenized with whitespace+WordPiece like
184
+ all other languages.
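A simplified sketch of this pre-tokenization step (covering only the basic CJK Unified Ideographs block; the real `BasicTokenizer` in `tokenization.py` checks several additional CJK ranges):

```python
# Simplified sketch: pad every basic CJK Unified Ideograph with spaces so that
# downstream whitespace tokenization + WordPiece sees one character per token.
def add_cjk_spaces(text):
    out = []
    for ch in text:
        if 0x4E00 <= ord(ch) <= 0x9FFF:  # basic CJK Unified Ideographs block
            out.append(" " + ch + " ")
        else:
            out.append(ch)
    return "".join(out)

print(add_cjk_spaces("BERT是一个模型").split())
# ['BERT', '是', '一', '个', '模', '型']
```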
185
+
186
+ For all other languages, we apply the
187
+ [same recipe as English](https://github.com/google-research/bert#tokenization):
188
+ (a) lower casing+accent removal, (b) punctuation splitting, (c) whitespace
189
+ tokenization. We understand that accent markers have substantial meaning in some
190
+ languages, but felt that the benefits of reducing the effective vocabulary make
191
+ up for this. Generally the strong contextual models of BERT should make up for
192
+ any ambiguity introduced by stripping accent markers.
193
+
194
+ ### List of Languages
195
+
196
+ The multilingual model supports the following languages. These languages were
197
+ chosen because they are the top 100 languages with the largest Wikipedias:
198
+
199
+ * Afrikaans
200
+ * Albanian
201
+ * Arabic
202
+ * Aragonese
203
+ * Armenian
204
+ * Asturian
205
+ * Azerbaijani
206
+ * Bashkir
207
+ * Basque
208
+ * Bavarian
209
+ * Belarusian
210
+ * Bengali
211
+ * Bishnupriya Manipuri
212
+ * Bosnian
213
+ * Breton
214
+ * Bulgarian
215
+ * Burmese
216
+ * Catalan
217
+ * Cebuano
218
+ * Chechen
219
+ * Chinese (Simplified)
220
+ * Chinese (Traditional)
221
+ * Chuvash
222
+ * Croatian
223
+ * Czech
224
+ * Danish
225
+ * Dutch
226
+ * English
227
+ * Estonian
228
+ * Finnish
229
+ * French
230
+ * Galician
231
+ * Georgian
232
+ * German
233
+ * Greek
234
+ * Gujarati
235
+ * Haitian
236
+ * Hebrew
237
+ * Hindi
238
+ * Hungarian
239
+ * Icelandic
240
+ * Ido
241
+ * Indonesian
242
+ * Irish
243
+ * Italian
244
+ * Japanese
245
+ * Javanese
246
+ * Kannada
247
+ * Kazakh
248
+ * Kirghiz
249
+ * Korean
250
+ * Latin
251
+ * Latvian
252
+ * Lithuanian
253
+ * Lombard
254
+ * Low Saxon
255
+ * Luxembourgish
256
+ * Macedonian
257
+ * Malagasy
258
+ * Malay
259
+ * Malayalam
260
+ * Marathi
261
+ * Minangkabau
262
+ * Nepali
263
+ * Newar
264
+ * Norwegian (Bokmal)
265
+ * Norwegian (Nynorsk)
266
+ * Occitan
267
+ * Persian (Farsi)
268
+ * Piedmontese
269
+ * Polish
270
+ * Portuguese
271
+ * Punjabi
272
+ * Romanian
273
+ * Russian
274
+ * Scots
275
+ * Serbian
276
+ * Serbo-Croatian
277
+ * Sicilian
278
+ * Slovak
279
+ * Slovenian
280
+ * South Azerbaijani
281
+ * Spanish
282
+ * Sundanese
283
+ * Swahili
284
+ * Swedish
285
+ * Tagalog
286
+ * Tajik
287
+ * Tamil
288
+ * Tatar
289
+ * Telugu
290
+ * Turkish
291
+ * Ukrainian
292
+ * Urdu
293
+ * Uzbek
294
+ * Vietnamese
295
+ * Volapük
296
+ * Waray-Waray
297
+ * Welsh
298
+ * West Frisian
299
+ * Western Punjabi
300
+ * Yoruba
301
+
302
+ The **Multilingual Cased (New)** release contains additionally **Thai** and
303
+ **Mongolian**, which were not included in the original release.
RIS-DMMI/bert/optimization.py ADDED
@@ -0,0 +1,174 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Functions and classes related to optimization (weight updates)."""
16
+
17
+ from __future__ import absolute_import
18
+ from __future__ import division
19
+ from __future__ import print_function
20
+
21
+ import re
22
+ import tensorflow as tf
23
+
24
+
25
+ def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):
26
+ """Creates an optimizer training op."""
27
+ global_step = tf.train.get_or_create_global_step()
28
+
29
+ learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
30
+
31
+ # Implements linear decay of the learning rate.
32
+ learning_rate = tf.train.polynomial_decay(
33
+ learning_rate,
34
+ global_step,
35
+ num_train_steps,
36
+ end_learning_rate=0.0,
37
+ power=1.0,
38
+ cycle=False)
39
+
40
+ # Implements linear warmup. I.e., if global_step < num_warmup_steps, the
41
+ # learning rate will be `global_step/num_warmup_steps * init_lr`.
42
+ if num_warmup_steps:
43
+ global_steps_int = tf.cast(global_step, tf.int32)
44
+ warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
45
+
46
+ global_steps_float = tf.cast(global_steps_int, tf.float32)
47
+ warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
48
+
49
+ warmup_percent_done = global_steps_float / warmup_steps_float
50
+ warmup_learning_rate = init_lr * warmup_percent_done
51
+
52
+ is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
53
+ learning_rate = (
54
+ (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)
55
+
56
+ # It is recommended that you use this optimizer for fine tuning, since this
57
+ # is how the model was trained (note that the Adam m/v variables are NOT
58
+ # loaded from init_checkpoint.)
59
+ optimizer = AdamWeightDecayOptimizer(
60
+ learning_rate=learning_rate,
61
+ weight_decay_rate=0.01,
62
+ beta_1=0.9,
63
+ beta_2=0.999,
64
+ epsilon=1e-6,
65
+ exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
66
+
67
+ if use_tpu:
68
+ optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
69
+
70
+ tvars = tf.trainable_variables()
71
+ grads = tf.gradients(loss, tvars)
72
+
73
+ # This is how the model was pre-trained.
74
+ (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
75
+
76
+ train_op = optimizer.apply_gradients(
77
+ zip(grads, tvars), global_step=global_step)
78
+
79
+ # Normally the global step update is done inside of `apply_gradients`.
80
+ # However, `AdamWeightDecayOptimizer` doesn't do this. But if you use
81
+ # a different optimizer, you should probably take this line out.
82
+ new_global_step = global_step + 1
83
+ train_op = tf.group(train_op, [global_step.assign(new_global_step)])
84
+ return train_op
85
+
86
+
87
+ class AdamWeightDecayOptimizer(tf.train.Optimizer):
88
+ """A basic Adam optimizer that includes "correct" L2 weight decay."""
89
+
90
+ def __init__(self,
91
+ learning_rate,
92
+ weight_decay_rate=0.0,
93
+ beta_1=0.9,
94
+ beta_2=0.999,
95
+ epsilon=1e-6,
96
+ exclude_from_weight_decay=None,
97
+ name="AdamWeightDecayOptimizer"):
98
+ """Constructs a AdamWeightDecayOptimizer."""
99
+ super(AdamWeightDecayOptimizer, self).__init__(False, name)
100
+
101
+ self.learning_rate = learning_rate
102
+ self.weight_decay_rate = weight_decay_rate
103
+ self.beta_1 = beta_1
104
+ self.beta_2 = beta_2
105
+ self.epsilon = epsilon
106
+ self.exclude_from_weight_decay = exclude_from_weight_decay
107
+
108
+ def apply_gradients(self, grads_and_vars, global_step=None, name=None):
109
+ """See base class."""
110
+ assignments = []
111
+ for (grad, param) in grads_and_vars:
112
+ if grad is None or param is None:
113
+ continue
114
+
115
+ param_name = self._get_variable_name(param.name)
116
+
117
+ m = tf.get_variable(
118
+ name=param_name + "/adam_m",
119
+ shape=param.shape.as_list(),
120
+ dtype=tf.float32,
121
+ trainable=False,
122
+ initializer=tf.zeros_initializer())
123
+ v = tf.get_variable(
124
+ name=param_name + "/adam_v",
125
+ shape=param.shape.as_list(),
126
+ dtype=tf.float32,
127
+ trainable=False,
128
+ initializer=tf.zeros_initializer())
129
+
130
+ # Standard Adam update.
131
+ next_m = (
132
+ tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
133
+ next_v = (
134
+ tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
135
+ tf.square(grad)))
136
+
137
+ update = next_m / (tf.sqrt(next_v) + self.epsilon)
138
+
139
+ # Just adding the square of the weights to the loss function is *not*
140
+ # the correct way of using L2 regularization/weight decay with Adam,
141
+ # since that will interact with the m and v parameters in strange ways.
142
+ #
143
+ # Instead we want ot decay the weights in a manner that doesn't interact
144
+ # with the m/v parameters. This is equivalent to adding the square
145
+ # of the weights to the loss with plain (non-momentum) SGD.
146
+ if self._do_use_weight_decay(param_name):
147
+ update += self.weight_decay_rate * param
148
+
149
+ update_with_lr = self.learning_rate * update
150
+
151
+ next_param = param - update_with_lr
152
+
153
+ assignments.extend(
154
+ [param.assign(next_param),
155
+ m.assign(next_m),
156
+ v.assign(next_v)])
157
+ return tf.group(*assignments, name=name)
158
+
159
+ def _do_use_weight_decay(self, param_name):
160
+ """Whether to use L2 weight decay for `param_name`."""
161
+ if not self.weight_decay_rate:
162
+ return False
163
+ if self.exclude_from_weight_decay:
164
+ for r in self.exclude_from_weight_decay:
165
+ if re.search(r, param_name) is not None:
166
+ return False
167
+ return True
168
+
169
+ def _get_variable_name(self, param_name):
170
+ """Get the variable name from the tensor name."""
171
+ m = re.match("^(.*):\\d+$", param_name)
172
+ if m is not None:
173
+ param_name = m.group(1)
174
+ return param_name
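To see the resulting learning-rate schedule concretely, here is a framework-free sketch of the same warmup + linear-decay rule implemented in `create_optimizer` above (the step counts and rate below are illustrative values, not defaults from this file):

```python
# Pure-Python sketch of the schedule above: linear warmup to init_lr over
# num_warmup_steps, then linear (polynomial power=1.0) decay to 0 at
# num_train_steps. The concrete numbers are illustrative.
def lr_at(step, init_lr=5e-5, num_train_steps=1000, num_warmup_steps=100):
    if step < num_warmup_steps:
        return init_lr * step / num_warmup_steps  # warmup ramp
    return init_lr * (1 - step / num_train_steps)  # linear decay to 0

for step in (0, 50, 100, 500, 1000):
    print(step, lr_at(step))
```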
RIS-DMMI/bert/optimization_test.py ADDED
@@ -0,0 +1,48 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ from __future__ import absolute_import
16
+ from __future__ import division
17
+ from __future__ import print_function
18
+
19
+ import optimization
20
+ import tensorflow as tf
21
+
22
+
23
+ class OptimizationTest(tf.test.TestCase):
24
+
25
+ def test_adam(self):
26
+ with self.test_session() as sess:
27
+ w = tf.get_variable(
28
+ "w",
29
+ shape=[3],
30
+ initializer=tf.constant_initializer([0.1, -0.2, -0.1]))
31
+ x = tf.constant([0.4, 0.2, -0.5])
32
+ loss = tf.reduce_mean(tf.square(x - w))
33
+ tvars = tf.trainable_variables()
34
+ grads = tf.gradients(loss, tvars)
35
+ global_step = tf.train.get_or_create_global_step()
36
+ optimizer = optimization.AdamWeightDecayOptimizer(learning_rate=0.2)
37
+ train_op = optimizer.apply_gradients(zip(grads, tvars), global_step)
38
+ init_op = tf.group(tf.global_variables_initializer(),
39
+ tf.local_variables_initializer())
40
+ sess.run(init_op)
41
+ for _ in range(100):
42
+ sess.run(train_op)
43
+ w_np = sess.run(w)
44
+ self.assertAllClose(w_np.flat, [0.4, 0.2, -0.5], rtol=1e-2, atol=1e-2)
45
+
46
+
47
+ if __name__ == "__main__":
48
+ tf.test.main()
RIS-DMMI/bert/predicting_movie_reviews_with_bert_on_tf_hub.ipynb ADDED
@@ -0,0 +1,1231 @@
1
+ {
2
+ "nbformat": 4,
3
+ "nbformat_minor": 0,
4
+ "metadata": {
5
+ "colab": {
6
+ "name": "Predicting Movie Reviews with BERT on TF Hub.ipynb",
7
+ "version": "0.3.2",
8
+ "provenance": [],
9
+ "collapsed_sections": []
10
+ },
11
+ "kernelspec": {
12
+ "name": "python3",
13
+ "display_name": "Python 3"
14
+ },
15
+ "accelerator": "GPU"
16
+ },
17
+ "cells": [
18
+ {
19
+ "metadata": {
20
+ "id": "j0a4mTk9o1Qg",
21
+ "colab_type": "code",
22
+ "colab": {}
23
+ },
24
+ "cell_type": "code",
25
+ "source": [
26
+ "# Copyright 2019 Google Inc.\n",
27
+ "\n",
28
+ "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
29
+ "# you may not use this file except in compliance with the License.\n",
30
+ "# You may obtain a copy of the License at\n",
31
+ "\n",
32
+ "# http://www.apache.org/licenses/LICENSE-2.0\n",
33
+ "\n",
34
+ "# Unless required by applicable law or agreed to in writing, software\n",
35
+ "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
36
+ "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
37
+ "# See the License for the specific language governing permissions and\n",
38
+ "# limitations under the License."
39
+ ],
40
+ "execution_count": 0,
41
+ "outputs": []
42
+ },
43
+ {
44
+ "metadata": {
45
+ "id": "dCpvgG0vwXAZ",
46
+ "colab_type": "text"
47
+ },
48
+ "cell_type": "markdown",
49
+ "source": [
50
+ "#Predicting Movie Review Sentiment with BERT on TF Hub"
51
+ ]
52
+ },
53
+ {
54
+ "metadata": {
55
+ "id": "xiYrZKaHwV81",
56
+ "colab_type": "text"
57
+ },
58
+ "cell_type": "markdown",
59
+ "source": [
60
+ "If you’ve been following Natural Language Processing over the past year, you’ve probably heard of BERT: Bidirectional Encoder Representations from Transformers. It’s a neural network architecture designed by Google researchers that’s totally transformed what’s state-of-the-art for NLP tasks, like text classification, translation, summarization, and question answering.\n",
61
+ "\n",
62
+ "Now that BERT's been added to [TF Hub](https://www.tensorflow.org/hub) as a loadable module, it's easy(ish) to add into existing Tensorflow text pipelines. In an existing pipeline, BERT can replace text embedding layers like ELMo and GloVe. Alternatively, [finetuning](http://wiki.fast.ai/index.php/Fine_tuning) BERT can provide both an accuracy boost and faster training time in many cases.\n",
63
+ "\n",
64
+ "Here, we'll train a model to predict whether an IMDB movie review is positive or negative using BERT in Tensorflow with tf hub. Some code was adapted from [this colab notebook](https://colab.sandbox.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb). Let's get started!"
65
+ ]
66
+ },
67
+ {
68
+ "metadata": {
69
+ "id": "hsZvic2YxnTz",
70
+ "colab_type": "code",
71
+ "colab": {}
72
+ },
73
+ "cell_type": "code",
74
+ "source": [
75
+ "from sklearn.model_selection import train_test_split\n",
76
+ "import pandas as pd\n",
77
+ "import tensorflow as tf\n",
78
+ "import tensorflow_hub as hub\n",
79
+ "from datetime import datetime"
80
+ ],
81
+ "execution_count": 0,
82
+ "outputs": []
83
+ },
84
+ {
85
+ "metadata": {
86
+ "id": "cp5wfXDx5SPH",
87
+ "colab_type": "text"
88
+ },
89
+ "cell_type": "markdown",
90
+ "source": [
91
+ "In addition to the standard libraries we imported above, we'll need to install BERT's python package."
92
+ ]
93
+ },
94
+ {
95
+ "metadata": {
96
+ "id": "jviywGyWyKsA",
97
+ "colab_type": "code",
98
+ "outputId": "166f3005-d219-404f-b201-2a0b75480360",
99
+ "colab": {
100
+ "base_uri": "https://localhost:8080/",
101
+ "height": 51
102
+ }
103
+ },
104
+ "cell_type": "code",
105
+ "source": [
106
+ "!pip install bert-tensorflow"
107
+ ],
108
+ "execution_count": 38,
109
+ "outputs": [
110
+ {
111
+ "output_type": "stream",
112
+ "text": [
113
+ "Requirement already satisfied: bert-tensorflow in /usr/local/lib/python3.6/dist-packages (1.0.1)\n",
114
+ "Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from bert-tensorflow) (1.11.0)\n"
115
+ ],
116
+ "name": "stdout"
117
+ }
118
+ ]
119
+ },
120
+ {
121
+ "metadata": {
122
+ "id": "hhbGEfwgdEtw",
123
+ "colab_type": "code",
124
+ "colab": {}
125
+ },
126
+ "cell_type": "code",
127
+ "source": [
128
+ "import bert\n",
129
+ "from bert import run_classifier\n",
130
+ "from bert import optimization\n",
131
+ "from bert import tokenization"
132
+ ],
133
+ "execution_count": 0,
134
+ "outputs": []
135
+ },
136
+ {
137
+ "metadata": {
138
+ "id": "KVB3eOcjxxm1",
139
+ "colab_type": "text"
140
+ },
141
+ "cell_type": "markdown",
142
+ "source": [
143
+ "Below, we'll set an output directory location to store our model output and checkpoints. This can be a local directory, in which case you'd set OUTPUT_DIR to the name of the directory you'd like to create. If you're running this code in Google's hosted Colab, the directory won't persist after the Colab session ends.\n",
144
+ "\n",
145
+ "Alternatively, if you're a GCP user, you can store output in a GCP bucket. To do that, set a directory name in OUTPUT_DIR and the name of the GCP bucket in the BUCKET field.\n",
146
+ "\n",
147
+ "Set DO_DELETE to rewrite the OUTPUT_DIR if it exists. Otherwise, Tensorflow will load existing model checkpoints from that directory (if they exist)."
148
+ ]
149
+ },
150
+ {
151
+ "metadata": {
152
+ "id": "US_EAnICvP7f",
153
+ "colab_type": "code",
154
+ "outputId": "7780a032-31d4-4794-e6aa-664a5d2ae7dd",
155
+ "cellView": "form",
156
+ "colab": {
157
+ "base_uri": "https://localhost:8080/",
158
+ "height": 34
159
+ }
160
+ },
161
+ "cell_type": "code",
162
+ "source": [
163
+ "# Set the output directory for saving model file\n",
164
+ "# Optionally, set a GCP bucket location\n",
165
+ "\n",
166
+ "OUTPUT_DIR = 'OUTPUT_DIR_NAME'#@param {type:\"string\"}\n",
167
+ "#@markdown Whether or not to clear/delete the directory and create a new one\n",
168
+ "DO_DELETE = False #@param {type:\"boolean\"}\n",
169
+ "#@markdown Set USE_BUCKET and BUCKET if you want to (optionally) store model output on GCP bucket.\n",
170
+ "USE_BUCKET = True #@param {type:\"boolean\"}\n",
171
+ "BUCKET = 'BUCKET_NAME' #@param {type:\"string\"}\n",
172
+ "\n",
173
+ "if USE_BUCKET:\n",
174
+ " OUTPUT_DIR = 'gs://{}/{}'.format(BUCKET, OUTPUT_DIR)\n",
175
+ " from google.colab import auth\n",
176
+ " auth.authenticate_user()\n",
177
+ "\n",
178
+ "if DO_DELETE:\n",
179
+ " try:\n",
180
+ " tf.gfile.DeleteRecursively(OUTPUT_DIR)\n",
181
+ " except:\n",
182
+ " # Doesn't matter if the directory didn't exist\n",
183
+ " pass\n",
184
+ "tf.gfile.MakeDirs(OUTPUT_DIR)\n",
185
+ "print('***** Model output directory: {} *****'.format(OUTPUT_DIR))\n"
186
+ ],
187
+ "execution_count": 40,
188
+ "outputs": [
189
+ {
190
+ "output_type": "stream",
191
+ "text": [
192
+ "***** Model output directory: gs://bert-tfhub/aclImdb_v1 *****\n"
193
+ ],
194
+ "name": "stdout"
195
+ }
196
+ ]
197
+ },
198
+ {
199
+ "metadata": {
200
+ "id": "pmFYvkylMwXn",
201
+ "colab_type": "text"
202
+ },
203
+ "cell_type": "markdown",
204
+ "source": [
205
+ "#Data"
206
+ ]
207
+ },
208
+ {
209
+ "metadata": {
210
+ "id": "MC_w8SRqN0fr",
211
+ "colab_type": "text"
212
+ },
213
+ "cell_type": "markdown",
214
+ "source": [
215
+ "First, let's download the dataset, hosted by Stanford. The code below, which downloads, extracts, and imports the IMDB Large Movie Review Dataset, is borrowed from [this Tensorflow tutorial](https://www.tensorflow.org/hub/tutorials/text_classification_with_tf_hub)."
216
+ ]
217
+ },
218
+ {
219
+ "metadata": {
220
+ "id": "fom_ff20gyy6",
221
+ "colab_type": "code",
222
+ "colab": {}
223
+ },
224
+ "cell_type": "code",
225
+ "source": [
226
+ "from tensorflow import keras\n",
227
+ "import os\n",
228
+ "import re\n",
229
+ "\n",
230
+ "# Load all files from a directory in a DataFrame.\n",
231
+ "def load_directory_data(directory):\n",
232
+ " data = {}\n",
233
+ " data[\"sentence\"] = []\n",
234
+ " data[\"sentiment\"] = []\n",
235
+ " for file_path in os.listdir(directory):\n",
236
+ " with tf.gfile.GFile(os.path.join(directory, file_path), \"r\") as f:\n",
237
+ " data[\"sentence\"].append(f.read())\n",
238
+ " data[\"sentiment\"].append(re.match(\"\\d+_(\\d+)\\.txt\", file_path).group(1))\n",
239
+ " return pd.DataFrame.from_dict(data)\n",
240
+ "\n",
241
+ "# Merge positive and negative examples, add a polarity column and shuffle.\n",
242
+ "def load_dataset(directory):\n",
243
+ " pos_df = load_directory_data(os.path.join(directory, \"pos\"))\n",
244
+ " neg_df = load_directory_data(os.path.join(directory, \"neg\"))\n",
245
+ " pos_df[\"polarity\"] = 1\n",
246
+ " neg_df[\"polarity\"] = 0\n",
247
+ " return pd.concat([pos_df, neg_df]).sample(frac=1).reset_index(drop=True)\n",
248
+ "\n",
249
+ "# Download and process the dataset files.\n",
250
+ "def download_and_load_datasets(force_download=False):\n",
251
+ " dataset = tf.keras.utils.get_file(\n",
252
+ " fname=\"aclImdb.tar.gz\", \n",
253
+ " origin=\"http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz\", \n",
254
+ " extract=True)\n",
255
+ " \n",
256
+ " train_df = load_dataset(os.path.join(os.path.dirname(dataset), \n",
257
+ " \"aclImdb\", \"train\"))\n",
258
+ " test_df = load_dataset(os.path.join(os.path.dirname(dataset), \n",
259
+ " \"aclImdb\", \"test\"))\n",
260
+ " \n",
261
+ " return train_df, test_df\n"
262
+ ],
263
+ "execution_count": 0,
264
+ "outputs": []
265
+ },
266
+ {
267
+ "metadata": {
268
+ "id": "2abfwdn-g135",
269
+ "colab_type": "code",
270
+ "colab": {}
271
+ },
272
+ "cell_type": "code",
273
+ "source": [
274
+ "train, test = download_and_load_datasets()"
275
+ ],
276
+ "execution_count": 0,
277
+ "outputs": []
278
+ },
279
+ {
280
+ "metadata": {
281
+ "id": "XA8WHJgzhIZf",
282
+ "colab_type": "text"
283
+ },
284
+ "cell_type": "markdown",
285
+ "source": [
286
+ "To keep training fast, we'll take a sample of 5000 train and test examples, respectively."
287
+ ]
288
+ },
289
+ {
290
+ "metadata": {
291
+ "id": "lw_F488eixTV",
292
+ "colab_type": "code",
293
+ "colab": {}
294
+ },
295
+ "cell_type": "code",
296
+ "source": [
297
+ "train = train.sample(5000)\n",
298
+ "test = test.sample(5000)"
299
+ ],
300
+ "execution_count": 0,
301
+ "outputs": []
302
+ },
303
+ {
304
+ "metadata": {
305
+ "id": "prRQM8pDi8xI",
306
+ "colab_type": "code",
307
+ "outputId": "34445cb8-2be0-4379-fdbc-7794091f6049",
308
+ "colab": {
309
+ "base_uri": "https://localhost:8080/",
310
+ "height": 34
311
+ }
312
+ },
313
+ "cell_type": "code",
314
+ "source": [
315
+ "train.columns"
316
+ ],
317
+ "execution_count": 44,
318
+ "outputs": [
319
+ {
320
+ "output_type": "execute_result",
321
+ "data": {
322
+ "text/plain": [
323
+ "Index(['sentence', 'sentiment', 'polarity'], dtype='object')"
324
+ ]
325
+ },
326
+ "metadata": {
327
+ "tags": []
328
+ },
329
+ "execution_count": 44
330
+ }
331
+ ]
332
+ },
333
+ {
334
+ "metadata": {
335
+ "id": "sfRnHSz3iSXz",
336
+ "colab_type": "text"
337
+ },
338
+ "cell_type": "markdown",
339
+ "source": [
340
+ "For us, our input data is the 'sentence' column and our label is the 'polarity' column (0, 1 for negative and positive, respectively)."
341
+ ]
342
+ },
343
+ {
344
+ "metadata": {
345
+ "id": "IuMOGwFui4it",
346
+ "colab_type": "code",
347
+ "colab": {}
348
+ },
349
+ "cell_type": "code",
350
+ "source": [
351
+ "DATA_COLUMN = 'sentence'\n",
352
+ "LABEL_COLUMN = 'polarity'\n",
353
+ "# label_list is the list of labels, i.e. True, False or 0, 1 or 'dog', 'cat'\n",
354
+ "label_list = [0, 1]"
355
+ ],
356
+ "execution_count": 0,
357
+ "outputs": []
358
+ },
359
+ {
360
+ "metadata": {
361
+ "id": "V399W0rqNJ-Z",
362
+ "colab_type": "text"
363
+ },
364
+ "cell_type": "markdown",
365
+ "source": [
366
+ "#Data Preprocessing\n",
367
+ "We'll need to transform our data into a format BERT understands. This involves two steps. First, we create `InputExample`'s using the constructor provided in the BERT library.\n",
368
+ "\n",
369
+ "- `text_a` is the text we want to classify, which in this case is the `sentence` column in our Dataframe. \n",
370
+ "- `text_b` is used if we're training a model to understand the relationship between sentences (i.e. is `text_b` a translation of `text_a`? Is `text_b` an answer to the question asked by `text_a`?). This doesn't apply to our task, so we can leave `text_b` blank.\n",
371
+ "- `label` is the label for our example, i.e. True, False"
372
+ ]
373
+ },
374
+ {
375
+ "metadata": {
376
+ "id": "p9gEt5SmM6i6",
377
+ "colab_type": "code",
378
+ "colab": {}
379
+ },
380
+ "cell_type": "code",
381
+ "source": [
382
+ "# Use the InputExample class from BERT's run_classifier code to create examples from the data\n",
383
+ "train_InputExamples = train.apply(lambda x: bert.run_classifier.InputExample(guid=None, # Globally unique ID for bookkeeping, unused in this example\n",
384
+ " text_a = x[DATA_COLUMN], \n",
385
+ " text_b = None, \n",
386
+ " label = x[LABEL_COLUMN]), axis = 1)\n",
387
+ "\n",
388
+ "test_InputExamples = test.apply(lambda x: bert.run_classifier.InputExample(guid=None, \n",
389
+ " text_a = x[DATA_COLUMN], \n",
390
+ " text_b = None, \n",
391
+ " label = x[LABEL_COLUMN]), axis = 1)"
392
+ ],
393
+ "execution_count": 0,
394
+ "outputs": []
395
+ },
396
+ {
397
+ "metadata": {
398
+ "id": "SCZWZtKxObjh",
399
+ "colab_type": "text"
400
+ },
401
+ "cell_type": "markdown",
402
+ "source": [
403
+ "Next, we need to preprocess our data so that it matches the data BERT was trained on. For this, we'll need to do a couple of things (but don't worry--this is also included in the Python library):\n",
404
+ "\n",
405
+ "\n",
406
+ "1. Lowercase our text (if we're using a BERT lowercase model)\n",
407
+ "2. Tokenize it (i.e. \"sally says hi\" -> [\"sally\", \"says\", \"hi\"])\n",
408
+ "3. Break words into WordPieces (i.e. \"calling\" -> [\"call\", \"##ing\"])\n",
409
+ "4. Map our words to indexes using a vocab file that BERT provides\n",
410
+ "5. Add special \"CLS\" and \"SEP\" tokens (see the [readme](https://github.com/google-research/bert))\n",
411
+ "6. Append \"index\" and \"segment\" tokens to each input (see the [BERT paper](https://arxiv.org/pdf/1810.04805.pdf))\n",
412
+ "\n",
413
+ "Happily, we don't have to worry about most of these details.\n",
414
+ "\n",
415
+ "\n"
416
+ ]
417
+ },
418
+ {
419
+ "metadata": {
420
+ "id": "qMWiDtpyQSoU",
421
+ "colab_type": "text"
422
+ },
423
+ "cell_type": "markdown",
424
+ "source": [
425
+ "To start, we'll need to load a vocabulary file and lowercasing information directly from the BERT tf hub module:"
426
+ ]
427
+ },
428
+ {
429
+ "metadata": {
430
+ "id": "IhJSe0QHNG7U",
431
+ "colab_type": "code",
432
+ "outputId": "20b28cc7-3cb3-4ce6-bfff-a7847ce3bbaa",
433
+ "colab": {
434
+ "base_uri": "https://localhost:8080/",
435
+ "height": 34
436
+ }
437
+ },
438
+ "cell_type": "code",
439
+ "source": [
440
+ "# This is a path to an uncased (all lowercase) version of BERT\n",
441
+ "BERT_MODEL_HUB = \"https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1\"\n",
442
+ "\n",
443
+ "def create_tokenizer_from_hub_module():\n",
444
+ " \"\"\"Get the vocab file and casing info from the Hub module.\"\"\"\n",
445
+ " with tf.Graph().as_default():\n",
446
+ " bert_module = hub.Module(BERT_MODEL_HUB)\n",
447
+ " tokenization_info = bert_module(signature=\"tokenization_info\", as_dict=True)\n",
448
+ " with tf.Session() as sess:\n",
449
+ " vocab_file, do_lower_case = sess.run([tokenization_info[\"vocab_file\"],\n",
450
+ " tokenization_info[\"do_lower_case\"]])\n",
451
+ " \n",
452
+ " return bert.tokenization.FullTokenizer(\n",
453
+ " vocab_file=vocab_file, do_lower_case=do_lower_case)\n",
454
+ "\n",
455
+ "tokenizer = create_tokenizer_from_hub_module()"
456
+ ],
457
+ "execution_count": 47,
458
+ "outputs": [
459
+ {
460
+ "output_type": "stream",
461
+ "text": [
462
+ "INFO:tensorflow:Saver not created because there are no variables in the graph to restore\n"
463
+ ],
464
+ "name": "stdout"
465
+ }
466
+ ]
467
+ },
468
+ {
469
+ "metadata": {
470
+ "id": "z4oFkhpZBDKm",
471
+ "colab_type": "text"
472
+ },
473
+ "cell_type": "markdown",
474
+ "source": [
475
+ "Great--we just learned that the BERT model we're using expects lowercase data (that's what's stored in tokenization_info[\"do_lower_case\"]) and we also loaded BERT's vocab file. We also created a tokenizer, which breaks words into word pieces:"
476
+ ]
477
+ },
478
+ {
479
+ "metadata": {
480
+ "id": "dsBo6RCtQmwx",
481
+ "colab_type": "code",
482
+ "outputId": "9af8c917-90ec-4fe9-897b-79dc89ca88e1",
483
+ "colab": {
484
+ "base_uri": "https://localhost:8080/",
485
+ "height": 221
486
+ }
487
+ },
488
+ "cell_type": "code",
489
+ "source": [
490
+ "tokenizer.tokenize(\"This here's an example of using the BERT tokenizer\")"
491
+ ],
492
+ "execution_count": 48,
493
+ "outputs": [
494
+ {
495
+ "output_type": "execute_result",
496
+ "data": {
497
+ "text/plain": [
498
+ "['this',\n",
499
+ " 'here',\n",
500
+ " \"'\",\n",
501
+ " 's',\n",
502
+ " 'an',\n",
503
+ " 'example',\n",
504
+ " 'of',\n",
505
+ " 'using',\n",
506
+ " 'the',\n",
507
+ " 'bert',\n",
508
+ " 'token',\n",
509
+ " '##izer']"
510
+ ]
511
+ },
512
+ "metadata": {
513
+ "tags": []
514
+ },
515
+ "execution_count": 48
516
+ }
517
+ ]
518
+ },
519
+ {
520
+ "metadata": {
521
+ "id": "0OEzfFIt6GIc",
522
+ "colab_type": "text"
523
+ },
524
+ "cell_type": "markdown",
525
+ "source": [
526
+ "Using our tokenizer, we'll call `run_classifier.convert_examples_to_features` on our InputExamples to convert them into features BERT understands."
527
+ ]
528
+ },
529
+ {
530
+ "metadata": {
531
+ "id": "LL5W8gEGRTAf",
532
+ "colab_type": "code",
533
+ "outputId": "65001dda-155b-48fc-b5fc-1e4cabc8dfbf",
534
+ "colab": {
535
+ "base_uri": "https://localhost:8080/",
536
+ "height": 1261
537
+ }
538
+ },
539
+ "cell_type": "code",
540
+ "source": [
541
+ "# We'll set sequences to be at most 128 tokens long.\n",
542
+ "MAX_SEQ_LENGTH = 128\n",
543
+ "# Convert our train and test features to InputFeatures that BERT understands.\n",
544
+ "train_features = bert.run_classifier.convert_examples_to_features(train_InputExamples, label_list, MAX_SEQ_LENGTH, tokenizer)\n",
545
+ "test_features = bert.run_classifier.convert_examples_to_features(test_InputExamples, label_list, MAX_SEQ_LENGTH, tokenizer)"
546
+ ],
547
+ "execution_count": 49,
548
+ "outputs": [
549
+ {
550
+ "output_type": "stream",
551
+ "text": [
552
+ "INFO:tensorflow:Writing example 0 of 5000\n",
553
+ "INFO:tensorflow:*** Example ***\n",
554
+ "INFO:tensorflow:guid: None\n",
555
+ "INFO:tensorflow:tokens: [CLS] i ' m watching this on the sci - fi channel right now . it ' s so horrible i can ' t stop watching it ! i ' m a video ##grapher and this movie makes me sad . i feel bad for anyone associated with this movie . some of the camera work is good . most is very questionable . there are a few decent actors in the flick . too bad they ' re surrounded by what must have been the director ' s relatives . that ' s the only way they could have been qualified to be in a movie ! music was a little better than the acting . if you get around to watching this i hope it [SEP]\n",
556
+ "INFO:tensorflow:input_ids: 101 1045 1005 1049 3666 2023 2006 1996 16596 1011 10882 3149 2157 2085 1012 2009 1005 1055 2061 9202 1045 2064 1005 1056 2644 3666 2009 999 1045 1005 1049 1037 2678 18657 1998 2023 3185 3084 2033 6517 1012 1045 2514 2919 2005 3087 3378 2007 2023 3185 1012 2070 1997 1996 4950 2147 2003 2204 1012 2087 2003 2200 21068 1012 2045 2024 1037 2261 11519 5889 1999 1996 17312 1012 2205 2919 2027 1005 2128 5129 2011 2054 2442 2031 2042 1996 2472 1005 1055 9064 1012 2008 1005 1055 1996 2069 2126 2027 2071 2031 2042 4591 2000 2022 1999 1037 3185 999 2189 2001 1037 2210 2488 2084 1996 3772 1012 2065 2017 2131 2105 2000 3666 2023 1045 3246 2009 102\n",
557
+ "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n",
558
+ "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
559
+ "INFO:tensorflow:label: 0 (id = 0)\n",
560
+ "INFO:tensorflow:*** Example ***\n",
561
+ "INFO:tensorflow:guid: None\n",
562
+ "INFO:tensorflow:tokens: [CLS] i have been a fan of pushing dai ##sies since the very beginning . it is wonderful ##ly thought up , and bryan fuller has the most remarkable ideas for this show . < br / > < br / > it is unbelievable on how much tv has been needing a creative , original show like pushing dai ##sies . it is a huge relief to see a show , that is unlike the rest , where as , if you compared it to some of the newer shows , such as scrub ##s and house , you would see the similarities , and it does get ted ##ious at moments to see shows so close in identity . < br / > < br [SEP]\n",
563
+ "INFO:tensorflow:input_ids: 101 1045 2031 2042 1037 5470 1997 6183 18765 14625 2144 1996 2200 2927 1012 2009 2003 6919 2135 2245 2039 1010 1998 8527 12548 2038 1996 2087 9487 4784 2005 2023 2265 1012 1026 7987 1013 1028 1026 7987 1013 1028 2009 2003 23653 2006 2129 2172 2694 2038 2042 11303 1037 5541 1010 2434 2265 2066 6183 18765 14625 1012 2009 2003 1037 4121 4335 2000 2156 1037 2265 1010 2008 2003 4406 1996 2717 1010 2073 2004 1010 2065 2017 4102 2009 2000 2070 1997 1996 10947 3065 1010 2107 2004 18157 2015 1998 2160 1010 2017 2052 2156 1996 12319 1010 1998 2009 2515 2131 6945 6313 2012 5312 2000 2156 3065 2061 2485 1999 4767 1012 1026 7987 1013 1028 1026 7987 102\n",
564
+ "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n",
565
+ "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
566
+ "INFO:tensorflow:label: 1 (id = 1)\n",
567
+ "INFO:tensorflow:*** Example ***\n",
568
+ "INFO:tensorflow:guid: None\n",
569
+ "INFO:tensorflow:tokens: [CLS] this movie starts out promising ##ly , with an early scene in which frank morgan advises against gary cooper ' s marriage to his daughter , anita louise . frank morgan , playing an una ##bas ##hed gold - digger , loudly complain ##s to cooper about his perceived pen ##ury at the hands of his family - including his daughter , anita louise . i am a fan of all 3 actors . frank morgan is ( to my mind ) a hollywood treasure , cooper a legend , and louise a very lovely , versatile and under - appreciated actress seldom seen in the leading role . i also have nothing against teresa wright , and while not blessed with great range , she [SEP]\n",
570
+ "INFO:tensorflow:input_ids: 101 2023 3185 4627 2041 10015 2135 1010 2007 2019 2220 3496 1999 2029 3581 5253 25453 2114 5639 6201 1005 1055 3510 2000 2010 2684 1010 12918 8227 1012 3581 5253 1010 2652 2019 14477 22083 9072 2751 1011 28661 1010 9928 17612 2015 2000 6201 2055 2010 8690 7279 13098 2012 1996 2398 1997 2010 2155 1011 2164 2010 2684 1010 12918 8227 1012 1045 2572 1037 5470 1997 2035 1017 5889 1012 3581 5253 2003 1006 2000 2026 2568 1007 1037 5365 8813 1010 6201 1037 5722 1010 1998 8227 1037 2200 8403 1010 22979 1998 2104 1011 12315 3883 15839 2464 1999 1996 2877 2535 1012 1045 2036 2031 2498 2114 12409 6119 1010 1998 2096 2025 10190 2007 2307 2846 1010 2016 102\n",
571
+ "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n",
572
+ "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
573
+ "INFO:tensorflow:label: 0 (id = 0)\n",
574
+ "INFO:tensorflow:*** Example ***\n",
575
+ "INFO:tensorflow:guid: None\n",
576
+ "INFO:tensorflow:tokens: [CLS] i was over ##taken by the emotion . un ##for ##get ##table rendering of a wartime story which is unknown to most people . the performances were fault ##less and outstanding . [SEP]\n",
577
+ "INFO:tensorflow:input_ids: 101 1045 2001 2058 25310 2011 1996 7603 1012 4895 29278 18150 10880 14259 1997 1037 12498 2466 2029 2003 4242 2000 2087 2111 1012 1996 4616 2020 6346 3238 1998 5151 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
578
+ "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
579
+ "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
580
+ "INFO:tensorflow:label: 1 (id = 1)\n",
581
+ "INFO:tensorflow:*** Example ***\n",
582
+ "INFO:tensorflow:guid: None\n",
583
+ "INFO:tensorflow:tokens: [CLS] soldier blue is a movie with pre ##tension ##s : pre ##tension ##s to be some sort of profound statement on man ' s inhuman ##ity to man , on the white man ' s exploitation of and brutality towards indigenous peoples ; a biting , un ##fl ##in ##ching and sar ##don ##ic commentary on the horrors of vietnam . well , sorry , but it fails mis ##era ##bly to be any of those things . what soldier blue actually is is per ##nic ##ious , tri ##te , badly made , dish ##ones ##t rubbish . < br / > < br / > another reviewer here hit the nail on the head in saying that it appears to be a hybrid of [SEP]\n",
584
+ "INFO:tensorflow:input_ids: 101 5268 2630 2003 1037 3185 2007 3653 29048 2015 1024 3653 29048 2015 2000 2022 2070 4066 1997 13769 4861 2006 2158 1005 1055 29582 3012 2000 2158 1010 2006 1996 2317 2158 1005 1055 14427 1997 1998 24083 2875 6284 7243 1025 1037 12344 1010 4895 10258 2378 8450 1998 18906 5280 2594 8570 2006 1996 22812 1997 5148 1012 2092 1010 3374 1010 2021 2009 11896 28616 6906 6321 2000 2022 2151 1997 2216 2477 1012 2054 5268 2630 2941 2003 2003 2566 8713 6313 1010 13012 2618 1010 6649 2081 1010 9841 21821 2102 29132 1012 1026 7987 1013 1028 1026 7987 1013 1028 2178 12027 2182 2718 1996 13774 2006 1996 2132 1999 3038 2008 2009 3544 2000 2022 1037 8893 1997 102\n",
585
+ "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n",
586
+ "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
587
+ "INFO:tensorflow:label: 0 (id = 0)\n",
588
+ "INFO:tensorflow:Writing example 0 of 5000\n",
589
+ "INFO:tensorflow:*** Example ***\n",
590
+ "INFO:tensorflow:guid: None\n",
591
+ "INFO:tensorflow:tokens: [CLS] i just watched this today on tv . it was on abc ' s sunday afternoon movie . < br / > < br / > this wasn ' t a very good movie , but for a low budget independent film like this , it was okay . there is some suspense in it , but there are so many bad qualities that really bring the movie down . the script is pretty lame , and the plot elements aren ' t very realistic , such as the way a 911 operator would laugh and hang up when someone is reporting a murder . i don ' t know what the writer was thinking when they came up with that idea , but it isn [SEP]\n",
592
+ "INFO:tensorflow:input_ids: 101 1045 2074 3427 2023 2651 2006 2694 1012 2009 2001 2006 5925 1005 1055 4465 5027 3185 1012 1026 7987 1013 1028 1026 7987 1013 1028 2023 2347 1005 1056 1037 2200 2204 3185 1010 2021 2005 1037 2659 5166 2981 2143 2066 2023 1010 2009 2001 3100 1012 2045 2003 2070 23873 1999 2009 1010 2021 2045 2024 2061 2116 2919 11647 2008 2428 3288 1996 3185 2091 1012 1996 5896 2003 3492 20342 1010 1998 1996 5436 3787 4995 1005 1056 2200 12689 1010 2107 2004 1996 2126 1037 19989 6872 2052 4756 1998 6865 2039 2043 2619 2003 7316 1037 4028 1012 1045 2123 1005 1056 2113 2054 1996 3213 2001 3241 2043 2027 2234 2039 2007 2008 2801 1010 2021 2009 3475 102\n",
593
+ "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n",
594
+ "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
595
+ "INFO:tensorflow:label: 0 (id = 0)\n",
596
+ "INFO:tensorflow:*** Example ***\n",
597
+ "INFO:tensorflow:guid: None\n",
598
+ "INFO:tensorflow:tokens: [CLS] from hardly alien sounding lasers , to an elementary school style shuttle crash , \" night ##be ##ast \" is better classified as a far ##cic ##al mix of fake blood and bare chest . the almost pornographic style of the film seems to be a failed attempt to recover from a lack of co ##hesive or effective story . the acting however is not nearly as beast ##ly , many of the young , aspiring , actors ad ##mir ##ably showcase a hidden talent . particularly don lei ##fer ##t and jamie ze ##mare ##l , who shed a well needed sha ##rd of light on this otherwise terrible film . night ##be ##ast would have never shown up on set had he known the [SEP]\n",
599
+ "INFO:tensorflow:input_ids: 101 2013 6684 7344 9391 23965 1010 2000 2019 4732 2082 2806 10382 5823 1010 1000 2305 4783 14083 1000 2003 2488 6219 2004 1037 2521 19053 2389 4666 1997 8275 2668 1998 6436 3108 1012 1996 2471 26932 2806 1997 1996 2143 3849 2000 2022 1037 3478 3535 2000 8980 2013 1037 3768 1997 2522 21579 2030 4621 2466 1012 1996 3772 2174 2003 2025 3053 2004 6841 2135 1010 2116 1997 1996 2402 1010 22344 1010 5889 4748 14503 8231 13398 1037 5023 5848 1012 3391 2123 26947 7512 2102 1998 6175 27838 24376 2140 1010 2040 8328 1037 2092 2734 21146 4103 1997 2422 2006 2023 4728 6659 2143 1012 2305 4783 14083 2052 2031 2196 3491 2039 2006 2275 2018 2002 2124 1996 102\n",
600
+ "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n",
601
+ "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
602
+ "INFO:tensorflow:label: 0 (id = 0)\n",
603
+ "INFO:tensorflow:*** Example ***\n",
604
+ "INFO:tensorflow:guid: None\n",
605
+ "INFO:tensorflow:tokens: [CLS] here we have the in ##imi ##table charlie chaplin for ##sa ##king his slap ##stick past to tackle the serious subject of anti - semi ##tism , and into ##ler ##ance in general . he portrays two characters - the sweet , innocent jewish barber - a war veteran , and the ravi ##ng and ruthless dictator , aden ##oid h ##yn ##kel . the jewish ghetto in this country is not safe for long , due to the w ##him ##s of h ##yn ##kel and his armed thugs , who routinely rough up its residents , or leave them alone , dependent upon his mood that day or week . the barber is among them , but is befriended by his former commanding officer [SEP]\n",
606
+ "INFO:tensorflow:input_ids: 101 2182 2057 2031 1996 1999 27605 10880 4918 23331 2005 3736 6834 2010 14308 21354 2627 2000 11147 1996 3809 3395 1997 3424 1011 4100 17456 1010 1998 2046 3917 6651 1999 2236 1012 2002 17509 2048 3494 1011 1996 4086 1010 7036 3644 13362 1011 1037 2162 8003 1010 1998 1996 16806 3070 1998 18101 21237 1010 16298 9314 1044 6038 11705 1012 1996 3644 17276 1999 2023 2406 2003 2025 3647 2005 2146 1010 2349 2000 1996 1059 14341 2015 1997 1044 6038 11705 1998 2010 4273 24106 1010 2040 19974 5931 2039 2049 3901 1010 2030 2681 2068 2894 1010 7790 2588 2010 6888 2008 2154 2030 2733 1012 1996 13362 2003 2426 2068 1010 2021 2003 23386 2011 2010 2280 7991 2961 102\n",
607
+ "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n",
608
+ "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
609
+ "INFO:tensorflow:label: 1 (id = 1)\n",
610
+ "INFO:tensorflow:*** Example ***\n",
611
+ "INFO:tensorflow:guid: None\n",
612
+ "INFO:tensorflow:tokens: [CLS] i really hated this movie and it ' s the first movie written by stephen king that i didn ' t finish . i was truly disappointed , it was the worst crap i ' ve ever seen . what were you thinking making three hours out of it ? it may have a quite good story , but actors ? no . suspense ? no . romance ? no . horror ? no . it didn ' t have anything . < br / > < br / > it ' s got this strange , crazy science man with einstein - hair , the classic thing . not real at all . and a man keep getting younger all the time . it seems [SEP]\n",
613
+ "INFO:tensorflow:input_ids: 101 1045 2428 6283 2023 3185 1998 2009 1005 1055 1996 2034 3185 2517 2011 4459 2332 2008 1045 2134 1005 1056 3926 1012 1045 2001 5621 9364 1010 2009 2001 1996 5409 10231 1045 1005 2310 2412 2464 1012 2054 2020 2017 3241 2437 2093 2847 2041 1997 2009 1029 2009 2089 2031 1037 3243 2204 2466 1010 2021 5889 1029 2053 1012 23873 1029 2053 1012 7472 1029 2053 1012 5469 1029 2053 1012 2009 2134 1005 1056 2031 2505 1012 1026 7987 1013 1028 1026 7987 1013 1028 2009 1005 1055 2288 2023 4326 1010 4689 2671 2158 2007 15313 1011 2606 1010 1996 4438 2518 1012 2025 2613 2012 2035 1012 1998 1037 2158 2562 2893 3920 2035 1996 2051 1012 2009 3849 102\n",
614
+ "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n",
615
+ "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
616
+ "INFO:tensorflow:label: 0 (id = 0)\n",
617
+ "INFO:tensorflow:*** Example ***\n",
618
+ "INFO:tensorflow:guid: None\n",
619
+ "INFO:tensorflow:tokens: [CLS] story chinese tall story tells the story of righteous monk trip ##ita ##ka , who , along with his guardians monkey , sandy and pigs ##y make their journey west on a quest to recover ancient sutra ##s , finally , they reach the final leg of their journey in sha ##che city but all is not as it seems when the city is attacked by evil tree demons . monkey tries his best to battle them but is overwhelmed , knowing his master is in grave danger , he uses his trust ##y golden staff to thrust trip ##ita ##ka to safety . < br / > < br / > the monk ends up being knocked out when he land and when he wakes [SEP]\n",
620
+ "INFO:tensorflow:input_ids: 101 2466 2822 4206 2466 4136 1996 2466 1997 19556 8284 4440 6590 2912 1010 2040 1010 2247 2007 2010 14240 10608 1010 7525 1998 14695 2100 2191 2037 4990 2225 2006 1037 8795 2000 8980 3418 26567 2015 1010 2633 1010 2027 3362 1996 2345 4190 1997 2037 4990 1999 21146 5403 2103 2021 2035 2003 2025 2004 2009 3849 2043 1996 2103 2003 4457 2011 4763 3392 7942 1012 10608 5363 2010 2190 2000 2645 2068 2021 2003 13394 1010 4209 2010 3040 2003 1999 6542 5473 1010 2002 3594 2010 3404 2100 3585 3095 2000 7400 4440 6590 2912 2000 3808 1012 1026 7987 1013 1028 1026 7987 1013 1028 1996 8284 4515 2039 2108 6573 2041 2043 2002 2455 1998 2043 2002 17507 102\n",
621
+ "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n",
622
+ "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
623
+ "INFO:tensorflow:label: 1 (id = 1)\n"
624
+ ],
625
+ "name": "stdout"
626
+ }
627
+ ]
628
+ },
629
+ {
630
+ "metadata": {
631
+ "id": "ccp5trMwRtmr",
632
+ "colab_type": "text"
633
+ },
634
+ "cell_type": "markdown",
635
+ "source": [
636
+ "#Creating a model\n",
637
+ "\n",
638
+ "Now that we've prepared our data, let's focus on building a model. `create_model` does just this below. First, it loads the BERT tf hub module again (this time to extract the computation graph). Next, it creates a single new layer that will be trained to adapt BERT to our sentiment task (i.e. classifying whether a movie review is positive or negative). This strategy of using a mostly trained model is called [fine-tuning](http://wiki.fast.ai/index.php/Fine_tuning)."
639
+ ]
640
+ },
641
+ {
642
+ "metadata": {
643
+ "id": "6o2a5ZIvRcJq",
644
+ "colab_type": "code",
645
+ "colab": {}
646
+ },
647
+ "cell_type": "code",
648
+ "source": [
649
+ "def create_model(is_predicting, input_ids, input_mask, segment_ids, labels,\n",
650
+ " num_labels):\n",
651
+ " \"\"\"Creates a classification model.\"\"\"\n",
652
+ "\n",
653
+ " bert_module = hub.Module(\n",
654
+ " BERT_MODEL_HUB,\n",
655
+ " trainable=True)\n",
656
+ " bert_inputs = dict(\n",
657
+ " input_ids=input_ids,\n",
658
+ " input_mask=input_mask,\n",
659
+ " segment_ids=segment_ids)\n",
660
+ " bert_outputs = bert_module(\n",
661
+ " inputs=bert_inputs,\n",
662
+ " signature=\"tokens\",\n",
663
+ " as_dict=True)\n",
664
+ "\n",
665
+ " # Use \"pooled_output\" for classification tasks on an entire sentence.\n",
666
+ " # Use \"sequence_outputs\" for token-level output.\n",
667
+ " output_layer = bert_outputs[\"pooled_output\"]\n",
668
+ "\n",
669
+ " hidden_size = output_layer.shape[-1].value\n",
670
+ "\n",
671
+ " # Create our own layer to tune for politeness data.\n",
672
+ " output_weights = tf.get_variable(\n",
673
+ " \"output_weights\", [num_labels, hidden_size],\n",
674
+ " initializer=tf.truncated_normal_initializer(stddev=0.02))\n",
675
+ "\n",
676
+ " output_bias = tf.get_variable(\n",
677
+ " \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n",
678
+ "\n",
679
+ " with tf.variable_scope(\"loss\"):\n",
680
+ "\n",
681
+ " # Dropout helps prevent overfitting\n",
682
+ " output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n",
683
+ "\n",
684
+ " logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n",
685
+ " logits = tf.nn.bias_add(logits, output_bias)\n",
686
+ " log_probs = tf.nn.log_softmax(logits, axis=-1)\n",
687
+ "\n",
688
+ " # Convert labels into one-hot encoding\n",
689
+ " one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n",
690
+ "\n",
691
+ " predicted_labels = tf.squeeze(tf.argmax(log_probs, axis=-1, output_type=tf.int32))\n",
692
+ " # If we're predicting, we want predicted labels and the probabiltiies.\n",
693
+ " if is_predicting:\n",
694
+ " return (predicted_labels, log_probs)\n",
695
+ "\n",
696
+ " # If we're train/eval, compute loss between predicted and actual label\n",
697
+ " per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n",
698
+ " loss = tf.reduce_mean(per_example_loss)\n",
699
+ " return (loss, predicted_labels, log_probs)\n"
700
+ ],
701
+ "execution_count": 0,
702
+ "outputs": []
703
+ },
704
+ {
705
+ "metadata": {
706
+ "id": "qpE0ZIDOCQzE",
707
+ "colab_type": "text"
708
+ },
709
+ "cell_type": "markdown",
710
+ "source": [
711
+ "Next we'll wrap our model function in a `model_fn_builder` function that adapts our model to work for training, evaluation, and prediction."
712
+ ]
713
+ },
714
+ {
715
+ "metadata": {
716
+ "id": "FnH-AnOQ9KKW",
717
+ "colab_type": "code",
718
+ "colab": {}
719
+ },
720
+ "cell_type": "code",
721
+ "source": [
722
+ "# model_fn_builder actually creates our model function\n",
723
+ "# using the passed parameters for num_labels, learning_rate, etc.\n",
724
+ "def model_fn_builder(num_labels, learning_rate, num_train_steps,\n",
725
+ " num_warmup_steps):\n",
726
+ " \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n",
727
+ " def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n",
728
+ " \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n",
729
+ "\n",
730
+ " input_ids = features[\"input_ids\"]\n",
731
+ " input_mask = features[\"input_mask\"]\n",
732
+ " segment_ids = features[\"segment_ids\"]\n",
733
+ " label_ids = features[\"label_ids\"]\n",
734
+ "\n",
735
+ " is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n",
736
+ " \n",
737
+ " # TRAIN and EVAL\n",
738
+ " if not is_predicting:\n",
739
+ "\n",
740
+ " (loss, predicted_labels, log_probs) = create_model(\n",
741
+ " is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n",
742
+ "\n",
743
+ " train_op = bert.optimization.create_optimizer(\n",
744
+ " loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)\n",
745
+ "\n",
746
+ " # Calculate evaluation metrics. \n",
747
+ " def metric_fn(label_ids, predicted_labels):\n",
748
+ " accuracy = tf.metrics.accuracy(label_ids, predicted_labels)\n",
749
+ " f1_score = tf.contrib.metrics.f1_score(\n",
750
+ " label_ids,\n",
751
+ " predicted_labels)\n",
752
+ " auc = tf.metrics.auc(\n",
753
+ " label_ids,\n",
754
+ " predicted_labels)\n",
755
+ " recall = tf.metrics.recall(\n",
756
+ " label_ids,\n",
757
+ " predicted_labels)\n",
758
+ " precision = tf.metrics.precision(\n",
759
+ " label_ids,\n",
760
+ " predicted_labels) \n",
761
+ " true_pos = tf.metrics.true_positives(\n",
762
+ " label_ids,\n",
763
+ " predicted_labels)\n",
764
+ " true_neg = tf.metrics.true_negatives(\n",
765
+ " label_ids,\n",
766
+ " predicted_labels) \n",
767
+ " false_pos = tf.metrics.false_positives(\n",
768
+ " label_ids,\n",
769
+ " predicted_labels) \n",
770
+ " false_neg = tf.metrics.false_negatives(\n",
771
+ " label_ids,\n",
772
+ " predicted_labels)\n",
773
+ " return {\n",
774
+ " \"eval_accuracy\": accuracy,\n",
775
+ " \"f1_score\": f1_score,\n",
776
+ " \"auc\": auc,\n",
777
+ " \"precision\": precision,\n",
778
+ " \"recall\": recall,\n",
779
+ " \"true_positives\": true_pos,\n",
780
+ " \"true_negatives\": true_neg,\n",
781
+ " \"false_positives\": false_pos,\n",
782
+ " \"false_negatives\": false_neg\n",
783
+ " }\n",
784
+ "\n",
785
+ " eval_metrics = metric_fn(label_ids, predicted_labels)\n",
786
+ "\n",
787
+ " if mode == tf.estimator.ModeKeys.TRAIN:\n",
788
+ " return tf.estimator.EstimatorSpec(mode=mode,\n",
789
+ " loss=loss,\n",
790
+ " train_op=train_op)\n",
791
+ " else:\n",
792
+ " return tf.estimator.EstimatorSpec(mode=mode,\n",
793
+ " loss=loss,\n",
794
+ " eval_metric_ops=eval_metrics)\n",
795
+ " else:\n",
796
+ " (predicted_labels, log_probs) = create_model(\n",
797
+ " is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n",
798
+ "\n",
799
+ " predictions = {\n",
800
+ " 'probabilities': log_probs,\n",
801
+ " 'labels': predicted_labels\n",
802
+ " }\n",
803
+ " return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n",
804
+ "\n",
805
+ " # Return the actual model function in the closure\n",
806
+ " return model_fn\n"
807
+ ],
808
+ "execution_count": 0,
809
+ "outputs": []
810
+ },
811
+ {
812
+ "metadata": {
813
+ "id": "OjwJ4bTeWXD8",
814
+ "colab_type": "code",
815
+ "colab": {}
816
+ },
817
+ "cell_type": "code",
818
+ "source": [
819
+ "# Compute train and warmup steps from batch size\n",
820
+ "# These hyperparameters are copied from this colab notebook (https://colab.sandbox.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb)\n",
821
+ "BATCH_SIZE = 32\n",
822
+ "LEARNING_RATE = 2e-5\n",
823
+ "NUM_TRAIN_EPOCHS = 3.0\n",
824
+ "# Warmup is a period of time where hte learning rate \n",
825
+ "# is small and gradually increases--usually helps training.\n",
826
+ "WARMUP_PROPORTION = 0.1\n",
827
+ "# Model configs\n",
828
+ "SAVE_CHECKPOINTS_STEPS = 500\n",
829
+ "SAVE_SUMMARY_STEPS = 100"
830
+ ],
831
+ "execution_count": 0,
832
+ "outputs": []
833
+ },
834
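A rough sketch of the schedule that `bert.optimization.create_optimizer` builds from these values (illustrative, not the exact implementation): the learning rate ramps up linearly over the warmup steps, then decays roughly linearly toward zero by the final training step.

    def lr_at(step, base_lr=2e-5, warmup_steps=46, total_steps=468):
        # Linear warmup over the first warmup_steps...
        if step < warmup_steps:
            return base_lr * step / warmup_steps
        # ...then roughly linear decay to zero at total_steps.
        return base_lr * (1.0 - step / total_steps)

The 46 and 468 defaults here match the values computed in the next cell.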
+ {
835
+ "metadata": {
836
+ "id": "emHf9GhfWBZ_",
837
+ "colab_type": "code",
838
+ "colab": {}
839
+ },
840
+ "cell_type": "code",
841
+ "source": [
842
+ "# Compute # train and warmup steps from batch size\n",
843
+ "num_train_steps = int(len(train_features) / BATCH_SIZE * NUM_TRAIN_EPOCHS)\n",
844
+ "num_warmup_steps = int(num_train_steps * WARMUP_PROPORTION)"
845
+ ],
846
+ "execution_count": 0,
847
+ "outputs": []
848
+ },
849
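With the 5,000 training examples converted above, this works out to:

    int(5000 / 32 * 3.0)   # num_train_steps  = 468
    int(468 * 0.1)         # num_warmup_steps = 46

which matches the `global_step = 468` reported in the evaluation output further down.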
+ {
850
+ "metadata": {
851
+ "id": "oEJldMr3WYZa",
852
+ "colab_type": "code",
853
+ "colab": {}
854
+ },
855
+ "cell_type": "code",
856
+ "source": [
857
+ "# Specify outpit directory and number of checkpoint steps to save\n",
858
+ "run_config = tf.estimator.RunConfig(\n",
859
+ " model_dir=OUTPUT_DIR,\n",
860
+ " save_summary_steps=SAVE_SUMMARY_STEPS,\n",
861
+ " save_checkpoints_steps=SAVE_CHECKPOINTS_STEPS)"
862
+ ],
863
+ "execution_count": 0,
864
+ "outputs": []
865
+ },
866
+ {
867
+ "metadata": {
868
+ "id": "q_WebpS1X97v",
869
+ "colab_type": "code",
870
+ "outputId": "1648932a-7391-49d3-8af7-52d514e226e8",
871
+ "colab": {
872
+ "base_uri": "https://localhost:8080/",
873
+ "height": 156
874
+ }
875
+ },
876
+ "cell_type": "code",
877
+ "source": [
878
+ "model_fn = model_fn_builder(\n",
879
+ " num_labels=len(label_list),\n",
880
+ " learning_rate=LEARNING_RATE,\n",
881
+ " num_train_steps=num_train_steps,\n",
882
+ " num_warmup_steps=num_warmup_steps)\n",
883
+ "\n",
884
+ "estimator = tf.estimator.Estimator(\n",
885
+ " model_fn=model_fn,\n",
886
+ " config=run_config,\n",
887
+ " params={\"batch_size\": BATCH_SIZE})\n"
888
+ ],
889
+ "execution_count": 55,
890
+ "outputs": [
891
+ {
892
+ "output_type": "stream",
893
+ "text": [
894
+ "INFO:tensorflow:Using config: {'_model_dir': 'gs://bert-tfhub/aclImdb_v1', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': 500, '_save_checkpoints_secs': None, '_session_config': allow_soft_placement: true\n",
895
+ "graph_options {\n",
896
+ " rewrite_options {\n",
897
+ " meta_optimizer_iterations: ONE\n",
898
+ " }\n",
899
+ "}\n",
900
+ ", '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_service': None, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7fcedb507be0>, '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}\n"
901
+ ],
902
+ "name": "stdout"
903
+ }
904
+ ]
905
+ },
906
+ {
907
+ "metadata": {
908
+ "id": "NOO3RfG1DYLo",
909
+ "colab_type": "text"
910
+ },
911
+ "cell_type": "markdown",
912
+ "source": [
913
+ "Next we create an input builder function that takes our training feature set (`train_features`) and produces a generator. This is a pretty standard design pattern for working with Tensorflow [Estimators](https://www.tensorflow.org/guide/estimators)."
914
+ ]
915
+ },
916
+ {
917
+ "metadata": {
918
+ "id": "1Pv2bAlOX_-K",
919
+ "colab_type": "code",
920
+ "colab": {}
921
+ },
922
+ "cell_type": "code",
923
+ "source": [
924
+ "# Create an input function for training. drop_remainder = True for using TPUs.\n",
925
+ "train_input_fn = bert.run_classifier.input_fn_builder(\n",
926
+ " features=train_features,\n",
927
+ " seq_length=MAX_SEQ_LENGTH,\n",
928
+ " is_training=True,\n",
929
+ " drop_remainder=False)"
930
+ ],
931
+ "execution_count": 0,
932
+ "outputs": []
933
+ },
934
+ {
935
+ "metadata": {
936
+ "id": "t6Nukby2EB6-",
937
+ "colab_type": "text"
938
+ },
939
+ "cell_type": "markdown",
940
+ "source": [
941
+ "Now we train our model! For me, using a Colab notebook running on Google's GPUs, my training time was about 14 minutes."
942
+ ]
943
+ },
944
+ {
945
+ "metadata": {
946
+ "id": "nucD4gluYJmK",
947
+ "colab_type": "code",
948
+ "outputId": "5d728e72-4631-42bf-c48d-3f51d4b968ce",
949
+ "colab": {
950
+ "base_uri": "https://localhost:8080/",
951
+ "height": 68
952
+ }
953
+ },
954
+ "cell_type": "code",
955
+ "source": [
956
+ "print(f'Beginning Training!')\n",
957
+ "current_time = datetime.now()\n",
958
+ "estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)\n",
959
+ "print(\"Training took time \", datetime.now() - current_time)"
960
+ ],
961
+ "execution_count": 57,
962
+ "outputs": [
963
+ {
964
+ "output_type": "stream",
965
+ "text": [
966
+ "Beginning Training!\n",
967
+ "INFO:tensorflow:Skipping training since max_steps has already saved.\n",
968
+ "Training took time 0:00:00.759709\n"
969
+ ],
970
+ "name": "stdout"
971
+ }
972
+ ]
973
+ },
974
+ {
975
+ "metadata": {
976
+ "id": "CmbLTVniARy3",
977
+ "colab_type": "text"
978
+ },
979
+ "cell_type": "markdown",
980
+ "source": [
981
+ "Now let's use our test data to see how well our model did:"
982
+ ]
983
+ },
984
+ {
985
+ "metadata": {
986
+ "id": "JIhejfpyJ8Bx",
987
+ "colab_type": "code",
988
+ "colab": {}
989
+ },
990
+ "cell_type": "code",
991
+ "source": [
992
+ "test_input_fn = run_classifier.input_fn_builder(\n",
993
+ " features=test_features,\n",
994
+ " seq_length=MAX_SEQ_LENGTH,\n",
995
+ " is_training=False,\n",
996
+ " drop_remainder=False)"
997
+ ],
998
+ "execution_count": 0,
999
+ "outputs": []
1000
+ },
1001
+ {
1002
+ "metadata": {
1003
+ "id": "PPVEXhNjYXC-",
1004
+ "colab_type": "code",
1005
+ "outputId": "dd5482cd-c558-465f-c854-ec11a0175316",
1006
+ "colab": {
1007
+ "base_uri": "https://localhost:8080/",
1008
+ "height": 445
1009
+ }
1010
+ },
1011
+ "cell_type": "code",
1012
+ "source": [
1013
+ "estimator.evaluate(input_fn=test_input_fn, steps=None)"
1014
+ ],
1015
+ "execution_count": 59,
1016
+ "outputs": [
1017
+ {
1018
+ "output_type": "stream",
1019
+ "text": [
1020
+ "INFO:tensorflow:Calling model_fn.\n",
1021
+ "INFO:tensorflow:Saver not created because there are no variables in the graph to restore\n"
1022
+ ],
1023
+ "name": "stdout"
1024
+ },
1025
+ {
1026
+ "output_type": "stream",
1027
+ "text": [
1028
+ "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gradients_impl.py:110: UserWarning: Converting sparse IndexedSlices to a dense Tensor of unknown shape. This may consume a large amount of memory.\n",
1029
+ " \"Converting sparse IndexedSlices to a dense Tensor of unknown shape. \"\n"
1030
+ ],
1031
+ "name": "stderr"
1032
+ },
1033
+ {
1034
+ "output_type": "stream",
1035
+ "text": [
1036
+ "INFO:tensorflow:Done calling model_fn.\n",
1037
+ "INFO:tensorflow:Starting evaluation at 2019-02-12T21:04:20Z\n",
1038
+ "INFO:tensorflow:Graph was finalized.\n",
1039
+ "INFO:tensorflow:Restoring parameters from gs://bert-tfhub/aclImdb_v1/model.ckpt-468\n",
1040
+ "INFO:tensorflow:Running local_init_op.\n",
1041
+ "INFO:tensorflow:Done running local_init_op.\n",
1042
+ "INFO:tensorflow:Finished evaluation at 2019-02-12-21:06:05\n",
1043
+ "INFO:tensorflow:Saving dict for global step 468: auc = 0.86659324, eval_accuracy = 0.8664, f1_score = 0.8659711, false_negatives = 375.0, false_positives = 293.0, global_step = 468, loss = 0.51870537, precision = 0.880457, recall = 0.8519542, true_negatives = 2174.0, true_positives = 2158.0\n",
1044
+ "INFO:tensorflow:Saving 'checkpoint_path' summary for global step 468: gs://bert-tfhub/aclImdb_v1/model.ckpt-468\n"
1045
+ ],
1046
+ "name": "stdout"
1047
+ },
1048
+ {
1049
+ "output_type": "execute_result",
1050
+ "data": {
1051
+ "text/plain": [
1052
+ "{'auc': 0.86659324,\n",
1053
+ " 'eval_accuracy': 0.8664,\n",
1054
+ " 'f1_score': 0.8659711,\n",
1055
+ " 'false_negatives': 375.0,\n",
1056
+ " 'false_positives': 293.0,\n",
1057
+ " 'global_step': 468,\n",
1058
+ " 'loss': 0.51870537,\n",
1059
+ " 'precision': 0.880457,\n",
1060
+ " 'recall': 0.8519542,\n",
1061
+ " 'true_negatives': 2174.0,\n",
1062
+ " 'true_positives': 2158.0}"
1063
+ ]
1064
+ },
1065
+ "metadata": {
1066
+ "tags": []
1067
+ },
1068
+ "execution_count": 59
1069
+ }
1070
+ ]
1071
+ },
1072
+ {
1073
+ "metadata": {
1074
+ "id": "ueKsULteiz1B",
1075
+ "colab_type": "text"
1076
+ },
1077
+ "cell_type": "markdown",
1078
+ "source": [
1079
+ "Now let's write code to make predictions on new sentences:"
1080
+ ]
1081
+ },
1082
+ {
1083
+ "metadata": {
1084
+ "id": "OsrbTD2EJTVl",
1085
+ "colab_type": "code",
1086
+ "colab": {}
1087
+ },
1088
+ "cell_type": "code",
1089
+ "source": [
1090
+ "def getPrediction(in_sentences):\n",
1091
+ " labels = [\"Negative\", \"Positive\"]\n",
1092
+ " input_examples = [run_classifier.InputExample(guid=\"\", text_a = x, text_b = None, label = 0) for x in in_sentences] # here, \"\" is just a dummy label\n",
1093
+ " input_features = run_classifier.convert_examples_to_features(input_examples, label_list, MAX_SEQ_LENGTH, tokenizer)\n",
1094
+ " predict_input_fn = run_classifier.input_fn_builder(features=input_features, seq_length=MAX_SEQ_LENGTH, is_training=False, drop_remainder=False)\n",
1095
+ " predictions = estimator.predict(predict_input_fn)\n",
1096
+ " return [(sentence, prediction['probabilities'], labels[prediction['labels']]) for sentence, prediction in zip(in_sentences, predictions)]"
1097
+ ],
1098
+ "execution_count": 0,
1099
+ "outputs": []
1100
+ },
1101
+ {
1102
+ "metadata": {
1103
+ "id": "-thbodgih_VJ",
1104
+ "colab_type": "code",
1105
+ "colab": {}
1106
+ },
1107
+ "cell_type": "code",
1108
+ "source": [
1109
+ "pred_sentences = [\n",
1110
+ " \"That movie was absolutely awful\",\n",
1111
+ " \"The acting was a bit lacking\",\n",
1112
+ " \"The film was creative and surprising\",\n",
1113
+ " \"Absolutely fantastic!\"\n",
1114
+ "]"
1115
+ ],
1116
+ "execution_count": 0,
1117
+ "outputs": []
1118
+ },
1119
+ {
1120
+ "metadata": {
1121
+ "id": "QrZmvZySKQTm",
1122
+ "colab_type": "code",
1123
+ "colab": {
1124
+ "base_uri": "https://localhost:8080/",
1125
+ "height": 649
1126
+ },
1127
+ "outputId": "3891fafb-a460-4eb8-fa6c-335a5bbc10e5"
1128
+ },
1129
+ "cell_type": "code",
1130
+ "source": [
1131
+ "predictions = getPrediction(pred_sentences)"
1132
+ ],
1133
+ "execution_count": 72,
1134
+ "outputs": [
1135
+ {
1136
+ "output_type": "stream",
1137
+ "text": [
1138
+ "INFO:tensorflow:Writing example 0 of 4\n",
1139
+ "INFO:tensorflow:*** Example ***\n",
1140
+ "INFO:tensorflow:guid: \n",
1141
+ "INFO:tensorflow:tokens: [CLS] that movie was absolutely awful [SEP]\n",
1142
+ "INFO:tensorflow:input_ids: 101 2008 3185 2001 7078 9643 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
1143
+ "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
1144
+ "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
1145
+ "INFO:tensorflow:label: 0 (id = 0)\n",
1146
+ "INFO:tensorflow:*** Example ***\n",
1147
+ "INFO:tensorflow:guid: \n",
1148
+ "INFO:tensorflow:tokens: [CLS] the acting was a bit lacking [SEP]\n",
1149
+ "INFO:tensorflow:input_ids: 101 1996 3772 2001 1037 2978 11158 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
1150
+ "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
1151
+ "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
1152
+ "INFO:tensorflow:label: 0 (id = 0)\n",
1153
+ "INFO:tensorflow:*** Example ***\n",
1154
+ "INFO:tensorflow:guid: \n",
1155
+ "INFO:tensorflow:tokens: [CLS] the film was creative and surprising [SEP]\n",
1156
+ "INFO:tensorflow:input_ids: 101 1996 2143 2001 5541 1998 11341 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
1157
+ "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
1158
+ "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
1159
+ "INFO:tensorflow:label: 0 (id = 0)\n",
1160
+ "INFO:tensorflow:*** Example ***\n",
1161
+ "INFO:tensorflow:guid: \n",
1162
+ "INFO:tensorflow:tokens: [CLS] absolutely fantastic ! [SEP]\n",
1163
+ "INFO:tensorflow:input_ids: 101 7078 10392 999 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
1164
+ "INFO:tensorflow:input_mask: 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
1165
+ "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
1166
+ "INFO:tensorflow:label: 0 (id = 0)\n",
1167
+ "INFO:tensorflow:Calling model_fn.\n",
1168
+ "INFO:tensorflow:Saver not created because there are no variables in the graph to restore\n",
1169
+ "INFO:tensorflow:Done calling model_fn.\n",
1170
+ "INFO:tensorflow:Graph was finalized.\n",
1171
+ "INFO:tensorflow:Restoring parameters from gs://bert-tfhub/aclImdb_v1/model.ckpt-468\n",
1172
+ "INFO:tensorflow:Running local_init_op.\n",
1173
+ "INFO:tensorflow:Done running local_init_op.\n"
1174
+ ],
1175
+ "name": "stdout"
1176
+ }
1177
+ ]
1178
+ },
1179
+ {
1180
+ "metadata": {
1181
+ "id": "MXkRiEBUqN3n",
1182
+ "colab_type": "text"
1183
+ },
1184
+ "cell_type": "markdown",
1185
+ "source": [
1186
+ "Voila! We have a sentiment classifier!"
1187
+ ]
1188
+ },
1189
+ {
1190
+ "metadata": {
1191
+ "id": "ERkTE8-7oQLZ",
1192
+ "colab_type": "code",
1193
+ "colab": {
1194
+ "base_uri": "https://localhost:8080/",
1195
+ "height": 221
1196
+ },
1197
+ "outputId": "26c33224-dc2c-4b3d-f7b4-ac3ef0a58b27"
1198
+ },
1199
+ "cell_type": "code",
1200
+ "source": [
1201
+ "predictions"
1202
+ ],
1203
+ "execution_count": 73,
1204
+ "outputs": [
1205
+ {
1206
+ "output_type": "execute_result",
1207
+ "data": {
1208
+ "text/plain": [
1209
+ "[('That movie was absolutely awful',\n",
1210
+ " array([-4.9142293e-03, -5.3180690e+00], dtype=float32),\n",
1211
+ " 'Negative'),\n",
1212
+ " ('The acting was a bit lacking',\n",
1213
+ " array([-0.03325794, -3.4200459 ], dtype=float32),\n",
1214
+ " 'Negative'),\n",
1215
+ " ('The film was creative and surprising',\n",
1216
+ " array([-5.3589125e+00, -4.7171740e-03], dtype=float32),\n",
1217
+ " 'Positive'),\n",
1218
+ " ('Absolutely fantastic!',\n",
1219
+ " array([-5.0434084 , -0.00647258], dtype=float32),\n",
1220
+ " 'Positive')]"
1221
+ ]
1222
+ },
1223
+ "metadata": {
1224
+ "tags": []
1225
+ },
1226
+ "execution_count": 73
1227
+ }
1228
+ ]
1229
+ }
1230
+ ]
1231
+ }
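One note on reading that final output: the array in each tuple comes from `tf.nn.log_softmax`, so the entries are log-probabilities; exponentiating recovers the class probabilities. For example, for "Absolutely fantastic!":

    import numpy as np
    np.exp([-5.0434084, -0.00647258])   # ~[0.0065, 0.9935], i.e. 99.4% Positive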
RIS-DMMI/bert/requirements.txt ADDED
@@ -0,0 +1,2 @@
1
+ tensorflow >= 1.11.0 # CPU Version of TensorFlow.
2
+ # tensorflow-gpu >= 1.11.0 # GPU version of TensorFlow.
RIS-DMMI/bert/run_classifier.py ADDED
@@ -0,0 +1,981 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """BERT finetuning runner."""
16
+
17
+ from __future__ import absolute_import
18
+ from __future__ import division
19
+ from __future__ import print_function
20
+
21
+ import collections
22
+ import csv
23
+ import os
24
+ import modeling
25
+ import optimization
26
+ import tokenization
27
+ import tensorflow as tf
28
+
29
+ flags = tf.flags
30
+
31
+ FLAGS = flags.FLAGS
32
+
33
+ ## Required parameters
34
+ flags.DEFINE_string(
35
+ "data_dir", None,
36
+ "The input data dir. Should contain the .tsv files (or other data files) "
37
+ "for the task.")
38
+
39
+ flags.DEFINE_string(
40
+ "bert_config_file", None,
41
+ "The config json file corresponding to the pre-trained BERT model. "
42
+ "This specifies the model architecture.")
43
+
44
+ flags.DEFINE_string("task_name", None, "The name of the task to train.")
45
+
46
+ flags.DEFINE_string("vocab_file", None,
47
+ "The vocabulary file that the BERT model was trained on.")
48
+
49
+ flags.DEFINE_string(
50
+ "output_dir", None,
51
+ "The output directory where the model checkpoints will be written.")
52
+
53
+ ## Other parameters
54
+
55
+ flags.DEFINE_string(
56
+ "init_checkpoint", None,
57
+ "Initial checkpoint (usually from a pre-trained BERT model).")
58
+
59
+ flags.DEFINE_bool(
60
+ "do_lower_case", True,
61
+ "Whether to lower case the input text. Should be True for uncased "
62
+ "models and False for cased models.")
63
+
64
+ flags.DEFINE_integer(
65
+ "max_seq_length", 128,
66
+ "The maximum total input sequence length after WordPiece tokenization. "
67
+ "Sequences longer than this will be truncated, and sequences shorter "
68
+ "than this will be padded.")
69
+
70
+ flags.DEFINE_bool("do_train", False, "Whether to run training.")
71
+
72
+ flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
73
+
74
+ flags.DEFINE_bool(
75
+ "do_predict", False,
76
+ "Whether to run the model in inference mode on the test set.")
77
+
78
+ flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
79
+
80
+ flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
81
+
82
+ flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
83
+
84
+ flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
85
+
86
+ flags.DEFINE_float("num_train_epochs", 3.0,
87
+ "Total number of training epochs to perform.")
88
+
89
+ flags.DEFINE_float(
90
+ "warmup_proportion", 0.1,
91
+ "Proportion of training to perform linear learning rate warmup for. "
92
+ "E.g., 0.1 = 10% of training.")
93
+
94
+ flags.DEFINE_integer("save_checkpoints_steps", 1000,
95
+ "How often to save the model checkpoint.")
96
+
97
+ flags.DEFINE_integer("iterations_per_loop", 1000,
98
+ "How many steps to make in each estimator call.")
99
+
100
+ flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
101
+
102
+ tf.flags.DEFINE_string(
103
+ "tpu_name", None,
104
+ "The Cloud TPU to use for training. This should be either the name "
105
+ "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
106
+ "url.")
107
+
108
+ tf.flags.DEFINE_string(
109
+ "tpu_zone", None,
110
+ "[Optional] GCE zone where the Cloud TPU is located in. If not "
111
+ "specified, we will attempt to automatically detect the GCE project from "
112
+ "metadata.")
113
+
114
+ tf.flags.DEFINE_string(
115
+ "gcp_project", None,
116
+ "[Optional] Project name for the Cloud TPU-enabled project. If not "
117
+ "specified, we will attempt to automatically detect the GCE project from "
118
+ "metadata.")
119
+
120
+ tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
121
+
122
+ flags.DEFINE_integer(
123
+ "num_tpu_cores", 8,
124
+ "Only used if `use_tpu` is True. Total number of TPU cores to use.")
125
+
126
+
127
+ class InputExample(object):
128
+ """A single training/test example for simple sequence classification."""
129
+
130
+ def __init__(self, guid, text_a, text_b=None, label=None):
131
+ """Constructs an InputExample.
132
+
133
+ Args:
134
+ guid: Unique id for the example.
135
+ text_a: string. The untokenized text of the first sequence. For single
136
+ sequence tasks, only this sequence must be specified.
137
+ text_b: (Optional) string. The untokenized text of the second sequence.
138
+ Only must be specified for sequence pair tasks.
139
+ label: (Optional) string. The label of the example. This should be
140
+ specified for train and dev examples, but not for test examples.
141
+ """
142
+ self.guid = guid
143
+ self.text_a = text_a
144
+ self.text_b = text_b
145
+ self.label = label
146
+
147
+
148
+ class PaddingInputExample(object):
149
+ """Fake example so that the number of input examples is a multiple of the batch size.
150
+
151
+ When running eval/predict on the TPU, we need to pad the number of examples
152
+ to be a multiple of the batch size, because the TPU requires a fixed batch
153
+ size. The alternative is to drop the last batch, which is bad because it means
154
+ the entire output data won't be generated.
155
+
156
+ We use this class instead of `None` because treating `None` as padding
157
+ batches could cause silent errors.
158
+ """
159
+
160
+
161
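The TPU code paths later in this file pad along these lines (a sketch; `examples` and `batch_size` are illustrative names):

    # Append padding instances until the count divides evenly into batches.
    while len(examples) % batch_size != 0:
      examples.append(PaddingInputExample())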
+ class InputFeatures(object):
162
+ """A single set of features of data."""
163
+
164
+ def __init__(self,
165
+ input_ids,
166
+ input_mask,
167
+ segment_ids,
168
+ label_id,
169
+ is_real_example=True):
170
+ self.input_ids = input_ids
171
+ self.input_mask = input_mask
172
+ self.segment_ids = segment_ids
173
+ self.label_id = label_id
174
+ self.is_real_example = is_real_example
175
+
176
+
177
+ class DataProcessor(object):
178
+ """Base class for data converters for sequence classification data sets."""
179
+
180
+ def get_train_examples(self, data_dir):
181
+ """Gets a collection of `InputExample`s for the train set."""
182
+ raise NotImplementedError()
183
+
184
+ def get_dev_examples(self, data_dir):
185
+ """Gets a collection of `InputExample`s for the dev set."""
186
+ raise NotImplementedError()
187
+
188
+ def get_test_examples(self, data_dir):
189
+ """Gets a collection of `InputExample`s for prediction."""
190
+ raise NotImplementedError()
191
+
192
+ def get_labels(self):
193
+ """Gets the list of labels for this data set."""
194
+ raise NotImplementedError()
195
+
196
+ @classmethod
197
+ def _read_tsv(cls, input_file, quotechar=None):
198
+ """Reads a tab separated value file."""
199
+ with tf.gfile.Open(input_file, "r") as f:
200
+ reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
201
+ lines = []
202
+ for line in reader:
203
+ lines.append(line)
204
+ return lines
205
+
206
+
207
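Each task-specific processor below follows the same pattern: subclass `DataProcessor` and implement the getters above. As a sketch, a processor for a hypothetical two-column `text<TAB>label` TSV could look like this (it relies on this file's `os` and `tokenization` imports):

    class TwoColumnProcessor(DataProcessor):
      """Illustrative processor for a hypothetical `text<TAB>label` TSV."""

      def get_train_examples(self, data_dir):
        examples = []
        lines = self._read_tsv(os.path.join(data_dir, "train.tsv"))
        for (i, line) in enumerate(lines):
          examples.append(
              InputExample(guid="train-%d" % i,
                           text_a=tokenization.convert_to_unicode(line[0]),
                           text_b=None,
                           label=tokenization.convert_to_unicode(line[1])))
        return examples

      def get_labels(self):
        return ["0", "1"]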
+ class XnliProcessor(DataProcessor):
208
+ """Processor for the XNLI data set."""
209
+
210
+ def __init__(self):
211
+ self.language = "zh"
212
+
213
+ def get_train_examples(self, data_dir):
214
+ """See base class."""
215
+ lines = self._read_tsv(
216
+ os.path.join(data_dir, "multinli",
217
+ "multinli.train.%s.tsv" % self.language))
218
+ examples = []
219
+ for (i, line) in enumerate(lines):
220
+ if i == 0:
221
+ continue
222
+ guid = "train-%d" % (i)
223
+ text_a = tokenization.convert_to_unicode(line[0])
224
+ text_b = tokenization.convert_to_unicode(line[1])
225
+ label = tokenization.convert_to_unicode(line[2])
226
+ if label == tokenization.convert_to_unicode("contradictory"):
227
+ label = tokenization.convert_to_unicode("contradiction")
228
+ examples.append(
229
+ InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
230
+ return examples
231
+
232
+ def get_dev_examples(self, data_dir):
233
+ """See base class."""
234
+ lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
235
+ examples = []
236
+ for (i, line) in enumerate(lines):
237
+ if i == 0:
238
+ continue
239
+ guid = "dev-%d" % (i)
240
+ language = tokenization.convert_to_unicode(line[0])
241
+ if language != tokenization.convert_to_unicode(self.language):
242
+ continue
243
+ text_a = tokenization.convert_to_unicode(line[6])
244
+ text_b = tokenization.convert_to_unicode(line[7])
245
+ label = tokenization.convert_to_unicode(line[1])
246
+ examples.append(
247
+ InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
248
+ return examples
249
+
250
+ def get_labels(self):
251
+ """See base class."""
252
+ return ["contradiction", "entailment", "neutral"]
253
+
254
+
255
+ class MnliProcessor(DataProcessor):
256
+ """Processor for the MultiNLI data set (GLUE version)."""
257
+
258
+ def get_train_examples(self, data_dir):
259
+ """See base class."""
260
+ return self._create_examples(
261
+ self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
262
+
263
+ def get_dev_examples(self, data_dir):
264
+ """See base class."""
265
+ return self._create_examples(
266
+ self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
267
+ "dev_matched")
268
+
269
+ def get_test_examples(self, data_dir):
270
+ """See base class."""
271
+ return self._create_examples(
272
+ self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")
273
+
274
+ def get_labels(self):
275
+ """See base class."""
276
+ return ["contradiction", "entailment", "neutral"]
277
+
278
+ def _create_examples(self, lines, set_type):
279
+ """Creates examples for the training and dev sets."""
280
+ examples = []
281
+ for (i, line) in enumerate(lines):
282
+ if i == 0:
283
+ continue
284
+ guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
285
+ text_a = tokenization.convert_to_unicode(line[8])
286
+ text_b = tokenization.convert_to_unicode(line[9])
287
+ if set_type == "test":
288
+ label = "contradiction"
289
+ else:
290
+ label = tokenization.convert_to_unicode(line[-1])
291
+ examples.append(
292
+ InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
293
+ return examples
294
+
295
+
296
+ class MrpcProcessor(DataProcessor):
297
+ """Processor for the MRPC data set (GLUE version)."""
298
+
299
+ def get_train_examples(self, data_dir):
300
+ """See base class."""
301
+ return self._create_examples(
302
+ self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
303
+
304
+ def get_dev_examples(self, data_dir):
305
+ """See base class."""
306
+ return self._create_examples(
307
+ self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
308
+
309
+ def get_test_examples(self, data_dir):
310
+ """See base class."""
311
+ return self._create_examples(
312
+ self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
313
+
314
+ def get_labels(self):
315
+ """See base class."""
316
+ return ["0", "1"]
317
+
318
+ def _create_examples(self, lines, set_type):
319
+ """Creates examples for the training and dev sets."""
320
+ examples = []
321
+ for (i, line) in enumerate(lines):
322
+ if i == 0:
323
+ continue
324
+ guid = "%s-%s" % (set_type, i)
325
+ text_a = tokenization.convert_to_unicode(line[3])
326
+ text_b = tokenization.convert_to_unicode(line[4])
327
+ if set_type == "test":
328
+ label = "0"
329
+ else:
330
+ label = tokenization.convert_to_unicode(line[0])
331
+ examples.append(
332
+ InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
333
+ return examples
334
+
335
+
336
+ class ColaProcessor(DataProcessor):
337
+ """Processor for the CoLA data set (GLUE version)."""
338
+
339
+ def get_train_examples(self, data_dir):
340
+ """See base class."""
341
+ return self._create_examples(
342
+ self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
343
+
344
+ def get_dev_examples(self, data_dir):
345
+ """See base class."""
346
+ return self._create_examples(
347
+ self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
348
+
349
+ def get_test_examples(self, data_dir):
350
+ """See base class."""
351
+ return self._create_examples(
352
+ self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
353
+
354
+ def get_labels(self):
355
+ """See base class."""
356
+ return ["0", "1"]
357
+
358
+ def _create_examples(self, lines, set_type):
359
+ """Creates examples for the training and dev sets."""
360
+ examples = []
361
+ for (i, line) in enumerate(lines):
362
+ # Only the test set has a header
363
+ if set_type == "test" and i == 0:
364
+ continue
365
+ guid = "%s-%s" % (set_type, i)
366
+ if set_type == "test":
367
+ text_a = tokenization.convert_to_unicode(line[1])
368
+ label = "0"
369
+ else:
370
+ text_a = tokenization.convert_to_unicode(line[3])
371
+ label = tokenization.convert_to_unicode(line[1])
372
+ examples.append(
373
+ InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
374
+ return examples
375
+
376
+
377
+ def convert_single_example(ex_index, example, label_list, max_seq_length,
378
+ tokenizer):
379
+ """Converts a single `InputExample` into a single `InputFeatures`."""
380
+
381
+ if isinstance(example, PaddingInputExample):
382
+ return InputFeatures(
383
+ input_ids=[0] * max_seq_length,
384
+ input_mask=[0] * max_seq_length,
385
+ segment_ids=[0] * max_seq_length,
386
+ label_id=0,
387
+ is_real_example=False)
388
+
389
+ label_map = {}
390
+ for (i, label) in enumerate(label_list):
391
+ label_map[label] = i
392
+
393
+ tokens_a = tokenizer.tokenize(example.text_a)
394
+ tokens_b = None
395
+ if example.text_b:
396
+ tokens_b = tokenizer.tokenize(example.text_b)
397
+
398
+ if tokens_b:
399
+ # Modifies `tokens_a` and `tokens_b` in place so that the total
400
+ # length is at most the specified length.
401
+ # Account for [CLS], [SEP], [SEP] with "- 3"
402
+ _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
403
+ else:
404
+ # Account for [CLS] and [SEP] with "- 2"
405
+ if len(tokens_a) > max_seq_length - 2:
406
+ tokens_a = tokens_a[0:(max_seq_length - 2)]
407
+
408
+ # The convention in BERT is:
409
+ # (a) For sequence pairs:
410
+ # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
411
+ # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
412
+ # (b) For single sequences:
413
+ # tokens: [CLS] the dog is hairy . [SEP]
414
+ # type_ids: 0 0 0 0 0 0 0
415
+ #
416
+ # Where "type_ids" are used to indicate whether this is the first
417
+ # sequence or the second sequence. The embedding vectors for `type=0` and
418
+ # `type=1` were learned during pre-training and are added to the wordpiece
419
+ # embedding vector (and position vector). This is not *strictly* necessary
420
+ # since the [SEP] token unambiguously separates the sequences, but it makes
421
+ # it easier for the model to learn the concept of sequences.
422
+ #
423
+ # For classification tasks, the first vector (corresponding to [CLS]) is
424
+ # used as the "sentence vector". Note that this only makes sense because
425
+ # the entire model is fine-tuned.
426
+ tokens = []
427
+ segment_ids = []
428
+ tokens.append("[CLS]")
429
+ segment_ids.append(0)
430
+ for token in tokens_a:
431
+ tokens.append(token)
432
+ segment_ids.append(0)
433
+ tokens.append("[SEP]")
434
+ segment_ids.append(0)
435
+
436
+ if tokens_b:
437
+ for token in tokens_b:
438
+ tokens.append(token)
439
+ segment_ids.append(1)
440
+ tokens.append("[SEP]")
441
+ segment_ids.append(1)
442
+
443
+ input_ids = tokenizer.convert_tokens_to_ids(tokens)
444
+
445
+ # The mask has 1 for real tokens and 0 for padding tokens. Only real
446
+ # tokens are attended to.
447
+ input_mask = [1] * len(input_ids)
448
+
449
+ # Zero-pad up to the sequence length.
450
+ while len(input_ids) < max_seq_length:
451
+ input_ids.append(0)
452
+ input_mask.append(0)
453
+ segment_ids.append(0)
454
+
455
+ assert len(input_ids) == max_seq_length
456
+ assert len(input_mask) == max_seq_length
457
+ assert len(segment_ids) == max_seq_length
458
+
459
+ label_id = label_map[example.label]
460
+ if ex_index < 5:
461
+ tf.logging.info("*** Example ***")
462
+ tf.logging.info("guid: %s" % (example.guid))
463
+ tf.logging.info("tokens: %s" % " ".join(
464
+ [tokenization.printable_text(x) for x in tokens]))
465
+ tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
466
+ tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
467
+ tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
468
+ tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
469
+
470
+ feature = InputFeatures(
471
+ input_ids=input_ids,
472
+ input_mask=input_mask,
473
+ segment_ids=segment_ids,
474
+ label_id=label_id,
475
+ is_real_example=True)
476
+ return feature
477
+
478
+
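+ # Editor's note (not part of the original file): a worked example of the
+ # layout produced above for a sequence pair with max_seq_length = 10:
+ #   tokens:      [CLS] is this jack [SEP] no it [SEP] <pad> <pad>
+ #   segment_ids:   0    0    0    0    0   1  1    1    0     0
+ #   input_mask:    1    1    1    1    1   1  1    1    0     0
+ def _check_feature_invariants(feature, max_seq_length):
+     """Editor's sketch: asserts the invariants established above."""
+     assert len(feature.input_ids) == max_seq_length
+     assert len(feature.input_mask) == max_seq_length
+     assert len(feature.segment_ids) == max_seq_length
+     # Real tokens come first, so the mask is a run of 1s followed by 0s.
+     assert sorted(feature.input_mask, reverse=True) == feature.input_mask
+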
479
+ def file_based_convert_examples_to_features(
480
+ examples, label_list, max_seq_length, tokenizer, output_file):
481
+ """Convert a set of `InputExample`s to a TFRecord file."""
482
+
483
+ writer = tf.python_io.TFRecordWriter(output_file)
484
+
485
+ for (ex_index, example) in enumerate(examples):
486
+ if ex_index % 10000 == 0:
487
+ tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
488
+
489
+ feature = convert_single_example(ex_index, example, label_list,
490
+ max_seq_length, tokenizer)
491
+
492
+ def create_int_feature(values):
493
+ f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
494
+ return f
495
+
496
+ features = collections.OrderedDict()
497
+ features["input_ids"] = create_int_feature(feature.input_ids)
498
+ features["input_mask"] = create_int_feature(feature.input_mask)
499
+ features["segment_ids"] = create_int_feature(feature.segment_ids)
500
+ features["label_ids"] = create_int_feature([feature.label_id])
501
+ features["is_real_example"] = create_int_feature(
502
+ [int(feature.is_real_example)])
503
+
504
+ tf_example = tf.train.Example(features=tf.train.Features(feature=features))
505
+ writer.write(tf_example.SerializeToString())
506
+ writer.close()
507
+
508
+
509
+ def file_based_input_fn_builder(input_file, seq_length, is_training,
510
+ drop_remainder):
511
+ """Creates an `input_fn` closure to be passed to TPUEstimator."""
512
+
513
+ name_to_features = {
514
+ "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
515
+ "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
516
+ "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
517
+ "label_ids": tf.FixedLenFeature([], tf.int64),
518
+ "is_real_example": tf.FixedLenFeature([], tf.int64),
519
+ }
520
+
521
+ def _decode_record(record, name_to_features):
522
+ """Decodes a record to a TensorFlow example."""
523
+ example = tf.parse_single_example(record, name_to_features)
524
+
525
+ # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
526
+ # So cast all int64 to int32.
527
+ for name in list(example.keys()):
528
+ t = example[name]
529
+ if t.dtype == tf.int64:
530
+ t = tf.to_int32(t)
531
+ example[name] = t
532
+
533
+ return example
534
+
535
+ def input_fn(params):
536
+ """The actual input function."""
537
+ batch_size = params["batch_size"]
538
+
539
+ # For training, we want a lot of parallel reading and shuffling.
540
+ # For eval, we want no shuffling and parallel reading doesn't matter.
541
+ d = tf.data.TFRecordDataset(input_file)
542
+ if is_training:
543
+ d = d.repeat()
544
+ d = d.shuffle(buffer_size=100)
545
+
546
+ d = d.apply(
547
+ tf.contrib.data.map_and_batch(
548
+ lambda record: _decode_record(record, name_to_features),
549
+ batch_size=batch_size,
550
+ drop_remainder=drop_remainder))
551
+
552
+ return d
553
+
554
+ return input_fn
555
+
556
+
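+ # Editor's note (not part of the original file): a hedged usage sketch.
+ # Under the TF 1.x Estimator API, `params["batch_size"]` is injected by
+ # TPUEstimator, so a standalone call must supply it by hand:
+ #
+ #   input_fn = file_based_input_fn_builder(
+ #       input_file="train.tf_record", seq_length=128,
+ #       is_training=True, drop_remainder=True)
+ #   dataset = input_fn({"batch_size": 32})  # a tf.data.Dataset of dicts
+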
557
+ def _truncate_seq_pair(tokens_a, tokens_b, max_length):
558
+ """Truncates a sequence pair in place to the maximum length."""
559
+
560
+ # This is a simple heuristic which will always truncate the longer sequence
561
+ # one token at a time. This makes more sense than truncating an equal percent
562
+ # of tokens from each, since if one sequence is very short then each token
563
+ # that's truncated likely contains more information than a token from the longer sequence.
564
+ while True:
565
+ total_length = len(tokens_a) + len(tokens_b)
566
+ if total_length <= max_length:
567
+ break
568
+ if len(tokens_a) > len(tokens_b):
569
+ tokens_a.pop()
570
+ else:
571
+ tokens_b.pop()
572
+
573
+
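+ # Editor's demo (not part of the original file) of the heuristic above,
+ # using made-up token lists: only the longer side loses tokens.
+ def _demo_truncate_seq_pair():
+     a = ["tok%d" % i for i in range(9)]
+     b = ["tok%d" % i for i in range(3)]
+     _truncate_seq_pair(a, b, 8)
+     # 9 + 3 = 12 tokens shrink to 5 + 3 = 8; `b` was never touched.
+     assert (len(a), len(b)) == (5, 3)
+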
574
+ def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
575
+ labels, num_labels, use_one_hot_embeddings):
576
+ """Creates a classification model."""
577
+ model = modeling.BertModel(
578
+ config=bert_config,
579
+ is_training=is_training,
580
+ input_ids=input_ids,
581
+ input_mask=input_mask,
582
+ token_type_ids=segment_ids,
583
+ use_one_hot_embeddings=use_one_hot_embeddings)
584
+
585
+ # In the demo, we are doing a simple classification task on the entire
586
+ # segment.
587
+ #
588
+ # If you want to use the token-level output, use model.get_sequence_output()
589
+ # instead.
590
+ output_layer = model.get_pooled_output()
591
+
592
+ hidden_size = output_layer.shape[-1].value
593
+
594
+ output_weights = tf.get_variable(
595
+ "output_weights", [num_labels, hidden_size],
596
+ initializer=tf.truncated_normal_initializer(stddev=0.02))
597
+
598
+ output_bias = tf.get_variable(
599
+ "output_bias", [num_labels], initializer=tf.zeros_initializer())
600
+
601
+ with tf.variable_scope("loss"):
602
+ if is_training:
603
+ # I.e., 0.1 dropout
604
+ output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
605
+
606
+ logits = tf.matmul(output_layer, output_weights, transpose_b=True)
607
+ logits = tf.nn.bias_add(logits, output_bias)
608
+ probabilities = tf.nn.softmax(logits, axis=-1)
609
+ log_probs = tf.nn.log_softmax(logits, axis=-1)
610
+
611
+ one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
612
+
613
+ per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
614
+ loss = tf.reduce_mean(per_example_loss)
615
+
616
+ return (loss, per_example_loss, logits, probabilities)
617
+
618
+
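+ # Editor's sketch (not part of the original file): the loss above is plain
+ # softmax cross-entropy, i.e. the negative log-probability assigned to the
+ # true label. A toy scalar version, using only the standard library:
+ import math
+
+ def _toy_cross_entropy(logits, label):
+     log_z = math.log(sum(math.exp(l) for l in logits))
+     log_probs = [l - log_z for l in logits]
+     return -log_probs[label]
+
+ # e.g. _toy_cross_entropy([2.0, 0.5], 0) ~= 0.20: a confident correct
+ # prediction yields a small loss.
+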
619
+ def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
620
+ num_train_steps, num_warmup_steps, use_tpu,
621
+ use_one_hot_embeddings):
622
+ """Returns `model_fn` closure for TPUEstimator."""
623
+
624
+ def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
625
+ """The `model_fn` for TPUEstimator."""
626
+
627
+ tf.logging.info("*** Features ***")
628
+ for name in sorted(features.keys()):
629
+ tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
630
+
631
+ input_ids = features["input_ids"]
632
+ input_mask = features["input_mask"]
633
+ segment_ids = features["segment_ids"]
634
+ label_ids = features["label_ids"]
635
+ is_real_example = None
636
+ if "is_real_example" in features:
637
+ is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
638
+ else:
639
+ is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
640
+
641
+ is_training = (mode == tf.estimator.ModeKeys.TRAIN)
642
+
643
+ (total_loss, per_example_loss, logits, probabilities) = create_model(
644
+ bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
645
+ num_labels, use_one_hot_embeddings)
646
+
647
+ tvars = tf.trainable_variables()
648
+ initialized_variable_names = {}
649
+ scaffold_fn = None
650
+ if init_checkpoint:
651
+ (assignment_map, initialized_variable_names
652
+ ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
653
+ if use_tpu:
654
+
655
+ def tpu_scaffold():
656
+ tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
657
+ return tf.train.Scaffold()
658
+
659
+ scaffold_fn = tpu_scaffold
660
+ else:
661
+ tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
662
+
663
+ tf.logging.info("**** Trainable Variables ****")
664
+ for var in tvars:
665
+ init_string = ""
666
+ if var.name in initialized_variable_names:
667
+ init_string = ", *INIT_FROM_CKPT*"
668
+ tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
669
+ init_string)
670
+
671
+ output_spec = None
672
+ if mode == tf.estimator.ModeKeys.TRAIN:
673
+
674
+ train_op = optimization.create_optimizer(
675
+ total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
676
+
677
+ output_spec = tf.contrib.tpu.TPUEstimatorSpec(
678
+ mode=mode,
679
+ loss=total_loss,
680
+ train_op=train_op,
681
+ scaffold_fn=scaffold_fn)
682
+ elif mode == tf.estimator.ModeKeys.EVAL:
683
+
684
+ def metric_fn(per_example_loss, label_ids, logits, is_real_example):
685
+ predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
686
+ accuracy = tf.metrics.accuracy(
687
+ labels=label_ids, predictions=predictions, weights=is_real_example)
688
+ loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
689
+ return {
690
+ "eval_accuracy": accuracy,
691
+ "eval_loss": loss,
692
+ }
693
+
694
+ eval_metrics = (metric_fn,
695
+ [per_example_loss, label_ids, logits, is_real_example])
696
+ output_spec = tf.contrib.tpu.TPUEstimatorSpec(
697
+ mode=mode,
698
+ loss=total_loss,
699
+ eval_metrics=eval_metrics,
700
+ scaffold_fn=scaffold_fn)
701
+ else:
702
+ output_spec = tf.contrib.tpu.TPUEstimatorSpec(
703
+ mode=mode,
704
+ predictions={"probabilities": probabilities},
705
+ scaffold_fn=scaffold_fn)
706
+ return output_spec
707
+
708
+ return model_fn
709
+
710
+
711
+ # This function is not used by this file, but it is kept for the Colab
712
+ # notebook and for other code that depends on it.
713
+ def input_fn_builder(features, seq_length, is_training, drop_remainder):
714
+ """Creates an `input_fn` closure to be passed to TPUEstimator."""
715
+
716
+ all_input_ids = []
717
+ all_input_mask = []
718
+ all_segment_ids = []
719
+ all_label_ids = []
720
+
721
+ for feature in features:
722
+ all_input_ids.append(feature.input_ids)
723
+ all_input_mask.append(feature.input_mask)
724
+ all_segment_ids.append(feature.segment_ids)
725
+ all_label_ids.append(feature.label_id)
726
+
727
+ def input_fn(params):
728
+ """The actual input function."""
729
+ batch_size = params["batch_size"]
730
+
731
+ num_examples = len(features)
732
+
733
+ # This is for demo purposes and does NOT scale to large data sets. We do
734
+ # not use Dataset.from_generator() because that uses tf.py_func which is
735
+ # not TPU compatible. The right way to load data is with TFRecordReader.
736
+ d = tf.data.Dataset.from_tensor_slices({
737
+ "input_ids":
738
+ tf.constant(
739
+ all_input_ids, shape=[num_examples, seq_length],
740
+ dtype=tf.int32),
741
+ "input_mask":
742
+ tf.constant(
743
+ all_input_mask,
744
+ shape=[num_examples, seq_length],
745
+ dtype=tf.int32),
746
+ "segment_ids":
747
+ tf.constant(
748
+ all_segment_ids,
749
+ shape=[num_examples, seq_length],
750
+ dtype=tf.int32),
751
+ "label_ids":
752
+ tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
753
+ })
754
+
755
+ if is_training:
756
+ d = d.repeat()
757
+ d = d.shuffle(buffer_size=100)
758
+
759
+ d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
760
+ return d
761
+
762
+ return input_fn
763
+
764
+
765
+ # This function is not used by this file, but it is kept for the Colab
766
+ # notebook and for other code that depends on it.
767
+ def convert_examples_to_features(examples, label_list, max_seq_length,
768
+ tokenizer):
769
+ """Convert a set of `InputExample`s to a list of `InputFeatures`."""
770
+
771
+ features = []
772
+ for (ex_index, example) in enumerate(examples):
773
+ if ex_index % 10000 == 0:
774
+ tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
775
+
776
+ feature = convert_single_example(ex_index, example, label_list,
777
+ max_seq_length, tokenizer)
778
+
779
+ features.append(feature)
780
+ return features
781
+
782
+
783
+ def main(_):
784
+ tf.logging.set_verbosity(tf.logging.INFO)
785
+
786
+ processors = {
787
+ "cola": ColaProcessor,
788
+ "mnli": MnliProcessor,
789
+ "mrpc": MrpcProcessor,
790
+ "xnli": XnliProcessor,
791
+ }
792
+
793
+ tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
794
+ FLAGS.init_checkpoint)
795
+
796
+ if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
797
+ raise ValueError(
798
+ "At least one of `do_train`, `do_eval` or `do_predict' must be True.")
799
+
800
+ bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
801
+
802
+ if FLAGS.max_seq_length > bert_config.max_position_embeddings:
803
+ raise ValueError(
804
+ "Cannot use sequence length %d because the BERT model "
805
+ "was only trained up to sequence length %d" %
806
+ (FLAGS.max_seq_length, bert_config.max_position_embeddings))
807
+
808
+ tf.gfile.MakeDirs(FLAGS.output_dir)
809
+
810
+ task_name = FLAGS.task_name.lower()
811
+
812
+ if task_name not in processors:
813
+ raise ValueError("Task not found: %s" % (task_name))
814
+
815
+ processor = processors[task_name]()
816
+
817
+ label_list = processor.get_labels()
818
+
819
+ tokenizer = tokenization.FullTokenizer(
820
+ vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
821
+
822
+ tpu_cluster_resolver = None
823
+ if FLAGS.use_tpu and FLAGS.tpu_name:
824
+ tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
825
+ FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
826
+
827
+ is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
828
+ run_config = tf.contrib.tpu.RunConfig(
829
+ cluster=tpu_cluster_resolver,
830
+ master=FLAGS.master,
831
+ model_dir=FLAGS.output_dir,
832
+ save_checkpoints_steps=FLAGS.save_checkpoints_steps,
833
+ tpu_config=tf.contrib.tpu.TPUConfig(
834
+ iterations_per_loop=FLAGS.iterations_per_loop,
835
+ num_shards=FLAGS.num_tpu_cores,
836
+ per_host_input_for_training=is_per_host))
837
+
838
+ train_examples = None
839
+ num_train_steps = None
840
+ num_warmup_steps = None
841
+ if FLAGS.do_train:
842
+ train_examples = processor.get_train_examples(FLAGS.data_dir)
843
+ num_train_steps = int(
844
+ len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
845
+ num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
846
+
847
+ model_fn = model_fn_builder(
848
+ bert_config=bert_config,
849
+ num_labels=len(label_list),
850
+ init_checkpoint=FLAGS.init_checkpoint,
851
+ learning_rate=FLAGS.learning_rate,
852
+ num_train_steps=num_train_steps,
853
+ num_warmup_steps=num_warmup_steps,
854
+ use_tpu=FLAGS.use_tpu,
855
+ use_one_hot_embeddings=FLAGS.use_tpu)
856
+
857
+ # If TPU is not available, this will fall back to normal Estimator on CPU
858
+ # or GPU.
859
+ estimator = tf.contrib.tpu.TPUEstimator(
860
+ use_tpu=FLAGS.use_tpu,
861
+ model_fn=model_fn,
862
+ config=run_config,
863
+ train_batch_size=FLAGS.train_batch_size,
864
+ eval_batch_size=FLAGS.eval_batch_size,
865
+ predict_batch_size=FLAGS.predict_batch_size)
866
+
867
+ if FLAGS.do_train:
868
+ train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
869
+ file_based_convert_examples_to_features(
870
+ train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
871
+ tf.logging.info("***** Running training *****")
872
+ tf.logging.info(" Num examples = %d", len(train_examples))
873
+ tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
874
+ tf.logging.info(" Num steps = %d", num_train_steps)
875
+ train_input_fn = file_based_input_fn_builder(
876
+ input_file=train_file,
877
+ seq_length=FLAGS.max_seq_length,
878
+ is_training=True,
879
+ drop_remainder=True)
880
+ estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
881
+
882
+ if FLAGS.do_eval:
883
+ eval_examples = processor.get_dev_examples(FLAGS.data_dir)
884
+ num_actual_eval_examples = len(eval_examples)
885
+ if FLAGS.use_tpu:
886
+ # TPU requires a fixed batch size for all batches, therefore the number
887
+ # of examples must be a multiple of the batch size, or else examples
888
+ # will get dropped. So we pad with fake examples which are ignored
889
+ # later on. These do NOT count towards the metric (all tf.metrics
890
+ # support a per-instance weight, and these get a weight of 0.0).
891
+ while len(eval_examples) % FLAGS.eval_batch_size != 0:
892
+ eval_examples.append(PaddingInputExample())
893
+
894
+ eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
895
+ file_based_convert_examples_to_features(
896
+ eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
897
+
898
+ tf.logging.info("***** Running evaluation *****")
899
+ tf.logging.info(" Num examples = %d (%d actual, %d padding)",
900
+ len(eval_examples), num_actual_eval_examples,
901
+ len(eval_examples) - num_actual_eval_examples)
902
+ tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
903
+
904
+ # This tells the estimator to run through the entire set.
905
+ eval_steps = None
906
+ # However, if running eval on the TPU, you will need to specify the
907
+ # number of steps.
908
+ if FLAGS.use_tpu:
909
+ assert len(eval_examples) % FLAGS.eval_batch_size == 0
910
+ eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
911
+
912
+ eval_drop_remainder = FLAGS.use_tpu
913
+ eval_input_fn = file_based_input_fn_builder(
914
+ input_file=eval_file,
915
+ seq_length=FLAGS.max_seq_length,
916
+ is_training=False,
917
+ drop_remainder=eval_drop_remainder)
918
+
919
+ result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
920
+
921
+ output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
922
+ with tf.gfile.GFile(output_eval_file, "w") as writer:
923
+ tf.logging.info("***** Eval results *****")
924
+ for key in sorted(result.keys()):
925
+ tf.logging.info(" %s = %s", key, str(result[key]))
926
+ writer.write("%s = %s\n" % (key, str(result[key])))
927
+
928
+ if FLAGS.do_predict:
929
+ predict_examples = processor.get_test_examples(FLAGS.data_dir)
930
+ num_actual_predict_examples = len(predict_examples)
931
+ if FLAGS.use_tpu:
932
+ # TPU requires a fixed batch size for all batches, therefore the number
933
+ # of examples must be a multiple of the batch size, or else examples
934
+ # will get dropped. So we pad with fake examples which are ignored
935
+ # later on.
936
+ while len(predict_examples) % FLAGS.predict_batch_size != 0:
937
+ predict_examples.append(PaddingInputExample())
938
+
939
+ predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
940
+ file_based_convert_examples_to_features(predict_examples, label_list,
941
+ FLAGS.max_seq_length, tokenizer,
942
+ predict_file)
943
+
944
+ tf.logging.info("***** Running prediction*****")
945
+ tf.logging.info(" Num examples = %d (%d actual, %d padding)",
946
+ len(predict_examples), num_actual_predict_examples,
947
+ len(predict_examples) - num_actual_predict_examples)
948
+ tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
949
+
950
+ predict_drop_remainder = FLAGS.use_tpu
951
+ predict_input_fn = file_based_input_fn_builder(
952
+ input_file=predict_file,
953
+ seq_length=FLAGS.max_seq_length,
954
+ is_training=False,
955
+ drop_remainder=predict_drop_remainder)
956
+
957
+ result = estimator.predict(input_fn=predict_input_fn)
958
+
959
+ output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
960
+ with tf.gfile.GFile(output_predict_file, "w") as writer:
961
+ num_written_lines = 0
962
+ tf.logging.info("***** Predict results *****")
963
+ for (i, prediction) in enumerate(result):
964
+ probabilities = prediction["probabilities"]
965
+ if i >= num_actual_predict_examples:
966
+ break
967
+ output_line = "\t".join(
968
+ str(class_probability)
969
+ for class_probability in probabilities) + "\n"
970
+ writer.write(output_line)
971
+ num_written_lines += 1
972
+ assert num_written_lines == num_actual_predict_examples
973
+
974
+
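+ # Editor's note (not part of the original file): worked arithmetic for the
+ # step schedule computed in main() above, with illustrative values of
+ # 8,000 training examples, train_batch_size=32, num_train_epochs=3.0 and
+ # warmup_proportion=0.1:
+ #   num_train_steps  = int(8000 / 32 * 3.0) = 750
+ #   num_warmup_steps = int(750 * 0.1)       = 75
+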
975
+ if __name__ == "__main__":
976
+ flags.mark_flag_as_required("data_dir")
977
+ flags.mark_flag_as_required("task_name")
978
+ flags.mark_flag_as_required("vocab_file")
979
+ flags.mark_flag_as_required("bert_config_file")
980
+ flags.mark_flag_as_required("output_dir")
981
+ tf.app.run()
RIS-DMMI/bert/run_classifier_with_tfhub.py ADDED
@@ -0,0 +1,314 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """BERT finetuning runner with TF-Hub."""
16
+
17
+ from __future__ import absolute_import
18
+ from __future__ import division
19
+ from __future__ import print_function
20
+
21
+ import os
22
+ import optimization
23
+ import run_classifier
24
+ import tokenization
25
+ import tensorflow as tf
26
+ import tensorflow_hub as hub
27
+
28
+ flags = tf.flags
29
+
30
+ FLAGS = flags.FLAGS
31
+
32
+ flags.DEFINE_string(
33
+ "bert_hub_module_handle", None,
34
+ "Handle for the BERT TF-Hub module.")
35
+
36
+
37
+ def create_model(is_training, input_ids, input_mask, segment_ids, labels,
38
+ num_labels, bert_hub_module_handle):
39
+ """Creates a classification model."""
40
+ tags = set()
41
+ if is_training:
42
+ tags.add("train")
43
+ bert_module = hub.Module(bert_hub_module_handle, tags=tags, trainable=True)
44
+ bert_inputs = dict(
45
+ input_ids=input_ids,
46
+ input_mask=input_mask,
47
+ segment_ids=segment_ids)
48
+ bert_outputs = bert_module(
49
+ inputs=bert_inputs,
50
+ signature="tokens",
51
+ as_dict=True)
52
+
53
+ # In the demo, we are doing a simple classification task on the entire
54
+ # segment.
55
+ #
56
+ # If you want to use the token-level output, use
57
+ # bert_outputs["sequence_output"] instead.
58
+ output_layer = bert_outputs["pooled_output"]
59
+
60
+ hidden_size = output_layer.shape[-1].value
61
+
62
+ output_weights = tf.get_variable(
63
+ "output_weights", [num_labels, hidden_size],
64
+ initializer=tf.truncated_normal_initializer(stddev=0.02))
65
+
66
+ output_bias = tf.get_variable(
67
+ "output_bias", [num_labels], initializer=tf.zeros_initializer())
68
+
69
+ with tf.variable_scope("loss"):
70
+ if is_training:
71
+ # I.e., 0.1 dropout
72
+ output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
73
+
74
+ logits = tf.matmul(output_layer, output_weights, transpose_b=True)
75
+ logits = tf.nn.bias_add(logits, output_bias)
76
+ probabilities = tf.nn.softmax(logits, axis=-1)
77
+ log_probs = tf.nn.log_softmax(logits, axis=-1)
78
+
79
+ one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
80
+
81
+ per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
82
+ loss = tf.reduce_mean(per_example_loss)
83
+
84
+ return (loss, per_example_loss, logits, probabilities)
85
+
86
+
87
+ def model_fn_builder(num_labels, learning_rate, num_train_steps,
88
+ num_warmup_steps, use_tpu, bert_hub_module_handle):
89
+ """Returns `model_fn` closure for TPUEstimator."""
90
+
91
+ def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
92
+ """The `model_fn` for TPUEstimator."""
93
+
94
+ tf.logging.info("*** Features ***")
95
+ for name in sorted(features.keys()):
96
+ tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
97
+
98
+ input_ids = features["input_ids"]
99
+ input_mask = features["input_mask"]
100
+ segment_ids = features["segment_ids"]
101
+ label_ids = features["label_ids"]
102
+
103
+ is_training = (mode == tf.estimator.ModeKeys.TRAIN)
104
+
105
+ (total_loss, per_example_loss, logits, probabilities) = create_model(
106
+ is_training, input_ids, input_mask, segment_ids, label_ids, num_labels,
107
+ bert_hub_module_handle)
108
+
109
+ output_spec = None
110
+ if mode == tf.estimator.ModeKeys.TRAIN:
111
+ train_op = optimization.create_optimizer(
112
+ total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
113
+
114
+ output_spec = tf.contrib.tpu.TPUEstimatorSpec(
115
+ mode=mode,
116
+ loss=total_loss,
117
+ train_op=train_op)
118
+ elif mode == tf.estimator.ModeKeys.EVAL:
119
+
120
+ def metric_fn(per_example_loss, label_ids, logits):
121
+ predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
122
+ accuracy = tf.metrics.accuracy(label_ids, predictions)
123
+ loss = tf.metrics.mean(per_example_loss)
124
+ return {
125
+ "eval_accuracy": accuracy,
126
+ "eval_loss": loss,
127
+ }
128
+
129
+ eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])
130
+ output_spec = tf.contrib.tpu.TPUEstimatorSpec(
131
+ mode=mode,
132
+ loss=total_loss,
133
+ eval_metrics=eval_metrics)
134
+ elif mode == tf.estimator.ModeKeys.PREDICT:
135
+ output_spec = tf.contrib.tpu.TPUEstimatorSpec(
136
+ mode=mode, predictions={"probabilities": probabilities})
137
+ else:
138
+ raise ValueError(
139
+ "Only TRAIN, EVAL and PREDICT modes are supported: %s" % (mode))
140
+
141
+ return output_spec
142
+
143
+ return model_fn
144
+
145
+
146
+ def create_tokenizer_from_hub_module(bert_hub_module_handle):
147
+ """Get the vocab file and casing info from the Hub module."""
148
+ with tf.Graph().as_default():
149
+ bert_module = hub.Module(bert_hub_module_handle)
150
+ tokenization_info = bert_module(signature="tokenization_info", as_dict=True)
151
+ with tf.Session() as sess:
152
+ vocab_file, do_lower_case = sess.run([tokenization_info["vocab_file"],
153
+ tokenization_info["do_lower_case"]])
154
+ return tokenization.FullTokenizer(
155
+ vocab_file=vocab_file, do_lower_case=do_lower_case)
156
+
157
+
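+ # Editor's note (not part of the original file): a hedged usage sketch; the
+ # module handle is illustrative:
+ #
+ #   tokenizer = create_tokenizer_from_hub_module(
+ #       "https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1")
+ #   tokenizer.tokenize("Jacksonville")  # -> ["jack", "##son", "##ville"]
+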
158
+ def main(_):
159
+ tf.logging.set_verbosity(tf.logging.INFO)
160
+
161
+ processors = {
162
+ "cola": run_classifier.ColaProcessor,
163
+ "mnli": run_classifier.MnliProcessor,
164
+ "mrpc": run_classifier.MrpcProcessor,
165
+ }
166
+
167
+ if not FLAGS.do_train and not FLAGS.do_eval:
168
+ raise ValueError("At least one of `do_train` or `do_eval` must be True.")
169
+
170
+ tf.gfile.MakeDirs(FLAGS.output_dir)
171
+
172
+ task_name = FLAGS.task_name.lower()
173
+
174
+ if task_name not in processors:
175
+ raise ValueError("Task not found: %s" % (task_name))
176
+
177
+ processor = processors[task_name]()
178
+
179
+ label_list = processor.get_labels()
180
+
181
+ tokenizer = create_tokenizer_from_hub_module(FLAGS.bert_hub_module_handle)
182
+
183
+ tpu_cluster_resolver = None
184
+ if FLAGS.use_tpu and FLAGS.tpu_name:
185
+ tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
186
+ FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
187
+
188
+ is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
189
+ run_config = tf.contrib.tpu.RunConfig(
190
+ cluster=tpu_cluster_resolver,
191
+ master=FLAGS.master,
192
+ model_dir=FLAGS.output_dir,
193
+ save_checkpoints_steps=FLAGS.save_checkpoints_steps,
194
+ tpu_config=tf.contrib.tpu.TPUConfig(
195
+ iterations_per_loop=FLAGS.iterations_per_loop,
196
+ num_shards=FLAGS.num_tpu_cores,
197
+ per_host_input_for_training=is_per_host))
198
+
199
+ train_examples = None
200
+ num_train_steps = None
201
+ num_warmup_steps = None
202
+ if FLAGS.do_train:
203
+ train_examples = processor.get_train_examples(FLAGS.data_dir)
204
+ num_train_steps = int(
205
+ len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
206
+ num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
207
+
208
+ model_fn = model_fn_builder(
209
+ num_labels=len(label_list),
210
+ learning_rate=FLAGS.learning_rate,
211
+ num_train_steps=num_train_steps,
212
+ num_warmup_steps=num_warmup_steps,
213
+ use_tpu=FLAGS.use_tpu,
214
+ bert_hub_module_handle=FLAGS.bert_hub_module_handle)
215
+
216
+ # If TPU is not available, this will fall back to normal Estimator on CPU
217
+ # or GPU.
218
+ estimator = tf.contrib.tpu.TPUEstimator(
219
+ use_tpu=FLAGS.use_tpu,
220
+ model_fn=model_fn,
221
+ config=run_config,
222
+ train_batch_size=FLAGS.train_batch_size,
223
+ eval_batch_size=FLAGS.eval_batch_size,
224
+ predict_batch_size=FLAGS.predict_batch_size)
225
+
226
+ if FLAGS.do_train:
227
+ train_features = run_classifier.convert_examples_to_features(
228
+ train_examples, label_list, FLAGS.max_seq_length, tokenizer)
229
+ tf.logging.info("***** Running training *****")
230
+ tf.logging.info(" Num examples = %d", len(train_examples))
231
+ tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
232
+ tf.logging.info(" Num steps = %d", num_train_steps)
233
+ train_input_fn = run_classifier.input_fn_builder(
234
+ features=train_features,
235
+ seq_length=FLAGS.max_seq_length,
236
+ is_training=True,
237
+ drop_remainder=True)
238
+ estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
239
+
240
+ if FLAGS.do_eval:
241
+ eval_examples = processor.get_dev_examples(FLAGS.data_dir)
242
+ eval_features = run_classifier.convert_examples_to_features(
243
+ eval_examples, label_list, FLAGS.max_seq_length, tokenizer)
244
+
245
+ tf.logging.info("***** Running evaluation *****")
246
+ tf.logging.info(" Num examples = %d", len(eval_examples))
247
+ tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
248
+
249
+ # This tells the estimator to run through the entire set.
250
+ eval_steps = None
251
+ # However, if running eval on the TPU, you will need to specify the
252
+ # number of steps.
253
+ if FLAGS.use_tpu:
254
+ # Eval will be slightly WRONG on the TPU because it will truncate
255
+ # the last batch.
256
+ eval_steps = int(len(eval_examples) / FLAGS.eval_batch_size)
257
+
258
+ eval_drop_remainder = True if FLAGS.use_tpu else False
259
+ eval_input_fn = run_classifier.input_fn_builder(
260
+ features=eval_features,
261
+ seq_length=FLAGS.max_seq_length,
262
+ is_training=False,
263
+ drop_remainder=eval_drop_remainder)
264
+
265
+ result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
266
+
267
+ output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
268
+ with tf.gfile.GFile(output_eval_file, "w") as writer:
269
+ tf.logging.info("***** Eval results *****")
270
+ for key in sorted(result.keys()):
271
+ tf.logging.info(" %s = %s", key, str(result[key]))
272
+ writer.write("%s = %s\n" % (key, str(result[key])))
273
+
274
+ if FLAGS.do_predict:
275
+ predict_examples = processor.get_test_examples(FLAGS.data_dir)
276
+ if FLAGS.use_tpu:
277
+ # Discard batch remainder if running on TPU
278
+ n = len(predict_examples)
279
+ predict_examples = predict_examples[:(n - n % FLAGS.predict_batch_size)]
280
+
281
+ predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
282
+ run_classifier.file_based_convert_examples_to_features(
283
+ predict_examples, label_list, FLAGS.max_seq_length, tokenizer,
284
+ predict_file)
285
+
286
+ tf.logging.info("***** Running prediction*****")
287
+ tf.logging.info(" Num examples = %d", len(predict_examples))
288
+ tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
289
+
290
+ predict_input_fn = run_classifier.file_based_input_fn_builder(
291
+ input_file=predict_file,
292
+ seq_length=FLAGS.max_seq_length,
293
+ is_training=False,
294
+ drop_remainder=FLAGS.use_tpu)
295
+
296
+ result = estimator.predict(input_fn=predict_input_fn)
297
+
298
+ output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
299
+ with tf.gfile.GFile(output_predict_file, "w") as writer:
300
+ tf.logging.info("***** Predict results *****")
301
+ for prediction in result:
302
+ probabilities = prediction["probabilities"]
303
+ output_line = "\t".join(
304
+ str(class_probability)
305
+ for class_probability in probabilities) + "\n"
306
+ writer.write(output_line)
307
+
308
+
309
+ if __name__ == "__main__":
310
+ flags.mark_flag_as_required("data_dir")
311
+ flags.mark_flag_as_required("task_name")
312
+ flags.mark_flag_as_required("bert_hub_module_handle")
313
+ flags.mark_flag_as_required("output_dir")
314
+ tf.app.run()
RIS-DMMI/bert/run_pretraining.py ADDED
@@ -0,0 +1,493 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Run masked LM/next sentence masked_lm pre-training for BERT."""
16
+
17
+ from __future__ import absolute_import
18
+ from __future__ import division
19
+ from __future__ import print_function
20
+
21
+ import os
22
+ import modeling
23
+ import optimization
24
+ import tensorflow as tf
25
+
26
+ flags = tf.flags
27
+
28
+ FLAGS = flags.FLAGS
29
+
30
+ ## Required parameters
31
+ flags.DEFINE_string(
32
+ "bert_config_file", None,
33
+ "The config json file corresponding to the pre-trained BERT model. "
34
+ "This specifies the model architecture.")
35
+
36
+ flags.DEFINE_string(
37
+ "input_file", None,
38
+ "Input TF example files (can be a glob or comma separated).")
39
+
40
+ flags.DEFINE_string(
41
+ "output_dir", None,
42
+ "The output directory where the model checkpoints will be written.")
43
+
44
+ ## Other parameters
45
+ flags.DEFINE_string(
46
+ "init_checkpoint", None,
47
+ "Initial checkpoint (usually from a pre-trained BERT model).")
48
+
49
+ flags.DEFINE_integer(
50
+ "max_seq_length", 128,
51
+ "The maximum total input sequence length after WordPiece tokenization. "
52
+ "Sequences longer than this will be truncated, and sequences shorter "
53
+ "than this will be padded. Must match data generation.")
54
+
55
+ flags.DEFINE_integer(
56
+ "max_predictions_per_seq", 20,
57
+ "Maximum number of masked LM predictions per sequence. "
58
+ "Must match data generation.")
59
+
60
+ flags.DEFINE_bool("do_train", False, "Whether to run training.")
61
+
62
+ flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
63
+
64
+ flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
65
+
66
+ flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
67
+
68
+ flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
69
+
70
+ flags.DEFINE_integer("num_train_steps", 100000, "Number of training steps.")
71
+
72
+ flags.DEFINE_integer("num_warmup_steps", 10000, "Number of warmup steps.")
73
+
74
+ flags.DEFINE_integer("save_checkpoints_steps", 1000,
75
+ "How often to save the model checkpoint.")
76
+
77
+ flags.DEFINE_integer("iterations_per_loop", 1000,
78
+ "How many steps to make in each estimator call.")
79
+
80
+ flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.")
81
+
82
+ flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
83
+
84
+ tf.flags.DEFINE_string(
85
+ "tpu_name", None,
86
+ "The Cloud TPU to use for training. This should be either the name "
87
+ "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
88
+ "url.")
89
+
90
+ tf.flags.DEFINE_string(
91
+ "tpu_zone", None,
92
+ "[Optional] GCE zone where the Cloud TPU is located in. If not "
93
+ "specified, we will attempt to automatically detect the GCE project from "
94
+ "metadata.")
95
+
96
+ tf.flags.DEFINE_string(
97
+ "gcp_project", None,
98
+ "[Optional] Project name for the Cloud TPU-enabled project. If not "
99
+ "specified, we will attempt to automatically detect the GCE project from "
100
+ "metadata.")
101
+
102
+ tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
103
+
104
+ flags.DEFINE_integer(
105
+ "num_tpu_cores", 8,
106
+ "Only used if `use_tpu` is True. Total number of TPU cores to use.")
107
+
108
+
109
+ def model_fn_builder(bert_config, init_checkpoint, learning_rate,
110
+ num_train_steps, num_warmup_steps, use_tpu,
111
+ use_one_hot_embeddings):
112
+ """Returns `model_fn` closure for TPUEstimator."""
113
+
114
+ def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
115
+ """The `model_fn` for TPUEstimator."""
116
+
117
+ tf.logging.info("*** Features ***")
118
+ for name in sorted(features.keys()):
119
+ tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
120
+
121
+ input_ids = features["input_ids"]
122
+ input_mask = features["input_mask"]
123
+ segment_ids = features["segment_ids"]
124
+ masked_lm_positions = features["masked_lm_positions"]
125
+ masked_lm_ids = features["masked_lm_ids"]
126
+ masked_lm_weights = features["masked_lm_weights"]
127
+ next_sentence_labels = features["next_sentence_labels"]
128
+
129
+ is_training = (mode == tf.estimator.ModeKeys.TRAIN)
130
+
131
+ model = modeling.BertModel(
132
+ config=bert_config,
133
+ is_training=is_training,
134
+ input_ids=input_ids,
135
+ input_mask=input_mask,
136
+ token_type_ids=segment_ids,
137
+ use_one_hot_embeddings=use_one_hot_embeddings)
138
+
139
+ (masked_lm_loss,
140
+ masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(
141
+ bert_config, model.get_sequence_output(), model.get_embedding_table(),
142
+ masked_lm_positions, masked_lm_ids, masked_lm_weights)
143
+
144
+ (next_sentence_loss, next_sentence_example_loss,
145
+ next_sentence_log_probs) = get_next_sentence_output(
146
+ bert_config, model.get_pooled_output(), next_sentence_labels)
147
+
148
+ total_loss = masked_lm_loss + next_sentence_loss
149
+
150
+ tvars = tf.trainable_variables()
151
+
152
+ initialized_variable_names = {}
153
+ scaffold_fn = None
154
+ if init_checkpoint:
155
+ (assignment_map, initialized_variable_names
156
+ ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
157
+ if use_tpu:
158
+
159
+ def tpu_scaffold():
160
+ tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
161
+ return tf.train.Scaffold()
162
+
163
+ scaffold_fn = tpu_scaffold
164
+ else:
165
+ tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
166
+
167
+ tf.logging.info("**** Trainable Variables ****")
168
+ for var in tvars:
169
+ init_string = ""
170
+ if var.name in initialized_variable_names:
171
+ init_string = ", *INIT_FROM_CKPT*"
172
+ tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
173
+ init_string)
174
+
175
+ output_spec = None
176
+ if mode == tf.estimator.ModeKeys.TRAIN:
177
+ train_op = optimization.create_optimizer(
178
+ total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
179
+
180
+ output_spec = tf.contrib.tpu.TPUEstimatorSpec(
181
+ mode=mode,
182
+ loss=total_loss,
183
+ train_op=train_op,
184
+ scaffold_fn=scaffold_fn)
185
+ elif mode == tf.estimator.ModeKeys.EVAL:
186
+
187
+ def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
188
+ masked_lm_weights, next_sentence_example_loss,
189
+ next_sentence_log_probs, next_sentence_labels):
190
+ """Computes the loss and accuracy of the model."""
191
+ masked_lm_log_probs = tf.reshape(masked_lm_log_probs,
192
+ [-1, masked_lm_log_probs.shape[-1]])
193
+ masked_lm_predictions = tf.argmax(
194
+ masked_lm_log_probs, axis=-1, output_type=tf.int32)
195
+ masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
196
+ masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
197
+ masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
198
+ masked_lm_accuracy = tf.metrics.accuracy(
199
+ labels=masked_lm_ids,
200
+ predictions=masked_lm_predictions,
201
+ weights=masked_lm_weights)
202
+ masked_lm_mean_loss = tf.metrics.mean(
203
+ values=masked_lm_example_loss, weights=masked_lm_weights)
204
+
205
+ next_sentence_log_probs = tf.reshape(
206
+ next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])
207
+ next_sentence_predictions = tf.argmax(
208
+ next_sentence_log_probs, axis=-1, output_type=tf.int32)
209
+ next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
210
+ next_sentence_accuracy = tf.metrics.accuracy(
211
+ labels=next_sentence_labels, predictions=next_sentence_predictions)
212
+ next_sentence_mean_loss = tf.metrics.mean(
213
+ values=next_sentence_example_loss)
214
+
215
+ return {
216
+ "masked_lm_accuracy": masked_lm_accuracy,
217
+ "masked_lm_loss": masked_lm_mean_loss,
218
+ "next_sentence_accuracy": next_sentence_accuracy,
219
+ "next_sentence_loss": next_sentence_mean_loss,
220
+ }
221
+
222
+ eval_metrics = (metric_fn, [
223
+ masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
224
+ masked_lm_weights, next_sentence_example_loss,
225
+ next_sentence_log_probs, next_sentence_labels
226
+ ])
227
+ output_spec = tf.contrib.tpu.TPUEstimatorSpec(
228
+ mode=mode,
229
+ loss=total_loss,
230
+ eval_metrics=eval_metrics,
231
+ scaffold_fn=scaffold_fn)
232
+ else:
233
+ raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode))
234
+
235
+ return output_spec
236
+
237
+ return model_fn
238
+
239
+
240
+ def get_masked_lm_output(bert_config, input_tensor, output_weights, positions,
241
+ label_ids, label_weights):
242
+ """Get loss and log probs for the masked LM."""
243
+ input_tensor = gather_indexes(input_tensor, positions)
244
+
245
+ with tf.variable_scope("cls/predictions"):
246
+ # We apply one more non-linear transformation before the output layer.
247
+ # This matrix is not used after pre-training.
248
+ with tf.variable_scope("transform"):
249
+ input_tensor = tf.layers.dense(
250
+ input_tensor,
251
+ units=bert_config.hidden_size,
252
+ activation=modeling.get_activation(bert_config.hidden_act),
253
+ kernel_initializer=modeling.create_initializer(
254
+ bert_config.initializer_range))
255
+ input_tensor = modeling.layer_norm(input_tensor)
256
+
257
+ # The output weights are the same as the input embeddings, but there is
258
+ # an output-only bias for each token.
259
+ output_bias = tf.get_variable(
260
+ "output_bias",
261
+ shape=[bert_config.vocab_size],
262
+ initializer=tf.zeros_initializer())
263
+ logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
264
+ logits = tf.nn.bias_add(logits, output_bias)
265
+ log_probs = tf.nn.log_softmax(logits, axis=-1)
266
+
267
+ label_ids = tf.reshape(label_ids, [-1])
268
+ label_weights = tf.reshape(label_weights, [-1])
269
+
270
+ one_hot_labels = tf.one_hot(
271
+ label_ids, depth=bert_config.vocab_size, dtype=tf.float32)
272
+
273
+ # The `positions` tensor might be zero-padded (if the sequence is too
274
+ # short to have the maximum number of predictions). The `label_weights`
275
+ # tensor has a value of 1.0 for every real prediction and 0.0 for the
276
+ # padding predictions.
277
+ per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
278
+ numerator = tf.reduce_sum(label_weights * per_example_loss)
279
+ denominator = tf.reduce_sum(label_weights) + 1e-5
280
+ loss = numerator / denominator
281
+
282
+ return (loss, per_example_loss, log_probs)
283
+
284
+
285
+ def get_next_sentence_output(bert_config, input_tensor, labels):
286
+ """Get loss and log probs for the next sentence prediction."""
287
+
288
+ # Simple binary classification. Note that 0 is "next sentence" and 1 is
289
+ # "random sentence". This weight matrix is not used after pre-training.
290
+ with tf.variable_scope("cls/seq_relationship"):
291
+ output_weights = tf.get_variable(
292
+ "output_weights",
293
+ shape=[2, bert_config.hidden_size],
294
+ initializer=modeling.create_initializer(bert_config.initializer_range))
295
+ output_bias = tf.get_variable(
296
+ "output_bias", shape=[2], initializer=tf.zeros_initializer())
297
+
298
+ logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
299
+ logits = tf.nn.bias_add(logits, output_bias)
300
+ log_probs = tf.nn.log_softmax(logits, axis=-1)
301
+ labels = tf.reshape(labels, [-1])
302
+ one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
303
+ per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
304
+ loss = tf.reduce_mean(per_example_loss)
305
+ return (loss, per_example_loss, log_probs)
306
+
307
+
308
+ def gather_indexes(sequence_tensor, positions):
309
+ """Gathers the vectors at the specific positions over a minibatch."""
310
+ sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
311
+ batch_size = sequence_shape[0]
312
+ seq_length = sequence_shape[1]
313
+ width = sequence_shape[2]
314
+
315
+ flat_offsets = tf.reshape(
316
+ tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
317
+ flat_positions = tf.reshape(positions + flat_offsets, [-1])
318
+ flat_sequence_tensor = tf.reshape(sequence_tensor,
319
+ [batch_size * seq_length, width])
320
+ output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
321
+ return output_tensor
322
+
323
+
324
+ def input_fn_builder(input_files,
325
+ max_seq_length,
326
+ max_predictions_per_seq,
327
+ is_training,
328
+ num_cpu_threads=4):
329
+ """Creates an `input_fn` closure to be passed to TPUEstimator."""
330
+
331
+ def input_fn(params):
332
+ """The actual input function."""
333
+ batch_size = params["batch_size"]
334
+
335
+ name_to_features = {
336
+ "input_ids":
337
+ tf.FixedLenFeature([max_seq_length], tf.int64),
338
+ "input_mask":
339
+ tf.FixedLenFeature([max_seq_length], tf.int64),
340
+ "segment_ids":
341
+ tf.FixedLenFeature([max_seq_length], tf.int64),
342
+ "masked_lm_positions":
343
+ tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
344
+ "masked_lm_ids":
345
+ tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
346
+ "masked_lm_weights":
347
+ tf.FixedLenFeature([max_predictions_per_seq], tf.float32),
348
+ "next_sentence_labels":
349
+ tf.FixedLenFeature([1], tf.int64),
350
+ }
351
+
352
+ # For training, we want a lot of parallel reading and shuffling.
353
+ # For eval, we want no shuffling and parallel reading doesn't matter.
354
+ if is_training:
355
+ d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
356
+ d = d.repeat()
357
+ d = d.shuffle(buffer_size=len(input_files))
358
+
359
+ # `cycle_length` is the number of parallel files that get read.
360
+ cycle_length = min(num_cpu_threads, len(input_files))
361
+
362
+ # `sloppy` mode means that the interleaving is not exact. This adds
363
+ # even more randomness to the training pipeline.
364
+ d = d.apply(
365
+ tf.contrib.data.parallel_interleave(
366
+ tf.data.TFRecordDataset,
367
+ sloppy=is_training,
368
+ cycle_length=cycle_length))
369
+ d = d.shuffle(buffer_size=100)
370
+ else:
371
+ d = tf.data.TFRecordDataset(input_files)
372
+ # Since we evaluate for a fixed number of steps we don't want to encounter
373
+ # out-of-range exceptions.
374
+ d = d.repeat()
375
+
376
+ # We must `drop_remainder` on training because the TPU requires fixed
377
+ # size dimensions. For eval, we assume we are evaluating on the CPU or GPU
378
+ # and we *don't* want to drop the remainder, otherwise we wont cover
379
+ # every sample.
380
+ d = d.apply(
381
+ tf.contrib.data.map_and_batch(
382
+ lambda record: _decode_record(record, name_to_features),
383
+ batch_size=batch_size,
384
+ num_parallel_batches=num_cpu_threads,
385
+ drop_remainder=True))
386
+ return d
387
+
388
+ return input_fn
389
+
390
+
391
+ def _decode_record(record, name_to_features):
392
+ """Decodes a record to a TensorFlow example."""
393
+ example = tf.parse_single_example(record, name_to_features)
394
+
395
+ # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
396
+ # So cast all int64 to int32.
397
+ for name in list(example.keys()):
398
+ t = example[name]
399
+ if t.dtype == tf.int64:
400
+ t = tf.to_int32(t)
401
+ example[name] = t
402
+
403
+ return example
404
+
405
+
406
+ def main(_):
407
+ tf.logging.set_verbosity(tf.logging.INFO)
408
+
409
+ if not FLAGS.do_train and not FLAGS.do_eval:
410
+ raise ValueError("At least one of `do_train` or `do_eval` must be True.")
411
+
412
+ bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
413
+
414
+ tf.gfile.MakeDirs(FLAGS.output_dir)
415
+
416
+ input_files = []
417
+ for input_pattern in FLAGS.input_file.split(","):
418
+ input_files.extend(tf.gfile.Glob(input_pattern))
419
+
420
+ tf.logging.info("*** Input Files ***")
421
+ for input_file in input_files:
422
+ tf.logging.info(" %s" % input_file)
423
+
424
+ tpu_cluster_resolver = None
425
+ if FLAGS.use_tpu and FLAGS.tpu_name:
426
+ tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
427
+ FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
428
+
429
+ is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
430
+ run_config = tf.contrib.tpu.RunConfig(
431
+ cluster=tpu_cluster_resolver,
432
+ master=FLAGS.master,
433
+ model_dir=FLAGS.output_dir,
434
+ save_checkpoints_steps=FLAGS.save_checkpoints_steps,
435
+ tpu_config=tf.contrib.tpu.TPUConfig(
436
+ iterations_per_loop=FLAGS.iterations_per_loop,
437
+ num_shards=FLAGS.num_tpu_cores,
438
+ per_host_input_for_training=is_per_host))
439
+
440
+ model_fn = model_fn_builder(
441
+ bert_config=bert_config,
442
+ init_checkpoint=FLAGS.init_checkpoint,
443
+ learning_rate=FLAGS.learning_rate,
444
+ num_train_steps=FLAGS.num_train_steps,
445
+ num_warmup_steps=FLAGS.num_warmup_steps,
446
+ use_tpu=FLAGS.use_tpu,
447
+ use_one_hot_embeddings=FLAGS.use_tpu)
448
+
449
+ # If TPU is not available, this will fall back to normal Estimator on CPU
450
+ # or GPU.
451
+ estimator = tf.contrib.tpu.TPUEstimator(
452
+ use_tpu=FLAGS.use_tpu,
453
+ model_fn=model_fn,
454
+ config=run_config,
455
+ train_batch_size=FLAGS.train_batch_size,
456
+ eval_batch_size=FLAGS.eval_batch_size)
457
+
458
+ if FLAGS.do_train:
459
+ tf.logging.info("***** Running training *****")
460
+ tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
461
+ train_input_fn = input_fn_builder(
462
+ input_files=input_files,
463
+ max_seq_length=FLAGS.max_seq_length,
464
+ max_predictions_per_seq=FLAGS.max_predictions_per_seq,
465
+ is_training=True)
466
+ estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)
467
+
468
+ if FLAGS.do_eval:
469
+ tf.logging.info("***** Running evaluation *****")
470
+ tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
471
+
472
+ eval_input_fn = input_fn_builder(
473
+ input_files=input_files,
474
+ max_seq_length=FLAGS.max_seq_length,
475
+ max_predictions_per_seq=FLAGS.max_predictions_per_seq,
476
+ is_training=False)
477
+
478
+ result = estimator.evaluate(
479
+ input_fn=eval_input_fn, steps=FLAGS.max_eval_steps)
480
+
481
+ output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
482
+ with tf.gfile.GFile(output_eval_file, "w") as writer:
483
+ tf.logging.info("***** Eval results *****")
484
+ for key in sorted(result.keys()):
485
+ tf.logging.info(" %s = %s", key, str(result[key]))
486
+ writer.write("%s = %s\n" % (key, str(result[key])))
487
+
488
+
489
+ if __name__ == "__main__":
490
+ flags.mark_flag_as_required("input_file")
491
+ flags.mark_flag_as_required("bert_config_file")
492
+ flags.mark_flag_as_required("output_dir")
493
+ tf.app.run()
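The next-sentence loss near the top of this hunk (matmul, bias add, log-softmax, mean negative log-likelihood over two classes) reduces to a few lines of array math. A minimal numpy sketch mirroring the TF code above; the shapes and random inputs are assumptions for illustration, not part of the repository:

    import numpy as np

    def next_sentence_loss(pooled, weights, bias, labels):
        """Numpy analogue of the 2-way next-sentence loss above."""
        logits = pooled @ weights.T + bias                    # [batch, 2]
        logits = logits - logits.max(axis=-1, keepdims=True)  # stable softmax
        log_probs = logits - np.log(np.exp(logits).sum(axis=-1, keepdims=True))
        per_example = -log_probs[np.arange(len(labels)), labels]
        return per_example.mean(), per_example

    rng = np.random.RandomState(0)
    loss, _ = next_sentence_loss(
        pooled=rng.randn(8, 16), weights=rng.randn(2, 16),
        bias=np.zeros(2), labels=rng.randint(0, 2, size=8))
    print(loss)  # scalar mean negative log-likelihood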
RIS-DMMI/bert/run_squad.py ADDED
@@ -0,0 +1,1283 @@
+ # coding=utf-8
+ # Copyright 2018 The Google AI Language Team Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Run BERT on SQuAD 1.1 and SQuAD 2.0."""
+
+ from __future__ import absolute_import
+ from __future__ import division
+ from __future__ import print_function
+
+ import collections
+ import json
+ import math
+ import os
+ import random
+ import modeling
+ import optimization
+ import tokenization
+ import six
+ import tensorflow as tf
+
+ flags = tf.flags
+
+ FLAGS = flags.FLAGS
+
+ ## Required parameters
+ flags.DEFINE_string(
+     "bert_config_file", None,
+     "The config json file corresponding to the pre-trained BERT model. "
+     "This specifies the model architecture.")
+
+ flags.DEFINE_string("vocab_file", None,
+                     "The vocabulary file that the BERT model was trained on.")
+
+ flags.DEFINE_string(
+     "output_dir", None,
+     "The output directory where the model checkpoints will be written.")
+
+ ## Other parameters
+ flags.DEFINE_string("train_file", None,
+                     "SQuAD json for training. E.g., train-v1.1.json")
+
+ flags.DEFINE_string(
+     "predict_file", None,
+     "SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
+
+ flags.DEFINE_string(
+     "init_checkpoint", None,
+     "Initial checkpoint (usually from a pre-trained BERT model).")
+
+ flags.DEFINE_bool(
+     "do_lower_case", True,
+     "Whether to lower case the input text. Should be True for uncased "
+     "models and False for cased models.")
+
+ flags.DEFINE_integer(
+     "max_seq_length", 384,
+     "The maximum total input sequence length after WordPiece tokenization. "
+     "Sequences longer than this will be truncated, and sequences shorter "
+     "than this will be padded.")
+
+ flags.DEFINE_integer(
+     "doc_stride", 128,
+     "When splitting up a long document into chunks, how much stride to "
+     "take between chunks.")
+
+ flags.DEFINE_integer(
+     "max_query_length", 64,
+     "The maximum number of tokens for the question. Questions longer than "
+     "this will be truncated to this length.")
+
+ flags.DEFINE_bool("do_train", False, "Whether to run training.")
+
+ flags.DEFINE_bool("do_predict", False, "Whether to run eval on the dev set.")
+
+ flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
+
+ flags.DEFINE_integer("predict_batch_size", 8,
+                      "Total batch size for predictions.")
+
+ flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
+
+ flags.DEFINE_float("num_train_epochs", 3.0,
+                    "Total number of training epochs to perform.")
+
+ flags.DEFINE_float(
+     "warmup_proportion", 0.1,
+     "Proportion of training to perform linear learning rate warmup for. "
+     "E.g., 0.1 = 10% of training.")
+
+ flags.DEFINE_integer("save_checkpoints_steps", 1000,
+                      "How often to save the model checkpoint.")
+
+ flags.DEFINE_integer("iterations_per_loop", 1000,
+                      "How many steps to make in each estimator call.")
+
+ flags.DEFINE_integer(
+     "n_best_size", 20,
+     "The total number of n-best predictions to generate in the "
+     "nbest_predictions.json output file.")
+
+ flags.DEFINE_integer(
+     "max_answer_length", 30,
+     "The maximum length of an answer that can be generated. This is needed "
+     "because the start and end predictions are not conditioned on one another.")
+
+ flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
+
+ tf.flags.DEFINE_string(
+     "tpu_name", None,
+     "The Cloud TPU to use for training. This should be either the name "
+     "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
+     "url.")
+
+ tf.flags.DEFINE_string(
+     "tpu_zone", None,
+     "[Optional] GCE zone where the Cloud TPU is located. If not "
+     "specified, we will attempt to automatically detect the zone from "
+     "metadata.")
+
+ tf.flags.DEFINE_string(
+     "gcp_project", None,
+     "[Optional] Project name for the Cloud TPU-enabled project. If not "
+     "specified, we will attempt to automatically detect the GCE project from "
+     "metadata.")
+
+ tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
+
+ flags.DEFINE_integer(
+     "num_tpu_cores", 8,
+     "Only used if `use_tpu` is True. Total number of TPU cores to use.")
+
+ flags.DEFINE_bool(
+     "verbose_logging", False,
+     "If true, all of the warnings related to data processing will be printed. "
+     "A number of warnings are expected for a normal SQuAD evaluation.")
+
+ flags.DEFINE_bool(
+     "version_2_with_negative", False,
+     "If true, the SQuAD examples contain some that do not have an answer.")
+
+ flags.DEFINE_float(
+     "null_score_diff_threshold", 0.0,
+     "If null_score - best_non_null is greater than the threshold predict null.")
+
+
+ class SquadExample(object):
+   """A single training/test example for simple sequence classification.
+
+      For examples without an answer, the start and end position are -1.
+   """
+
+   def __init__(self,
+                qas_id,
+                question_text,
+                doc_tokens,
+                orig_answer_text=None,
+                start_position=None,
+                end_position=None,
+                is_impossible=False):
+     self.qas_id = qas_id
+     self.question_text = question_text
+     self.doc_tokens = doc_tokens
+     self.orig_answer_text = orig_answer_text
+     self.start_position = start_position
+     self.end_position = end_position
+     self.is_impossible = is_impossible
+
+   def __str__(self):
+     return self.__repr__()
+
+   def __repr__(self):
+     s = ""
+     s += "qas_id: %s" % (tokenization.printable_text(self.qas_id))
+     s += ", question_text: %s" % (
+         tokenization.printable_text(self.question_text))
+     s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
+     if self.start_position:
+       s += ", start_position: %d" % (self.start_position)
+     if self.end_position:
+       s += ", end_position: %d" % (self.end_position)
+     if self.is_impossible:
+       s += ", is_impossible: %r" % (self.is_impossible)
+     return s
+
+
+ class InputFeatures(object):
+   """A single set of features of data."""
+
+   def __init__(self,
+                unique_id,
+                example_index,
+                doc_span_index,
+                tokens,
+                token_to_orig_map,
+                token_is_max_context,
+                input_ids,
+                input_mask,
+                segment_ids,
+                start_position=None,
+                end_position=None,
+                is_impossible=None):
+     self.unique_id = unique_id
+     self.example_index = example_index
+     self.doc_span_index = doc_span_index
+     self.tokens = tokens
+     self.token_to_orig_map = token_to_orig_map
+     self.token_is_max_context = token_is_max_context
+     self.input_ids = input_ids
+     self.input_mask = input_mask
+     self.segment_ids = segment_ids
+     self.start_position = start_position
+     self.end_position = end_position
+     self.is_impossible = is_impossible
+
+
+ def read_squad_examples(input_file, is_training):
+   """Read a SQuAD json file into a list of SquadExample."""
+   with tf.gfile.Open(input_file, "r") as reader:
+     input_data = json.load(reader)["data"]
+
+   def is_whitespace(c):
+     if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
+       return True
+     return False
+
+   examples = []
+   for entry in input_data:
+     for paragraph in entry["paragraphs"]:
+       paragraph_text = paragraph["context"]
+       doc_tokens = []
+       char_to_word_offset = []
+       prev_is_whitespace = True
+       for c in paragraph_text:
+         if is_whitespace(c):
+           prev_is_whitespace = True
+         else:
+           if prev_is_whitespace:
+             doc_tokens.append(c)
+           else:
+             doc_tokens[-1] += c
+           prev_is_whitespace = False
+         char_to_word_offset.append(len(doc_tokens) - 1)
+
+       for qa in paragraph["qas"]:
+         qas_id = qa["id"]
+         question_text = qa["question"]
+         start_position = None
+         end_position = None
+         orig_answer_text = None
+         is_impossible = False
+         if is_training:
+
+           if FLAGS.version_2_with_negative:
+             is_impossible = qa["is_impossible"]
+           if (len(qa["answers"]) != 1) and (not is_impossible):
+             raise ValueError(
+                 "For training, each question should have exactly 1 answer.")
+           if not is_impossible:
+             answer = qa["answers"][0]
+             orig_answer_text = answer["text"]
+             answer_offset = answer["answer_start"]
+             answer_length = len(orig_answer_text)
+             start_position = char_to_word_offset[answer_offset]
+             end_position = char_to_word_offset[answer_offset + answer_length -
+                                                1]
+             # Only add answers where the text can be exactly recovered from the
+             # document. If this CAN'T happen it's likely due to weird Unicode
+             # stuff so we will just skip the example.
+             #
+             # Note that this means for training mode, every example is NOT
+             # guaranteed to be preserved.
+             actual_text = " ".join(
+                 doc_tokens[start_position:(end_position + 1)])
+             cleaned_answer_text = " ".join(
+                 tokenization.whitespace_tokenize(orig_answer_text))
+             if actual_text.find(cleaned_answer_text) == -1:
+               tf.logging.warning("Could not find answer: '%s' vs. '%s'",
+                                  actual_text, cleaned_answer_text)
+               continue
+           else:
+             start_position = -1
+             end_position = -1
+             orig_answer_text = ""
+
+         example = SquadExample(
+             qas_id=qas_id,
+             question_text=question_text,
+             doc_tokens=doc_tokens,
+             orig_answer_text=orig_answer_text,
+             start_position=start_position,
+             end_position=end_position,
+             is_impossible=is_impossible)
+         examples.append(example)
+
+   return examples
+
+
+ def convert_examples_to_features(examples, tokenizer, max_seq_length,
+                                  doc_stride, max_query_length, is_training,
+                                  output_fn):
+   """Loads a data file into a list of `InputBatch`s."""
+
+   unique_id = 1000000000
+
+   for (example_index, example) in enumerate(examples):
+     query_tokens = tokenizer.tokenize(example.question_text)
+
+     if len(query_tokens) > max_query_length:
+       query_tokens = query_tokens[0:max_query_length]
+
+     tok_to_orig_index = []
+     orig_to_tok_index = []
+     all_doc_tokens = []
+     for (i, token) in enumerate(example.doc_tokens):
+       orig_to_tok_index.append(len(all_doc_tokens))
+       sub_tokens = tokenizer.tokenize(token)
+       for sub_token in sub_tokens:
+         tok_to_orig_index.append(i)
+         all_doc_tokens.append(sub_token)
+
+     tok_start_position = None
+     tok_end_position = None
+     if is_training and example.is_impossible:
+       tok_start_position = -1
+       tok_end_position = -1
+     if is_training and not example.is_impossible:
+       tok_start_position = orig_to_tok_index[example.start_position]
+       if example.end_position < len(example.doc_tokens) - 1:
+         tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
+       else:
+         tok_end_position = len(all_doc_tokens) - 1
+       (tok_start_position, tok_end_position) = _improve_answer_span(
+           all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
+           example.orig_answer_text)
+
+     # The -3 accounts for [CLS], [SEP] and [SEP]
+     max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
+
+     # We can have documents that are longer than the maximum sequence length.
+     # To deal with this we do a sliding window approach, where we take chunks
+     # of up to our max length with a stride of `doc_stride`.
+     _DocSpan = collections.namedtuple(  # pylint: disable=invalid-name
+         "DocSpan", ["start", "length"])
+     doc_spans = []
+     start_offset = 0
+     while start_offset < len(all_doc_tokens):
+       length = len(all_doc_tokens) - start_offset
+       if length > max_tokens_for_doc:
+         length = max_tokens_for_doc
+       doc_spans.append(_DocSpan(start=start_offset, length=length))
+       if start_offset + length == len(all_doc_tokens):
+         break
+       start_offset += min(length, doc_stride)
+
+     for (doc_span_index, doc_span) in enumerate(doc_spans):
+       tokens = []
+       token_to_orig_map = {}
+       token_is_max_context = {}
+       segment_ids = []
+       tokens.append("[CLS]")
+       segment_ids.append(0)
+       for token in query_tokens:
+         tokens.append(token)
+         segment_ids.append(0)
+       tokens.append("[SEP]")
+       segment_ids.append(0)
+
+       for i in range(doc_span.length):
+         split_token_index = doc_span.start + i
+         token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
+
+         is_max_context = _check_is_max_context(doc_spans, doc_span_index,
+                                                split_token_index)
+         token_is_max_context[len(tokens)] = is_max_context
+         tokens.append(all_doc_tokens[split_token_index])
+         segment_ids.append(1)
+       tokens.append("[SEP]")
+       segment_ids.append(1)
+
+       input_ids = tokenizer.convert_tokens_to_ids(tokens)
+
+       # The mask has 1 for real tokens and 0 for padding tokens. Only real
+       # tokens are attended to.
+       input_mask = [1] * len(input_ids)
+
+       # Zero-pad up to the sequence length.
+       while len(input_ids) < max_seq_length:
+         input_ids.append(0)
+         input_mask.append(0)
+         segment_ids.append(0)
+
+       assert len(input_ids) == max_seq_length
+       assert len(input_mask) == max_seq_length
+       assert len(segment_ids) == max_seq_length
+
+       start_position = None
+       end_position = None
+       if is_training and not example.is_impossible:
+         # For training, if our document chunk does not contain an annotation
+         # we throw it out, since there is nothing to predict.
+         doc_start = doc_span.start
+         doc_end = doc_span.start + doc_span.length - 1
+         out_of_span = False
+         if not (tok_start_position >= doc_start and
+                 tok_end_position <= doc_end):
+           out_of_span = True
+         if out_of_span:
+           start_position = 0
+           end_position = 0
+         else:
+           doc_offset = len(query_tokens) + 2
+           start_position = tok_start_position - doc_start + doc_offset
+           end_position = tok_end_position - doc_start + doc_offset
+
+       if is_training and example.is_impossible:
+         start_position = 0
+         end_position = 0
+
+       if example_index < 20:
+         tf.logging.info("*** Example ***")
+         tf.logging.info("unique_id: %s" % (unique_id))
+         tf.logging.info("example_index: %s" % (example_index))
+         tf.logging.info("doc_span_index: %s" % (doc_span_index))
+         tf.logging.info("tokens: %s" % " ".join(
+             [tokenization.printable_text(x) for x in tokens]))
+         tf.logging.info("token_to_orig_map: %s" % " ".join(
+             ["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))
+         tf.logging.info("token_is_max_context: %s" % " ".join([
+             "%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context)
+         ]))
+         tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
+         tf.logging.info(
+             "input_mask: %s" % " ".join([str(x) for x in input_mask]))
+         tf.logging.info(
+             "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
+         if is_training and example.is_impossible:
+           tf.logging.info("impossible example")
+         if is_training and not example.is_impossible:
+           answer_text = " ".join(tokens[start_position:(end_position + 1)])
+           tf.logging.info("start_position: %d" % (start_position))
+           tf.logging.info("end_position: %d" % (end_position))
+           tf.logging.info(
+               "answer: %s" % (tokenization.printable_text(answer_text)))
+
+       feature = InputFeatures(
+           unique_id=unique_id,
+           example_index=example_index,
+           doc_span_index=doc_span_index,
+           tokens=tokens,
+           token_to_orig_map=token_to_orig_map,
+           token_is_max_context=token_is_max_context,
+           input_ids=input_ids,
+           input_mask=input_mask,
+           segment_ids=segment_ids,
+           start_position=start_position,
+           end_position=end_position,
+           is_impossible=example.is_impossible)
+
+       # Run callback
+       output_fn(feature)
+
+       unique_id += 1
+
+
+ def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
+                          orig_answer_text):
+   """Returns tokenized answer spans that better match the annotated answer."""
+
+   # The SQuAD annotations are character based. We first project them to
+   # whitespace-tokenized words. But then after WordPiece tokenization, we can
+   # often find a "better match". For example:
+   #
+   # Question: What year was John Smith born?
+   # Context: The leader was John Smith (1895-1943).
+   # Answer: 1895
+   #
+   # The original whitespace-tokenized answer will be "(1895-1943).". However
+   # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
+   # the exact answer, 1895.
+   #
+   # However, this is not always possible. Consider the following:
+   #
+   # Question: What country is the top exporter of electronics?
+   # Context: The Japanese electronics industry is the largest in the world.
+   # Answer: Japan
+   #
+   # In this case, the annotator chose "Japan" as a character sub-span of
+   # the word "Japanese". Since our WordPiece tokenizer does not split
+   # "Japanese", we just use "Japanese" as the annotation. This is fairly rare
+   # in SQuAD, but does happen.
+   tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
+
+   for new_start in range(input_start, input_end + 1):
+     for new_end in range(input_end, new_start - 1, -1):
+       text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
+       if text_span == tok_answer_text:
+         return (new_start, new_end)
+
+   return (input_start, input_end)
+
+
+ def _check_is_max_context(doc_spans, cur_span_index, position):
+   """Check if this is the 'max context' doc span for the token."""
+
+   # Because of the sliding window approach taken to scoring documents, a single
+   # token can appear in multiple document spans. E.g.
+   #  Doc: the man went to the store and bought a gallon of milk
+   #  Span A: the man went to the
+   #  Span B: to the store and bought
+   #  Span C: and bought a gallon of
+   #  ...
+   #
+   # Now the word 'bought' will have two scores from spans B and C. We only
+   # want to consider the score with "maximum context", which we define as
+   # the *minimum* of its left and right context (the *sum* of left and
+   # right context will always be the same, of course).
+   #
+   # In the example the maximum context for 'bought' would be span C since
+   # it has 1 left context and 3 right context, while span B has 4 left context
+   # and 0 right context.
+   best_score = None
+   best_span_index = None
+   for (span_index, doc_span) in enumerate(doc_spans):
+     end = doc_span.start + doc_span.length - 1
+     if position < doc_span.start:
+       continue
+     if position > end:
+       continue
+     num_left_context = position - doc_span.start
+     num_right_context = end - position
+     score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
+     if best_score is None or score > best_score:
+       best_score = score
+       best_span_index = span_index
+
+   return cur_span_index == best_span_index
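The scoring rule above can be exercised directly with the "gallon of milk" example from the comment. A self-contained sketch that re-implements the same min(left, right) + 0.01 * length score in plain Python; the three spans below are the hypothetical A, B, C from the comment, with 'bought' at token position 7:

    import collections

    DocSpan = collections.namedtuple("DocSpan", ["start", "length"])

    def check_is_max_context(doc_spans, cur_span_index, position):
        # Same score as above: min(left context, right context) + 0.01 * length.
        best_score, best_span_index = None, None
        for span_index, doc_span in enumerate(doc_spans):
            end = doc_span.start + doc_span.length - 1
            if position < doc_span.start or position > end:
                continue
            score = (min(position - doc_span.start, end - position)
                     + 0.01 * doc_span.length)
            if best_score is None or score > best_score:
                best_score, best_span_index = score, span_index
        return cur_span_index == best_span_index

    spans = [DocSpan(0, 5), DocSpan(3, 5), DocSpan(6, 5)]  # spans A, B, C
    print([check_is_max_context(spans, i, 7) for i in range(3)])
    # [False, False, True]: span C keeps 'bought' with the most context.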
+
+
+ def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
+                  use_one_hot_embeddings):
+   """Creates a classification model."""
+   model = modeling.BertModel(
+       config=bert_config,
+       is_training=is_training,
+       input_ids=input_ids,
+       input_mask=input_mask,
+       token_type_ids=segment_ids,
+       use_one_hot_embeddings=use_one_hot_embeddings)
+
+   final_hidden = model.get_sequence_output()
+
+   final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)
+   batch_size = final_hidden_shape[0]
+   seq_length = final_hidden_shape[1]
+   hidden_size = final_hidden_shape[2]
+
+   output_weights = tf.get_variable(
+       "cls/squad/output_weights", [2, hidden_size],
+       initializer=tf.truncated_normal_initializer(stddev=0.02))
+
+   output_bias = tf.get_variable(
+       "cls/squad/output_bias", [2], initializer=tf.zeros_initializer())
+
+   final_hidden_matrix = tf.reshape(final_hidden,
+                                    [batch_size * seq_length, hidden_size])
+   logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)
+   logits = tf.nn.bias_add(logits, output_bias)
+
+   logits = tf.reshape(logits, [batch_size, seq_length, 2])
+   logits = tf.transpose(logits, [2, 0, 1])
+
+   unstacked_logits = tf.unstack(logits, axis=0)
+
+   (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])
+
+   return (start_logits, end_logits)
+
+
+ def model_fn_builder(bert_config, init_checkpoint, learning_rate,
+                      num_train_steps, num_warmup_steps, use_tpu,
+                      use_one_hot_embeddings):
+   """Returns `model_fn` closure for TPUEstimator."""
+
+   def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
+     """The `model_fn` for TPUEstimator."""
+
+     tf.logging.info("*** Features ***")
+     for name in sorted(features.keys()):
+       tf.logging.info("  name = %s, shape = %s" % (name, features[name].shape))
+
+     unique_ids = features["unique_ids"]
+     input_ids = features["input_ids"]
+     input_mask = features["input_mask"]
+     segment_ids = features["segment_ids"]
+
+     is_training = (mode == tf.estimator.ModeKeys.TRAIN)
+
+     (start_logits, end_logits) = create_model(
+         bert_config=bert_config,
+         is_training=is_training,
+         input_ids=input_ids,
+         input_mask=input_mask,
+         segment_ids=segment_ids,
+         use_one_hot_embeddings=use_one_hot_embeddings)
+
+     tvars = tf.trainable_variables()
+
+     initialized_variable_names = {}
+     scaffold_fn = None
+     if init_checkpoint:
+       (assignment_map, initialized_variable_names
+       ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
+       if use_tpu:
+
+         def tpu_scaffold():
+           tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
+           return tf.train.Scaffold()
+
+         scaffold_fn = tpu_scaffold
+       else:
+         tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
+
+     tf.logging.info("**** Trainable Variables ****")
+     for var in tvars:
+       init_string = ""
+       if var.name in initialized_variable_names:
+         init_string = ", *INIT_FROM_CKPT*"
+       tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
+                       init_string)
+
+     output_spec = None
+     if mode == tf.estimator.ModeKeys.TRAIN:
+       seq_length = modeling.get_shape_list(input_ids)[1]
+
+       def compute_loss(logits, positions):
+         one_hot_positions = tf.one_hot(
+             positions, depth=seq_length, dtype=tf.float32)
+         log_probs = tf.nn.log_softmax(logits, axis=-1)
+         loss = -tf.reduce_mean(
+             tf.reduce_sum(one_hot_positions * log_probs, axis=-1))
+         return loss
+
+       start_positions = features["start_positions"]
+       end_positions = features["end_positions"]
+
+       start_loss = compute_loss(start_logits, start_positions)
+       end_loss = compute_loss(end_logits, end_positions)
+
+       total_loss = (start_loss + end_loss) / 2.0
+
+       train_op = optimization.create_optimizer(
+           total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
+
+       output_spec = tf.contrib.tpu.TPUEstimatorSpec(
+           mode=mode,
+           loss=total_loss,
+           train_op=train_op,
+           scaffold_fn=scaffold_fn)
+     elif mode == tf.estimator.ModeKeys.PREDICT:
+       predictions = {
+           "unique_ids": unique_ids,
+           "start_logits": start_logits,
+           "end_logits": end_logits,
+       }
+       output_spec = tf.contrib.tpu.TPUEstimatorSpec(
+           mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
+     else:
+       raise ValueError(
+           "Only TRAIN and PREDICT modes are supported: %s" % (mode))
+
+     return output_spec
+
+   return model_fn
+
+
+ def input_fn_builder(input_file, seq_length, is_training, drop_remainder):
+   """Creates an `input_fn` closure to be passed to TPUEstimator."""
+
+   name_to_features = {
+       "unique_ids": tf.FixedLenFeature([], tf.int64),
+       "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
+       "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
+       "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
+   }
+
+   if is_training:
+     name_to_features["start_positions"] = tf.FixedLenFeature([], tf.int64)
+     name_to_features["end_positions"] = tf.FixedLenFeature([], tf.int64)
+
+   def _decode_record(record, name_to_features):
+     """Decodes a record to a TensorFlow example."""
+     example = tf.parse_single_example(record, name_to_features)
+
+     # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
+     # So cast all int64 to int32.
+     for name in list(example.keys()):
+       t = example[name]
+       if t.dtype == tf.int64:
+         t = tf.to_int32(t)
+       example[name] = t
+
+     return example
+
+   def input_fn(params):
+     """The actual input function."""
+     batch_size = params["batch_size"]
+
+     # For training, we want a lot of parallel reading and shuffling.
+     # For eval, we want no shuffling and parallel reading doesn't matter.
+     d = tf.data.TFRecordDataset(input_file)
+     if is_training:
+       d = d.repeat()
+       d = d.shuffle(buffer_size=100)
+
+     d = d.apply(
+         tf.contrib.data.map_and_batch(
+             lambda record: _decode_record(record, name_to_features),
+             batch_size=batch_size,
+             drop_remainder=drop_remainder))
+
+     return d
+
+   return input_fn
+
+
+ RawResult = collections.namedtuple("RawResult",
+                                    ["unique_id", "start_logits", "end_logits"])
+
+
+ def write_predictions(all_examples, all_features, all_results, n_best_size,
+                       max_answer_length, do_lower_case, output_prediction_file,
+                       output_nbest_file, output_null_log_odds_file):
+   """Write final predictions to the json file and log-odds of null if needed."""
+   tf.logging.info("Writing predictions to: %s" % (output_prediction_file))
+   tf.logging.info("Writing nbest to: %s" % (output_nbest_file))
+
+   example_index_to_features = collections.defaultdict(list)
+   for feature in all_features:
+     example_index_to_features[feature.example_index].append(feature)
+
+   unique_id_to_result = {}
+   for result in all_results:
+     unique_id_to_result[result.unique_id] = result
+
+   _PrelimPrediction = collections.namedtuple(  # pylint: disable=invalid-name
+       "PrelimPrediction",
+       ["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
+
+   all_predictions = collections.OrderedDict()
+   all_nbest_json = collections.OrderedDict()
+   scores_diff_json = collections.OrderedDict()
+
+   for (example_index, example) in enumerate(all_examples):
+     features = example_index_to_features[example_index]
+
+     prelim_predictions = []
+     # keep track of the minimum score of null start+end of position 0
+     score_null = 1000000  # large and positive
+     min_null_feature_index = 0  # the paragraph slice with min null score
+     null_start_logit = 0  # the start logit at the slice with min null score
+     null_end_logit = 0  # the end logit at the slice with min null score
+     for (feature_index, feature) in enumerate(features):
+       result = unique_id_to_result[feature.unique_id]
+       start_indexes = _get_best_indexes(result.start_logits, n_best_size)
+       end_indexes = _get_best_indexes(result.end_logits, n_best_size)
+       # if we could have irrelevant answers, get the min score of irrelevant
+       if FLAGS.version_2_with_negative:
+         feature_null_score = result.start_logits[0] + result.end_logits[0]
+         if feature_null_score < score_null:
+           score_null = feature_null_score
+           min_null_feature_index = feature_index
+           null_start_logit = result.start_logits[0]
+           null_end_logit = result.end_logits[0]
+       for start_index in start_indexes:
+         for end_index in end_indexes:
+           # We could hypothetically create invalid predictions, e.g., predict
+           # that the start of the span is in the question. We throw out all
+           # invalid predictions.
+           if start_index >= len(feature.tokens):
+             continue
+           if end_index >= len(feature.tokens):
+             continue
+           if start_index not in feature.token_to_orig_map:
+             continue
+           if end_index not in feature.token_to_orig_map:
+             continue
+           if not feature.token_is_max_context.get(start_index, False):
+             continue
+           if end_index < start_index:
+             continue
+           length = end_index - start_index + 1
+           if length > max_answer_length:
+             continue
+           prelim_predictions.append(
+               _PrelimPrediction(
+                   feature_index=feature_index,
+                   start_index=start_index,
+                   end_index=end_index,
+                   start_logit=result.start_logits[start_index],
+                   end_logit=result.end_logits[end_index]))
+
+     if FLAGS.version_2_with_negative:
+       prelim_predictions.append(
+           _PrelimPrediction(
+               feature_index=min_null_feature_index,
+               start_index=0,
+               end_index=0,
+               start_logit=null_start_logit,
+               end_logit=null_end_logit))
+     prelim_predictions = sorted(
+         prelim_predictions,
+         key=lambda x: (x.start_logit + x.end_logit),
+         reverse=True)
+
+     _NbestPrediction = collections.namedtuple(  # pylint: disable=invalid-name
+         "NbestPrediction", ["text", "start_logit", "end_logit"])
+
+     seen_predictions = {}
+     nbest = []
+     for pred in prelim_predictions:
+       if len(nbest) >= n_best_size:
+         break
+       feature = features[pred.feature_index]
+       if pred.start_index > 0:  # this is a non-null prediction
+         tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
+         orig_doc_start = feature.token_to_orig_map[pred.start_index]
+         orig_doc_end = feature.token_to_orig_map[pred.end_index]
+         orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
+         tok_text = " ".join(tok_tokens)
+
+         # De-tokenize WordPieces that have been split off.
+         tok_text = tok_text.replace(" ##", "")
+         tok_text = tok_text.replace("##", "")
+
+         # Clean whitespace
+         tok_text = tok_text.strip()
+         tok_text = " ".join(tok_text.split())
+         orig_text = " ".join(orig_tokens)
+
+         final_text = get_final_text(tok_text, orig_text, do_lower_case)
+         if final_text in seen_predictions:
+           continue
+
+         seen_predictions[final_text] = True
+       else:
+         final_text = ""
+         seen_predictions[final_text] = True
+
+       nbest.append(
+           _NbestPrediction(
+               text=final_text,
+               start_logit=pred.start_logit,
+               end_logit=pred.end_logit))
+
+     # if we didn't include the empty option in the n-best, include it
+     if FLAGS.version_2_with_negative:
+       if "" not in seen_predictions:
+         nbest.append(
+             _NbestPrediction(
+                 text="", start_logit=null_start_logit,
+                 end_logit=null_end_logit))
+     # In very rare edge cases we could have no valid predictions. So we
+     # just create a nonce prediction in this case to avoid failure.
+     if not nbest:
+       nbest.append(
+           _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
+
+     assert len(nbest) >= 1
+
+     total_scores = []
+     best_non_null_entry = None
+     for entry in nbest:
+       total_scores.append(entry.start_logit + entry.end_logit)
+       if not best_non_null_entry:
+         if entry.text:
+           best_non_null_entry = entry
+
+     probs = _compute_softmax(total_scores)
+
+     nbest_json = []
+     for (i, entry) in enumerate(nbest):
+       output = collections.OrderedDict()
+       output["text"] = entry.text
+       output["probability"] = probs[i]
+       output["start_logit"] = entry.start_logit
+       output["end_logit"] = entry.end_logit
+       nbest_json.append(output)
+
+     assert len(nbest_json) >= 1
+
+     if not FLAGS.version_2_with_negative:
+       all_predictions[example.qas_id] = nbest_json[0]["text"]
+     else:
+       # predict "" iff the null score - the score of best non-null > threshold
+       score_diff = score_null - best_non_null_entry.start_logit - (
+           best_non_null_entry.end_logit)
+       scores_diff_json[example.qas_id] = score_diff
+       if score_diff > FLAGS.null_score_diff_threshold:
+         all_predictions[example.qas_id] = ""
+       else:
+         all_predictions[example.qas_id] = best_non_null_entry.text
+
+     all_nbest_json[example.qas_id] = nbest_json
+
+   with tf.gfile.GFile(output_prediction_file, "w") as writer:
+     writer.write(json.dumps(all_predictions, indent=4) + "\n")
+
+   with tf.gfile.GFile(output_nbest_file, "w") as writer:
+     writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
+
+   if FLAGS.version_2_with_negative:
+     with tf.gfile.GFile(output_null_log_odds_file, "w") as writer:
+       writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
+
+
+ def get_final_text(pred_text, orig_text, do_lower_case):
+   """Project the tokenized prediction back to the original text."""
+
+   # When we created the data, we kept track of the alignment between original
+   # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
+   # now `orig_text` contains the span of our original text corresponding to the
+   # span that we predicted.
+   #
+   # However, `orig_text` may contain extra characters that we don't want in
+   # our prediction.
+   #
+   # For example, let's say:
+   #   pred_text = steve smith
+   #   orig_text = Steve Smith's
+   #
+   # We don't want to return `orig_text` because it contains the extra "'s".
+   #
+   # We don't want to return `pred_text` because it's already been normalized
+   # (the SQuAD eval script also does punctuation stripping/lower casing but
+   # our tokenizer does additional normalization like stripping accent
+   # characters).
+   #
+   # What we really want to return is "Steve Smith".
+   #
+   # Therefore, we have to apply a semi-complicated alignment heuristic between
+   # `pred_text` and `orig_text` to get a character-to-character alignment. This
+   # can fail in certain cases in which case we just return `orig_text`.
+
+   def _strip_spaces(text):
+     ns_chars = []
+     ns_to_s_map = collections.OrderedDict()
+     for (i, c) in enumerate(text):
+       if c == " ":
+         continue
+       ns_to_s_map[len(ns_chars)] = i
+       ns_chars.append(c)
+     ns_text = "".join(ns_chars)
+     return (ns_text, ns_to_s_map)
+
+   # We first tokenize `orig_text`, strip whitespace from the result
+   # and `pred_text`, and check if they are the same length. If they are
+   # NOT the same length, the heuristic has failed. If they are the same
+   # length, we assume the characters are one-to-one aligned.
+   tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)
+
+   tok_text = " ".join(tokenizer.tokenize(orig_text))
+
+   start_position = tok_text.find(pred_text)
+   if start_position == -1:
+     if FLAGS.verbose_logging:
+       tf.logging.info(
+           "Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
+     return orig_text
+   end_position = start_position + len(pred_text) - 1
+
+   (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
+   (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
+
+   if len(orig_ns_text) != len(tok_ns_text):
+     if FLAGS.verbose_logging:
+       tf.logging.info("Length not equal after stripping spaces: '%s' vs '%s'",
+                       orig_ns_text, tok_ns_text)
+     return orig_text
+
+   # We then project the characters in `pred_text` back to `orig_text` using
+   # the character-to-character alignment.
+   tok_s_to_ns_map = {}
+   for (i, tok_index) in six.iteritems(tok_ns_to_s_map):
+     tok_s_to_ns_map[tok_index] = i
+
+   orig_start_position = None
+   if start_position in tok_s_to_ns_map:
+     ns_start_position = tok_s_to_ns_map[start_position]
+     if ns_start_position in orig_ns_to_s_map:
+       orig_start_position = orig_ns_to_s_map[ns_start_position]
+
+   if orig_start_position is None:
+     if FLAGS.verbose_logging:
+       tf.logging.info("Couldn't map start position")
+     return orig_text
+
+   orig_end_position = None
+   if end_position in tok_s_to_ns_map:
+     ns_end_position = tok_s_to_ns_map[end_position]
+     if ns_end_position in orig_ns_to_s_map:
+       orig_end_position = orig_ns_to_s_map[ns_end_position]
+
+   if orig_end_position is None:
+     if FLAGS.verbose_logging:
+       tf.logging.info("Couldn't map end position")
+     return orig_text
+
+   output_text = orig_text[orig_start_position:(orig_end_position + 1)]
+   return output_text
+
+
+ def _get_best_indexes(logits, n_best_size):
+   """Get the n-best logits from a list."""
+   index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
+
+   best_indexes = []
+   for i in range(len(index_and_score)):
+     if i >= n_best_size:
+       break
+     best_indexes.append(index_and_score[i][0])
+   return best_indexes
+
+
+ def _compute_softmax(scores):
+   """Compute softmax probability over raw logits."""
+   if not scores:
+     return []
+
+   max_score = None
+   for score in scores:
+     if max_score is None or score > max_score:
+       max_score = score
+
+   exp_scores = []
+   total_sum = 0.0
+   for score in scores:
+     x = math.exp(score - max_score)
+     exp_scores.append(x)
+     total_sum += x
+
+   probs = []
+   for score in exp_scores:
+     probs.append(score / total_sum)
+   return probs
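A quick usage sketch of the two helpers above, assuming this file is importable as `run_squad` (which requires its own imports such as `modeling` and `tokenization` to resolve):

    from run_squad import _get_best_indexes, _compute_softmax

    logits = [0.1, 3.2, -0.7, 1.1]
    print(_get_best_indexes(logits, n_best_size=2))  # [1, 3]

    probs = _compute_softmax([3.2, 1.1])
    print(probs, sum(probs))  # approx. [0.891, 0.109], summing to 1.0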
+
+
+ class FeatureWriter(object):
+   """Writes InputFeature to TF example file."""
+
+   def __init__(self, filename, is_training):
+     self.filename = filename
+     self.is_training = is_training
+     self.num_features = 0
+     self._writer = tf.python_io.TFRecordWriter(filename)
+
+   def process_feature(self, feature):
+     """Write an InputFeature to the TFRecordWriter as a tf.train.Example."""
+     self.num_features += 1
+
+     def create_int_feature(values):
+       feature = tf.train.Feature(
+           int64_list=tf.train.Int64List(value=list(values)))
+       return feature
+
+     features = collections.OrderedDict()
+     features["unique_ids"] = create_int_feature([feature.unique_id])
+     features["input_ids"] = create_int_feature(feature.input_ids)
+     features["input_mask"] = create_int_feature(feature.input_mask)
+     features["segment_ids"] = create_int_feature(feature.segment_ids)
+
+     if self.is_training:
+       features["start_positions"] = create_int_feature([feature.start_position])
+       features["end_positions"] = create_int_feature([feature.end_position])
+       impossible = 0
+       if feature.is_impossible:
+         impossible = 1
+       features["is_impossible"] = create_int_feature([impossible])
+
+     tf_example = tf.train.Example(features=tf.train.Features(feature=features))
+     self._writer.write(tf_example.SerializeToString())
+
+   def close(self):
+     self._writer.close()
+
+
+ def validate_flags_or_throw(bert_config):
+   """Validate the input FLAGS or throw an exception."""
+   tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
+                                                 FLAGS.init_checkpoint)
+
+   if not FLAGS.do_train and not FLAGS.do_predict:
+     raise ValueError("At least one of `do_train` or `do_predict` must be True.")
+
+   if FLAGS.do_train:
+     if not FLAGS.train_file:
+       raise ValueError(
+           "If `do_train` is True, then `train_file` must be specified.")
+   if FLAGS.do_predict:
+     if not FLAGS.predict_file:
+       raise ValueError(
+           "If `do_predict` is True, then `predict_file` must be specified.")
+
+   if FLAGS.max_seq_length > bert_config.max_position_embeddings:
+     raise ValueError(
+         "Cannot use sequence length %d because the BERT model "
+         "was only trained up to sequence length %d" %
+         (FLAGS.max_seq_length, bert_config.max_position_embeddings))
+
+   if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:
+     raise ValueError(
+         "The max_seq_length (%d) must be greater than max_query_length "
+         "(%d) + 3" % (FLAGS.max_seq_length, FLAGS.max_query_length))
+
+
+ def main(_):
+   tf.logging.set_verbosity(tf.logging.INFO)
+
+   bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
+
+   validate_flags_or_throw(bert_config)
+
+   tf.gfile.MakeDirs(FLAGS.output_dir)
+
+   tokenizer = tokenization.FullTokenizer(
+       vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
+
+   tpu_cluster_resolver = None
+   if FLAGS.use_tpu and FLAGS.tpu_name:
+     tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
+         FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
+
+   is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
+   run_config = tf.contrib.tpu.RunConfig(
+       cluster=tpu_cluster_resolver,
+       master=FLAGS.master,
+       model_dir=FLAGS.output_dir,
+       save_checkpoints_steps=FLAGS.save_checkpoints_steps,
+       tpu_config=tf.contrib.tpu.TPUConfig(
+           iterations_per_loop=FLAGS.iterations_per_loop,
+           num_shards=FLAGS.num_tpu_cores,
+           per_host_input_for_training=is_per_host))
+
+   train_examples = None
+   num_train_steps = None
+   num_warmup_steps = None
+   if FLAGS.do_train:
+     train_examples = read_squad_examples(
+         input_file=FLAGS.train_file, is_training=True)
+     num_train_steps = int(
+         len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
+     num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
+
+     # Pre-shuffle the input to avoid having to make a very large shuffle
+     # buffer in the `input_fn`.
+     rng = random.Random(12345)
+     rng.shuffle(train_examples)
+
+   model_fn = model_fn_builder(
+       bert_config=bert_config,
+       init_checkpoint=FLAGS.init_checkpoint,
+       learning_rate=FLAGS.learning_rate,
+       num_train_steps=num_train_steps,
+       num_warmup_steps=num_warmup_steps,
+       use_tpu=FLAGS.use_tpu,
+       use_one_hot_embeddings=FLAGS.use_tpu)
+
+   # If TPU is not available, this will fall back to normal Estimator on CPU
+   # or GPU.
+   estimator = tf.contrib.tpu.TPUEstimator(
+       use_tpu=FLAGS.use_tpu,
+       model_fn=model_fn,
+       config=run_config,
+       train_batch_size=FLAGS.train_batch_size,
+       predict_batch_size=FLAGS.predict_batch_size)
+
+   if FLAGS.do_train:
+     # We write to a temporary file to avoid storing very large constant tensors
+     # in memory.
+     train_writer = FeatureWriter(
+         filename=os.path.join(FLAGS.output_dir, "train.tf_record"),
+         is_training=True)
+     convert_examples_to_features(
+         examples=train_examples,
+         tokenizer=tokenizer,
+         max_seq_length=FLAGS.max_seq_length,
+         doc_stride=FLAGS.doc_stride,
+         max_query_length=FLAGS.max_query_length,
+         is_training=True,
+         output_fn=train_writer.process_feature)
+     train_writer.close()
+
+     tf.logging.info("***** Running training *****")
+     tf.logging.info("  Num orig examples = %d", len(train_examples))
+     tf.logging.info("  Num split examples = %d", train_writer.num_features)
+     tf.logging.info("  Batch size = %d", FLAGS.train_batch_size)
+     tf.logging.info("  Num steps = %d", num_train_steps)
+     del train_examples
+
+     train_input_fn = input_fn_builder(
+         input_file=train_writer.filename,
+         seq_length=FLAGS.max_seq_length,
+         is_training=True,
+         drop_remainder=True)
+     estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
+
+   if FLAGS.do_predict:
+     eval_examples = read_squad_examples(
+         input_file=FLAGS.predict_file, is_training=False)
+
+     eval_writer = FeatureWriter(
+         filename=os.path.join(FLAGS.output_dir, "eval.tf_record"),
+         is_training=False)
+     eval_features = []
+
+     def append_feature(feature):
+       eval_features.append(feature)
+       eval_writer.process_feature(feature)
+
+     convert_examples_to_features(
+         examples=eval_examples,
+         tokenizer=tokenizer,
+         max_seq_length=FLAGS.max_seq_length,
+         doc_stride=FLAGS.doc_stride,
+         max_query_length=FLAGS.max_query_length,
+         is_training=False,
+         output_fn=append_feature)
+     eval_writer.close()
+
+     tf.logging.info("***** Running predictions *****")
+     tf.logging.info("  Num orig examples = %d", len(eval_examples))
+     tf.logging.info("  Num split examples = %d", len(eval_features))
+     tf.logging.info("  Batch size = %d", FLAGS.predict_batch_size)
+
+     all_results = []
+
+     predict_input_fn = input_fn_builder(
+         input_file=eval_writer.filename,
+         seq_length=FLAGS.max_seq_length,
+         is_training=False,
+         drop_remainder=False)
+
+     # If running eval on the TPU, you will need to specify the number of
+     # steps.
+     all_results = []
+     for result in estimator.predict(
+         predict_input_fn, yield_single_examples=True):
+       if len(all_results) % 1000 == 0:
+         tf.logging.info("Processing example: %d" % (len(all_results)))
+       unique_id = int(result["unique_ids"])
+       start_logits = [float(x) for x in result["start_logits"].flat]
+       end_logits = [float(x) for x in result["end_logits"].flat]
+       all_results.append(
+           RawResult(
+               unique_id=unique_id,
+               start_logits=start_logits,
+               end_logits=end_logits))
+
+     output_prediction_file = os.path.join(FLAGS.output_dir, "predictions.json")
+     output_nbest_file = os.path.join(FLAGS.output_dir, "nbest_predictions.json")
+     output_null_log_odds_file = os.path.join(FLAGS.output_dir, "null_odds.json")
+
+     write_predictions(eval_examples, eval_features, all_results,
+                       FLAGS.n_best_size, FLAGS.max_answer_length,
+                       FLAGS.do_lower_case, output_prediction_file,
+                       output_nbest_file, output_null_log_odds_file)
+
+
+ if __name__ == "__main__":
+   flags.mark_flag_as_required("vocab_file")
+   flags.mark_flag_as_required("bert_config_file")
+   flags.mark_flag_as_required("output_dir")
+   tf.app.run()
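The sliding-window chunking in `convert_examples_to_features` is the part of this script most worth seeing in isolation. A pure-Python sketch of the same loop, using the default budget of `max_seq_length=384`, `max_query_length=64` and `doc_stride=128`; the 1000-token document is hypothetical:

    import collections

    DocSpan = collections.namedtuple("DocSpan", ["start", "length"])

    def make_doc_spans(num_tokens, max_tokens_for_doc, doc_stride):
        """Mirror of the sliding-window loop in convert_examples_to_features."""
        doc_spans, start_offset = [], 0
        while start_offset < num_tokens:
            length = min(num_tokens - start_offset, max_tokens_for_doc)
            doc_spans.append(DocSpan(start=start_offset, length=length))
            if start_offset + length == num_tokens:
                break
            start_offset += min(length, doc_stride)
        return doc_spans

    # 384 total positions minus a 64-token query and the three special tokens
    # ([CLS], [SEP], [SEP]) leaves 317 document tokens per chunk.
    print(make_doc_spans(1000, 384 - 64 - 3, 128))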
RIS-DMMI/bert/sample_text.txt ADDED
@@ -0,0 +1,33 @@
+ This text is included to make sure Unicode is handled properly: 力加勝北区ᴵᴺᵀᵃছজটডণত
+ Text should be one-sentence-per-line, with empty lines between documents.
+ This sample text is public domain and was randomly selected from Project Gutenberg.
+
+ The rain had only ceased with the gray streaks of morning at Blazing Star, and the settlement awoke to a moral sense of cleanliness, and the finding of forgotten knives, tin cups, and smaller camp utensils, where the heavy showers had washed away the debris and dust heaps before the cabin doors.
+ Indeed, it was recorded in Blazing Star that a fortunate early riser had once picked up on the highway a solid chunk of gold quartz which the rain had freed from its incumbering soil, and washed into immediate and glittering popularity.
+ Possibly this may have been the reason why early risers in that locality, during the rainy season, adopted a thoughtful habit of body, and seldom lifted their eyes to the rifted or india-ink washed skies above them.
+ "Cass" Beard had risen early that morning, but not with a view to discovery.
+ A leak in his cabin roof,--quite consistent with his careless, improvident habits,--had roused him at 4 A. M., with a flooded "bunk" and wet blankets.
+ The chips from his wood pile refused to kindle a fire to dry his bed-clothes, and he had recourse to a more provident neighbor's to supply the deficiency.
+ This was nearly opposite.
+ Mr. Cassius crossed the highway, and stopped suddenly.
+ Something glittered in the nearest red pool before him.
+ Gold, surely!
+ But, wonderful to relate, not an irregular, shapeless fragment of crude ore, fresh from Nature's crucible, but a bit of jeweler's handicraft in the form of a plain gold ring.
+ Looking at it more attentively, he saw that it bore the inscription, "May to Cass."
+ Like most of his fellow gold-seekers, Cass was superstitious.
+
+ The fountain of classic wisdom, Hypatia herself.
+ As the ancient sage--the name is unimportant to a monk--pumped water nightly that he might study by day, so I, the guardian of cloaks and parasols, at the sacred doors of her lecture-room, imbibe celestial knowledge.
+ From my youth I felt in me a soul above the matter-entangled herd.
+ She revealed to me the glorious fact, that I am a spark of Divinity itself.
+ A fallen star, I am, sir!' continued he, pensively, stroking his lean stomach--'a fallen star!--fallen, if the dignity of philosophy will allow of the simile, among the hogs of the lower world--indeed, even into the hog-bucket itself. Well, after all, I will show you the way to the Archbishop's.
+ There is a philosophic pleasure in opening one's treasures to the modest young.
+ Perhaps you will assist me by carrying this basket of fruit?' And the little man jumped up, put his basket on Philammon's head, and trotted off up a neighbouring street.
+ Philammon followed, half contemptuous, half wondering at what this philosophy might be, which could feed the self-conceit of anything so abject as his ragged little apish guide;
+ but the novel roar and whirl of the street, the perpetual stream of busy faces, the line of curricles, palanquins, laden asses, camels, elephants, which met and passed him, and squeezed him up steps and into doorways, as they threaded their way through the great Moon-gate into the ample street beyond, drove everything from his mind but wondering curiosity, and a vague, helpless dread of that great living wilderness, more terrible than any dead wilderness of sand which he had left behind.
+ Already he longed for the repose, the silence of the Laura--for faces which knew him and smiled upon him; but it was too late to turn back now.
+ His guide held on for more than a mile up the great main street, crossed in the centre of the city, at right angles, by one equally magnificent, at each end of which, miles away, appeared, dim and distant over the heads of the living stream of passengers, the yellow sand-hills of the desert;
+ while at the end of the vista in front of them gleamed the blue harbour, through a network of countless masts.
+ At last they reached the quay at the opposite end of the street;
+ and there burst on Philammon's astonished eyes a vast semicircle of blue sea, ringed with palaces and towers.
+ He stopped involuntarily; and his little guide stopped also, and looked askance at the young monk, to watch the effect which that grand panorama should produce on him.
RIS-DMMI/bert/tokenization.py ADDED
@@ -0,0 +1,399 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes."""
16
+
17
+ from __future__ import absolute_import
18
+ from __future__ import division
19
+ from __future__ import print_function
20
+
21
+ import collections
22
+ import re
23
+ import unicodedata
24
+ import six
25
+ import tensorflow as tf
26
+
27
+
28
+ def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
29
+ """Checks whether the casing config is consistent with the checkpoint name."""
30
+
31
+ # The casing has to be passed in by the user and there is no explicit check
32
+ # as to whether it matches the checkpoint. The casing information probably
33
+ # should have been stored in the bert_config.json file, but it's not, so
34
+ # we have to heuristically detect it to validate.
35
+
36
+ if not init_checkpoint:
37
+ return
38
+
39
+ m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
40
+ if m is None:
41
+ return
42
+
43
+ model_name = m.group(1)
44
+
45
+ lower_models = [
46
+ "uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
47
+ "multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
48
+ ]
49
+
50
+ cased_models = [
51
+ "cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
52
+ "multi_cased_L-12_H-768_A-12"
53
+ ]
54
+
55
+ is_bad_config = False
56
+ if model_name in lower_models and not do_lower_case:
57
+ is_bad_config = True
58
+ actual_flag = "False"
59
+ case_name = "lowercased"
60
+ opposite_flag = "True"
61
+
62
+ if model_name in cased_models and do_lower_case:
63
+ is_bad_config = True
64
+ actual_flag = "True"
65
+ case_name = "cased"
66
+ opposite_flag = "False"
67
+
68
+ if is_bad_config:
69
+ raise ValueError(
70
+ "You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
71
+ "However, `%s` seems to be a %s model, so you "
72
+ "should pass in `--do_lower_case=%s` so that the fine-tuning matches "
73
+ "how the model was pre-trained. If this error is wrong, please "
74
+ "just comment out this check." % (actual_flag, init_checkpoint,
75
+ model_name, case_name, opposite_flag))
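For illustration, a small sketch (not from the original file) of the heuristic above: the regex pulls the model directory name out of the checkpoint path, and that name decides which casing flag is expected. The path is hypothetical:

import re

init_checkpoint = "/models/uncased_L-12_H-768_A-12/bert_model.ckpt"  # hypothetical
m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
print(m.group(1))  # "uncased_L-12_H-768_A-12": an uncased model, so the
                   # check above expects --do_lower_case=True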
76
+
77
+
78
+ def convert_to_unicode(text):
79
+ """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
80
+ if six.PY3:
81
+ if isinstance(text, str):
82
+ return text
83
+ elif isinstance(text, bytes):
84
+ return text.decode("utf-8", "ignore")
85
+ else:
86
+ raise ValueError("Unsupported string type: %s" % (type(text)))
87
+ elif six.PY2:
88
+ if isinstance(text, str):
89
+ return text.decode("utf-8", "ignore")
90
+ elif isinstance(text, unicode):
91
+ return text
92
+ else:
93
+ raise ValueError("Unsupported string type: %s" % (type(text)))
94
+ else:
95
+ raise ValueError("Not running on Python 2 or Python 3?")
96
+
97
+
98
+ def printable_text(text):
99
+ """Returns text encoded in a way suitable for print or `tf.logging`."""
100
+
101
+ # These functions want `str` for both Python2 and Python3, but in one case
102
+ # it's a Unicode string and in the other it's a byte string.
103
+ if six.PY3:
104
+ if isinstance(text, str):
105
+ return text
106
+ elif isinstance(text, bytes):
107
+ return text.decode("utf-8", "ignore")
108
+ else:
109
+ raise ValueError("Unsupported string type: %s" % (type(text)))
110
+ elif six.PY2:
111
+ if isinstance(text, str):
112
+ return text
113
+ elif isinstance(text, unicode):
114
+ return text.encode("utf-8")
115
+ else:
116
+ raise ValueError("Unsupported string type: %s" % (type(text)))
117
+ else:
118
+ raise ValueError("Not running on Python 2 or Python 3?")
119
+
120
+
121
+ def load_vocab(vocab_file):
122
+ """Loads a vocabulary file into a dictionary."""
123
+ vocab = collections.OrderedDict()
124
+ index = 0
125
+ with tf.gfile.GFile(vocab_file, "r") as reader:
126
+ while True:
127
+ token = convert_to_unicode(reader.readline())
128
+ if not token:
129
+ break
130
+ token = token.strip()
131
+ vocab[token] = index
132
+ index += 1
133
+ return vocab
134
+
135
+
136
+ def convert_by_vocab(vocab, items):
137
+ """Converts a sequence of [tokens|ids] using the vocab."""
138
+ output = []
139
+ for item in items:
140
+ output.append(vocab[item])
141
+ return output
142
+
143
+
144
+ def convert_tokens_to_ids(vocab, tokens):
145
+ return convert_by_vocab(vocab, tokens)
146
+
147
+
148
+ def convert_ids_to_tokens(inv_vocab, ids):
149
+ return convert_by_vocab(inv_vocab, ids)
150
+
151
+
152
+ def whitespace_tokenize(text):
153
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
154
+ text = text.strip()
155
+ if not text:
156
+ return []
157
+ tokens = text.split()
158
+ return tokens
159
+
160
+
161
+ class FullTokenizer(object):
162
+ """Runs end-to-end tokenization."""
163
+
164
+ def __init__(self, vocab_file, do_lower_case=True):
165
+ self.vocab = load_vocab(vocab_file)
166
+ self.inv_vocab = {v: k for k, v in self.vocab.items()}
167
+ self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
168
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
169
+
170
+ def tokenize(self, text):
171
+ split_tokens = []
172
+ for token in self.basic_tokenizer.tokenize(text):
173
+ for sub_token in self.wordpiece_tokenizer.tokenize(token):
174
+ split_tokens.append(sub_token)
175
+
176
+ return split_tokens
177
+
178
+ def convert_tokens_to_ids(self, tokens):
179
+ return convert_by_vocab(self.vocab, tokens)
180
+
181
+ def convert_ids_to_tokens(self, ids):
182
+ return convert_by_vocab(self.inv_vocab, ids)
183
+
184
+
185
+ class BasicTokenizer(object):
186
+ """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
187
+
188
+ def __init__(self, do_lower_case=True):
189
+ """Constructs a BasicTokenizer.
190
+
191
+ Args:
192
+ do_lower_case: Whether to lower case the input.
193
+ """
194
+ self.do_lower_case = do_lower_case
195
+
196
+ def tokenize(self, text):
197
+ """Tokenizes a piece of text."""
198
+ text = convert_to_unicode(text)
199
+ text = self._clean_text(text)
200
+
201
+ # This was added on November 1st, 2018 for the multilingual and Chinese
202
+ # models. This is also applied to the English models now, but it doesn't
203
+ # matter since the English models were not trained on any Chinese data
204
+ # and generally don't have any Chinese data in them (there are Chinese
205
+ # characters in the vocabulary because Wikipedia does have some Chinese
206
+ # words in the English Wikipedia).
207
+ text = self._tokenize_chinese_chars(text)
208
+
209
+ orig_tokens = whitespace_tokenize(text)
210
+ split_tokens = []
211
+ for token in orig_tokens:
212
+ if self.do_lower_case:
213
+ token = token.lower()
214
+ token = self._run_strip_accents(token)
215
+ split_tokens.extend(self._run_split_on_punc(token))
216
+
217
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
218
+ return output_tokens
219
+
220
+ def _run_strip_accents(self, text):
221
+ """Strips accents from a piece of text."""
222
+ text = unicodedata.normalize("NFD", text)
223
+ output = []
224
+ for char in text:
225
+ cat = unicodedata.category(char)
226
+ if cat == "Mn":
227
+ continue
228
+ output.append(char)
229
+ return "".join(output)
230
+
231
+ def _run_split_on_punc(self, text):
232
+ """Splits punctuation on a piece of text."""
233
+ chars = list(text)
234
+ i = 0
235
+ start_new_word = True
236
+ output = []
237
+ while i < len(chars):
238
+ char = chars[i]
239
+ if _is_punctuation(char):
240
+ output.append([char])
241
+ start_new_word = True
242
+ else:
243
+ if start_new_word:
244
+ output.append([])
245
+ start_new_word = False
246
+ output[-1].append(char)
247
+ i += 1
248
+
249
+ return ["".join(x) for x in output]
250
+
251
+ def _tokenize_chinese_chars(self, text):
252
+ """Adds whitespace around any CJK character."""
253
+ output = []
254
+ for char in text:
255
+ cp = ord(char)
256
+ if self._is_chinese_char(cp):
257
+ output.append(" ")
258
+ output.append(char)
259
+ output.append(" ")
260
+ else:
261
+ output.append(char)
262
+ return "".join(output)
263
+
264
+ def _is_chinese_char(self, cp):
265
+ """Checks whether CP is the codepoint of a CJK character."""
266
+ # This defines a "chinese character" as anything in the CJK Unicode block:
267
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
268
+ #
269
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
270
+ # despite its name. The modern Korean Hangul alphabet is a different block,
271
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
272
+ # space-separated words, so they are not treated specially and handled
273
+ # like all of the other languages.
274
+ if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
275
+ (cp >= 0x3400 and cp <= 0x4DBF) or #
276
+ (cp >= 0x20000 and cp <= 0x2A6DF) or #
277
+ (cp >= 0x2A700 and cp <= 0x2B73F) or #
278
+ (cp >= 0x2B740 and cp <= 0x2B81F) or #
279
+ (cp >= 0x2B820 and cp <= 0x2CEAF) or
280
+ (cp >= 0xF900 and cp <= 0xFAFF) or #
281
+ (cp >= 0x2F800 and cp <= 0x2FA1F)): #
282
+ return True
283
+
284
+ return False
285
+
286
+ def _clean_text(self, text):
287
+ """Performs invalid character removal and whitespace cleanup on text."""
288
+ output = []
289
+ for char in text:
290
+ cp = ord(char)
291
+ if cp == 0 or cp == 0xfffd or _is_control(char):
292
+ continue
293
+ if _is_whitespace(char):
294
+ output.append(" ")
295
+ else:
296
+ output.append(char)
297
+ return "".join(output)
298
+
299
+
300
+ class WordpieceTokenizer(object):
301
+ """Runs WordPiece tokenization."""
302
+
303
+ def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
304
+ self.vocab = vocab
305
+ self.unk_token = unk_token
306
+ self.max_input_chars_per_word = max_input_chars_per_word
307
+
308
+ def tokenize(self, text):
309
+ """Tokenizes a piece of text into its word pieces.
310
+
311
+ This uses a greedy longest-match-first algorithm to perform tokenization
312
+ using the given vocabulary.
313
+
314
+ For example:
315
+ input = "unaffable"
316
+ output = ["un", "##aff", "##able"]
317
+
318
+ Args:
319
+ text: A single token or whitespace separated tokens. This should have
320
+ already been passed through `BasicTokenizer`.
321
+
322
+ Returns:
323
+ A list of wordpiece tokens.
324
+ """
325
+
326
+ text = convert_to_unicode(text)
327
+
328
+ output_tokens = []
329
+ for token in whitespace_tokenize(text):
330
+ chars = list(token)
331
+ if len(chars) > self.max_input_chars_per_word:
332
+ output_tokens.append(self.unk_token)
333
+ continue
334
+
335
+ is_bad = False
336
+ start = 0
337
+ sub_tokens = []
338
+ while start < len(chars):
339
+ end = len(chars)
340
+ cur_substr = None
341
+ while start < end:
342
+ substr = "".join(chars[start:end])
343
+ if start > 0:
344
+ substr = "##" + substr
345
+ if substr in self.vocab:
346
+ cur_substr = substr
347
+ break
348
+ end -= 1
349
+ if cur_substr is None:
350
+ is_bad = True
351
+ break
352
+ sub_tokens.append(cur_substr)
353
+ start = end
354
+
355
+ if is_bad:
356
+ output_tokens.append(self.unk_token)
357
+ else:
358
+ output_tokens.extend(sub_tokens)
359
+ return output_tokens
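A runnable sketch (not in the original file) tracing the greedy longest-match-first loop above, using the toy vocabulary from the docstring; the ids are arbitrary:

toy_vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
wp = WordpieceTokenizer(vocab=toy_vocab)
print(wp.tokenize("unaffable"))   # ['un', '##aff', '##able']
print(wp.tokenize("unaffablez"))  # ['[UNK]']: an unmatched tail marks the
                                  # whole token as bad, not just the suffix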
360
+
361
+
362
+ def _is_whitespace(char):
363
+ """Checks whether `char` is a whitespace character."""
364
+ # \t, \n, and \r are technically control characters but we treat them
365
+ # as whitespace since they are generally considered as such.
366
+ if char == " " or char == "\t" or char == "\n" or char == "\r":
367
+ return True
368
+ cat = unicodedata.category(char)
369
+ if cat == "Zs":
370
+ return True
371
+ return False
372
+
373
+
374
+ def _is_control(char):
375
+ """Checks whether `char` is a control character."""
376
+ # These are technically control characters but we count them as whitespace
377
+ # characters.
378
+ if char == "\t" or char == "\n" or char == "\r":
379
+ return False
380
+ cat = unicodedata.category(char)
381
+ if cat in ("Cc", "Cf"):
382
+ return True
383
+ return False
384
+
385
+
386
+ def _is_punctuation(char):
387
+ """Checks whether `char` is a punctuation character."""
388
+ cp = ord(char)
389
+ # We treat all non-letter/number ASCII as punctuation.
390
+ # Characters such as "^", "$", and "`" are not in the Unicode
391
+ # Punctuation class but we treat them as punctuation anyways, for
392
+ # consistency.
393
+ if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
394
+ (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
395
+ return True
396
+ cat = unicodedata.category(char)
397
+ if cat.startswith("P"):
398
+ return True
399
+ return False
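Taken together, a minimal end-to-end sketch (not part of the original file): basic tokenization followed by WordPiece and id lookup. It assumes a BERT vocabulary file on disk, e.g. the bert-base-uncased-vocab.txt shipped in this folder, and a TF 1.x runtime for tf.gfile:

tokenizer = FullTokenizer(vocab_file="bert-base-uncased-vocab.txt", do_lower_case=True)
tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")  # lowercased, accents stripped, then WordPiece
ids = tokenizer.convert_tokens_to_ids(tokens)
assert tokenizer.convert_ids_to_tokens(ids) == tokens  # round-trip through the vocab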
RIS-DMMI/bert/tokenization_bert.py ADDED
@@ -0,0 +1,546 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes."""
16
+
17
+
18
+ import collections
19
+ import logging
20
+ import os
21
+ import unicodedata
22
+ from typing import List, Optional
23
+
24
+ from .tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
25
+ import pdb
26
+
27
+ logger = logging.getLogger(__name__)
28
+
29
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
30
+
31
+ PRETRAINED_VOCAB_FILES_MAP = {
32
+ "vocab_file": {
33
+ "bert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
34
+ "bert-large-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
35
+ "bert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
36
+ "bert-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
37
+ "bert-base-multilingual-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
38
+ "bert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
39
+ "bert-base-chinese": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
40
+ "bert-base-german-cased": "https://int-deepset-models-bert.s3.eu-central-1.amazonaws.com/pytorch/bert-base-german-cased-vocab.txt",
41
+ "bert-large-uncased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-vocab.txt",
42
+ "bert-large-cased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-vocab.txt",
43
+ "bert-large-uncased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-vocab.txt",
44
+ "bert-large-cased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-vocab.txt",
45
+ "bert-base-cased-finetuned-mrpc": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-vocab.txt",
46
+ "bert-base-german-dbmdz-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-vocab.txt",
47
+ "bert-base-german-dbmdz-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-uncased-vocab.txt",
48
+ "TurkuNLP/bert-base-finnish-cased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-cased-v1/vocab.txt",
49
+ "TurkuNLP/bert-base-finnish-uncased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-uncased-v1/vocab.txt",
50
+ "wietsedv/bert-base-dutch-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/wietsedv/bert-base-dutch-cased/vocab.txt",
51
+ }
52
+ }
53
+
54
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
55
+ "bert-base-uncased": 512,
56
+ "bert-large-uncased": 512,
57
+ "bert-base-cased": 512,
58
+ "bert-large-cased": 512,
59
+ "bert-base-multilingual-uncased": 512,
60
+ "bert-base-multilingual-cased": 512,
61
+ "bert-base-chinese": 512,
62
+ "bert-base-german-cased": 512,
63
+ "bert-large-uncased-whole-word-masking": 512,
64
+ "bert-large-cased-whole-word-masking": 512,
65
+ "bert-large-uncased-whole-word-masking-finetuned-squad": 512,
66
+ "bert-large-cased-whole-word-masking-finetuned-squad": 512,
67
+ "bert-base-cased-finetuned-mrpc": 512,
68
+ "bert-base-german-dbmdz-cased": 512,
69
+ "bert-base-german-dbmdz-uncased": 512,
70
+ "TurkuNLP/bert-base-finnish-cased-v1": 512,
71
+ "TurkuNLP/bert-base-finnish-uncased-v1": 512,
72
+ "wietsedv/bert-base-dutch-cased": 512,
73
+ }
74
+
75
+ PRETRAINED_INIT_CONFIGURATION = {
76
+ "bert-base-uncased": {"do_lower_case": True},
77
+ "bert-large-uncased": {"do_lower_case": True},
78
+ "bert-base-cased": {"do_lower_case": False},
79
+ "bert-large-cased": {"do_lower_case": False},
80
+ "bert-base-multilingual-uncased": {"do_lower_case": True},
81
+ "bert-base-multilingual-cased": {"do_lower_case": False},
82
+ "bert-base-chinese": {"do_lower_case": False},
83
+ "bert-base-german-cased": {"do_lower_case": False},
84
+ "bert-large-uncased-whole-word-masking": {"do_lower_case": True},
85
+ "bert-large-cased-whole-word-masking": {"do_lower_case": False},
86
+ "bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
87
+ "bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
88
+ "bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
89
+ "bert-base-german-dbmdz-cased": {"do_lower_case": False},
90
+ "bert-base-german-dbmdz-uncased": {"do_lower_case": True},
91
+ "TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
92
+ "TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
93
+ "wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
94
+ }
95
+
96
+
97
+ def load_vocab(vocab_file):
98
+ """Loads a vocabulary file into a dictionary."""
99
+ vocab = collections.OrderedDict()
100
+ with open(vocab_file, "r", encoding="utf-8") as reader:
101
+ tokens = reader.readlines()
102
+ for index, token in enumerate(tokens):
103
+ token = token.rstrip("\n")
104
+ vocab[token] = index
105
+ return vocab
106
+
107
+
108
+ def whitespace_tokenize(text):
109
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
110
+ text = text.strip()
111
+ if not text:
112
+ return []
113
+ tokens = text.split()
114
+ return tokens
115
+
116
+
117
+ class BertTokenizer(PreTrainedTokenizer):
118
+ r"""
119
+ Constructs a BERT tokenizer. Based on WordPiece.
120
+
121
+ This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the methods. Users
122
+ should refer to the superclass for more information regarding methods.
123
+
124
+ Args:
125
+ vocab_file (:obj:`string`):
126
+ File containing the vocabulary.
127
+ do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`):
128
+ Whether to lowercase the input when tokenizing.
129
+ do_basic_tokenize (:obj:`bool`, `optional`, defaults to :obj:`True`):
130
+ Whether to do basic tokenization before WordPiece.
131
+ never_split (:obj:`Iterable`, `optional`, defaults to :obj:`None`):
132
+ Collection of tokens which will never be split during tokenization. Only has an effect when
133
+ :obj:`do_basic_tokenize=True`
134
+ unk_token (:obj:`string`, `optional`, defaults to "[UNK]"):
135
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
136
+ token instead.
137
+ sep_token (:obj:`string`, `optional`, defaults to "[SEP]"):
138
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
139
+ for sequence classification or for a text and a question for question answering.
140
+ It is also used as the last token of a sequence built with special tokens.
141
+ pad_token (:obj:`string`, `optional`, defaults to "[PAD]"):
142
+ The token used for padding, for example when batching sequences of different lengths.
143
+ cls_token (:obj:`string`, `optional`, defaults to "[CLS]"):
144
+ The classifier token which is used when doing sequence classification (classification of the whole
145
+ sequence instead of per-token classification). It is the first token of the sequence when built with
146
+ special tokens.
147
+ mask_token (:obj:`string`, `optional`, defaults to "[MASK]"):
148
+ The token used for masking values. This is the token used when training this model with masked language
149
+ modeling. This is the token which the model will try to predict.
150
+ tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):
151
+ Whether to tokenize Chinese characters.
152
+ This should likely be deactivated for Japanese:
153
+ see: https://github.com/huggingface/transformers/issues/328
154
+ """
155
+
156
+ vocab_files_names = VOCAB_FILES_NAMES
157
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
158
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
159
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
160
+
161
+ def __init__(
162
+ self,
163
+ vocab_file,
164
+ do_lower_case=True,
165
+ do_basic_tokenize=True,
166
+ never_split=None,
167
+ unk_token="[UNK]",
168
+ sep_token="[SEP]",
169
+ pad_token="[PAD]",
170
+ cls_token="[CLS]",
171
+ mask_token="[MASK]",
172
+ tokenize_chinese_chars=True,
173
+ **kwargs
174
+ ):
175
+ super().__init__(
176
+ unk_token=unk_token,
177
+ sep_token=sep_token,
178
+ pad_token=pad_token,
179
+ cls_token=cls_token,
180
+ mask_token=mask_token,
181
+ **kwargs,
182
+ )
183
+
184
+ if not os.path.isfile(vocab_file):
185
+ raise ValueError(
186
+ "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
187
+ "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)
188
+ )
189
+ self.vocab = load_vocab(vocab_file)
190
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
191
+ self.do_basic_tokenize = do_basic_tokenize
192
+ if do_basic_tokenize:
193
+ self.basic_tokenizer = BasicTokenizer(
194
+ do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars
195
+ )
196
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
197
+
198
+ @property
199
+ def vocab_size(self):
200
+ return len(self.vocab)
201
+
202
+ def get_vocab(self):
203
+ return dict(self.vocab, **self.added_tokens_encoder)
204
+
205
+ def _tokenize(self, text):
206
+ split_tokens = []
207
+ if self.do_basic_tokenize:
208
+ for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
209
+
210
+ # If the token is part of the never_split set
211
+ if token in self.basic_tokenizer.never_split:
212
+ split_tokens.append(token)
213
+ else:
214
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
215
+ else:
216
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
217
+ return split_tokens
218
+
219
+ def _convert_token_to_id(self, token):
220
+ """ Converts a token (str) into an id using the vocab. """
221
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
222
+
223
+ def _convert_id_to_token(self, index):
224
+ """Converts an index (integer) into a token (str) using the vocab."""
225
+ return self.ids_to_tokens.get(index, self.unk_token)
226
+
227
+ def convert_tokens_to_string(self, tokens):
228
+ """ Converts a sequence of tokens (string) into a single string. """
229
+ out_string = " ".join(tokens).replace(" ##", "").strip()
230
+ return out_string
231
+
232
+ def build_inputs_with_special_tokens(
233
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
234
+ ) -> List[int]:
235
+ """
236
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks
237
+ by concatenating and adding special tokens.
238
+ A BERT sequence has the following format:
239
+
240
+ - single sequence: ``[CLS] X [SEP]``
241
+ - pair of sequences: ``[CLS] A [SEP] B [SEP]``
242
+
243
+ Args:
244
+ token_ids_0 (:obj:`List[int]`):
245
+ List of IDs to which the special tokens will be added
246
+ token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
247
+ Optional second list of IDs for sequence pairs.
248
+
249
+ Returns:
250
+ :obj:`List[int]`: list of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
251
+ """
252
+ if token_ids_1 is None:
253
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
254
+ cls = [self.cls_token_id]
255
+ sep = [self.sep_token_id]
256
+ return cls + token_ids_0 + sep + token_ids_1 + sep
257
+
258
+ def get_special_tokens_mask(
259
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
260
+ ) -> List[int]:
261
+ """
262
+ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
263
+ special tokens using the tokenizer ``prepare_for_model`` method.
264
+
265
+ Args:
266
+ token_ids_0 (:obj:`List[int]`):
267
+ List of ids.
268
+ token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
269
+ Optional second list of IDs for sequence pairs.
270
+ already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
271
+ Set to True if the token list is already formatted with special tokens for the model
272
+
273
+ Returns:
274
+ :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
275
+ """
276
+
277
+ if already_has_special_tokens:
278
+ if token_ids_1 is not None:
279
+ raise ValueError(
280
+ "You should not supply a second sequence if the provided sequence of "
281
+ "ids is already formatted with special tokens for the model."
282
+ )
283
+ return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))
284
+
285
+ if token_ids_1 is not None:
286
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
287
+ return [1] + ([0] * len(token_ids_0)) + [1]
288
+
289
+ def create_token_type_ids_from_sequences(
290
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
291
+ ) -> List[int]:
292
+ """
293
+ Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
294
+ A BERT sequence pair mask has the following format:
295
+
296
+ ::
297
+
298
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
299
+ | first sequence | second sequence |
300
+
301
+ if token_ids_1 is None, only returns the first portion of the mask (0's).
302
+
303
+ Args:
304
+ token_ids_0 (:obj:`List[int]`):
305
+ List of ids.
306
+ token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
307
+ Optional second list of IDs for sequence pairs.
308
+
309
+ Returns:
310
+ :obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given
311
+ sequence(s).
312
+ """
313
+ sep = [self.sep_token_id]
314
+ cls = [self.cls_token_id]
315
+ # pdb.set_trace()
316
+ if token_ids_1 is None:
317
+ return len(cls + token_ids_0 + sep) * [0]
318
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
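To make the two formats above concrete, a small sketch (not in the original file) that mirrors the arithmetic of build_inputs_with_special_tokens and create_token_type_ids_from_sequences rather than calling them; 101 and 102 are the [CLS]/[SEP] ids in the stock bert-base-uncased vocabulary, and the input sequences are hypothetical:

cls_id, sep_id = 101, 102
a, b = [7, 8], [9]  # two already-encoded sequences

pair_ids = [cls_id] + a + [sep_id] + b + [sep_id]
segment_ids = [0] * (len(a) + 2) + [1] * (len(b) + 1)
print(pair_ids)     # [101, 7, 8, 102, 9, 102] -> "[CLS] A [SEP] B [SEP]"
print(segment_ids)  # [0, 0, 0, 0, 1, 1]       -> segment 0 | segment 1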
319
+
320
+ def save_vocabulary(self, vocab_path):
321
+ """
322
+ Save the tokenizer's vocabulary (one token per line, in index order) and special tokens file to a directory.
323
+
324
+ Args:
325
+ vocab_path (:obj:`str`):
326
+ The directory in which to save the vocabulary.
327
+
328
+ Returns:
329
+ :obj:`Tuple(str)`: Paths to the files saved.
330
+ """
331
+ index = 0
332
+ if os.path.isdir(vocab_path):
333
+ vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES["vocab_file"])
334
+ else:
335
+ vocab_file = vocab_path
336
+ with open(vocab_file, "w", encoding="utf-8") as writer:
337
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
338
+ if index != token_index:
339
+ logger.warning(
340
+ "Saving vocabulary to {}: vocabulary indices are not consecutive."
341
+ " Please check that the vocabulary is not corrupted!".format(vocab_file)
342
+ )
343
+ index = token_index
344
+ writer.write(token + "\n")
345
+ index += 1
346
+ return (vocab_file,)
347
+
348
+
349
+ class BasicTokenizer(object):
350
+ """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
351
+
352
+ def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True):
353
+ """ Constructs a BasicTokenizer.
354
+
355
+ Args:
356
+ **do_lower_case**: Whether to lower case the input.
357
+ **never_split**: (`optional`) list of str
358
+ Kept for backward compatibility purposes.
359
+ Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)
360
+ List of tokens not to split.
361
+ **tokenize_chinese_chars**: (`optional`) boolean (default True)
362
+ Whether to tokenize Chinese characters.
363
+ This should likely be deactivated for Japanese:
364
+ see: https://github.com/huggingface/pytorch-pretrained-BERT/issues/328
365
+ """
366
+ if never_split is None:
367
+ never_split = []
368
+ self.do_lower_case = do_lower_case
369
+ self.never_split = set(never_split)
370
+ self.tokenize_chinese_chars = tokenize_chinese_chars
371
+
372
+ def tokenize(self, text, never_split=None):
373
+ """ Basic Tokenization of a piece of text.
374
+ Split on "white spaces" only, for sub-word tokenization, see WordPieceTokenizer.
375
+
376
+ Args:
377
+ **never_split**: (`optional`) list of str
378
+ Kept for backward compatibility purposes.
379
+ Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)
380
+ List of tokens not to split.
381
+ """
382
+ # union() returns a new set by concatenating the two sets.
383
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
384
+
385
+ # This was added on November 1st, 2018 for the multilingual and Chinese
386
+ # models. This is also applied to the English models now, but it doesn't
387
+ # matter since the English models were not trained on any Chinese data
388
+ # and generally don't have any Chinese data in them (there are Chinese
389
+ # characters in the vocabulary because Wikipedia does have some Chinese
390
+ # words in the English Wikipedia).
391
+ if self.tokenize_chinese_chars:
392
+ text = self._tokenize_chinese_chars(text)
393
+ orig_tokens = whitespace_tokenize(text)
394
+ split_tokens = []
395
+ for token in orig_tokens:
396
+ if self.do_lower_case and token not in never_split:
397
+ token = token.lower()
398
+ token = self._run_strip_accents(token)
399
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
400
+
401
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
402
+ return output_tokens
403
+
404
+ def _run_strip_accents(self, text):
405
+ """Strips accents from a piece of text."""
406
+ text = unicodedata.normalize("NFD", text)
407
+ output = []
408
+ for char in text:
409
+ cat = unicodedata.category(char)
410
+ if cat == "Mn":
411
+ continue
412
+ output.append(char)
413
+ return "".join(output)
414
+
415
+ def _run_split_on_punc(self, text, never_split=None):
416
+ """Splits punctuation on a piece of text."""
417
+ if never_split is not None and text in never_split:
418
+ return [text]
419
+ chars = list(text)
420
+ i = 0
421
+ start_new_word = True
422
+ output = []
423
+ while i < len(chars):
424
+ char = chars[i]
425
+ if _is_punctuation(char):
426
+ output.append([char])
427
+ start_new_word = True
428
+ else:
429
+ if start_new_word:
430
+ output.append([])
431
+ start_new_word = False
432
+ output[-1].append(char)
433
+ i += 1
434
+
435
+ return ["".join(x) for x in output]
436
+
437
+ def _tokenize_chinese_chars(self, text):
438
+ """Adds whitespace around any CJK character."""
439
+ output = []
440
+ for char in text:
441
+ cp = ord(char)
442
+ if self._is_chinese_char(cp):
443
+ output.append(" ")
444
+ output.append(char)
445
+ output.append(" ")
446
+ else:
447
+ output.append(char)
448
+ return "".join(output)
449
+
450
+ def _is_chinese_char(self, cp):
451
+ """Checks whether CP is the codepoint of a CJK character."""
452
+ # This defines a "chinese character" as anything in the CJK Unicode block:
453
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
454
+ #
455
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
456
+ # despite its name. The modern Korean Hangul alphabet is a different block,
457
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
458
+ # space-separated words, so they are not treated specially and handled
459
+ # like all of the other languages.
460
+ if (
461
+ (cp >= 0x4E00 and cp <= 0x9FFF)
462
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
463
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
464
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
465
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
466
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
467
+ or (cp >= 0xF900 and cp <= 0xFAFF)
468
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
469
+ ): #
470
+ return True
471
+
472
+ return False
473
+
474
+ def _clean_text(self, text):
475
+ """Performs invalid character removal and whitespace cleanup on text."""
476
+ output = []
477
+ for char in text:
478
+ cp = ord(char)
479
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
480
+ continue
481
+ if _is_whitespace(char):
482
+ output.append(" ")
483
+ else:
484
+ output.append(char)
485
+ return "".join(output)
486
+
487
+
488
+ class WordpieceTokenizer(object):
489
+ """Runs WordPiece tokenization."""
490
+
491
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
492
+ self.vocab = vocab
493
+ self.unk_token = unk_token
494
+ self.max_input_chars_per_word = max_input_chars_per_word
495
+
496
+ def tokenize(self, text):
497
+ """Tokenizes a piece of text into its word pieces.
498
+
499
+ This uses a greedy longest-match-first algorithm to perform tokenization
500
+ using the given vocabulary.
501
+
502
+ For example:
503
+ input = "unaffable"
504
+ output = ["un", "##aff", "##able"]
505
+
506
+ Args:
507
+ text: A single token or whitespace separated tokens. This should have
508
+ already been passed through `BasicTokenizer`.
509
+
510
+ Returns:
511
+ A list of wordpiece tokens.
512
+ """
513
+
514
+ output_tokens = []
515
+ for token in whitespace_tokenize(text):
516
+ chars = list(token)
517
+ if len(chars) > self.max_input_chars_per_word:
518
+ output_tokens.append(self.unk_token)
519
+ continue
520
+
521
+ is_bad = False
522
+ start = 0
523
+ sub_tokens = []
524
+ while start < len(chars):
525
+ end = len(chars)
526
+ cur_substr = None
527
+ while start < end:
528
+ substr = "".join(chars[start:end])
529
+ if start > 0:
530
+ substr = "##" + substr
531
+ if substr in self.vocab:
532
+ cur_substr = substr
533
+ break
534
+ end -= 1
535
+ if cur_substr is None:
536
+ is_bad = True
537
+ break
538
+ sub_tokens.append(cur_substr)
539
+ start = end
540
+
541
+ if is_bad:
542
+ output_tokens.append(self.unk_token)
543
+ else:
544
+ output_tokens.extend(sub_tokens)
545
+ return output_tokens
546
+
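A minimal usage sketch (not part of the original file). It assumes a local vocabulary file such as the bert-base-uncased-vocab.txt in this folder; encode() is inherited from the PreTrainedTokenizerBase machinery in tokenization_utils_base.py:

tokenizer = BertTokenizer(vocab_file="bert-base-uncased-vocab.txt", do_lower_case=True)
ids = tokenizer.encode("the man in the red shirt", "person on the left")
# encode() runs _tokenize on each text, then build_inputs_with_special_tokens:
# [CLS] sentence_1 [SEP] sentence_2 [SEP]
print(tokenizer.convert_ids_to_tokens(ids))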
RIS-DMMI/bert/tokenization_test.py ADDED
@@ -0,0 +1,137 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ from __future__ import absolute_import
16
+ from __future__ import division
17
+ from __future__ import print_function
18
+
19
+ import os
20
+ import tempfile
21
+ import tokenization
22
+ import six
23
+ import tensorflow as tf
24
+
25
+
26
+ class TokenizationTest(tf.test.TestCase):
27
+
28
+ def test_full_tokenizer(self):
29
+ vocab_tokens = [
30
+ "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
31
+ "##ing", ","
32
+ ]
33
+ with tempfile.NamedTemporaryFile(delete=False) as vocab_writer:
34
+ if six.PY2:
35
+ vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
36
+ else:
37
+ vocab_writer.write("".join(
38
+ [x + "\n" for x in vocab_tokens]).encode("utf-8"))
39
+
40
+ vocab_file = vocab_writer.name
41
+
42
+ tokenizer = tokenization.FullTokenizer(vocab_file)
43
+ os.unlink(vocab_file)
44
+
45
+ tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
46
+ self.assertAllEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
47
+
48
+ self.assertAllEqual(
49
+ tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
50
+
51
+ def test_chinese(self):
52
+ tokenizer = tokenization.BasicTokenizer()
53
+
54
+ self.assertAllEqual(
55
+ tokenizer.tokenize(u"ah\u535A\u63A8zz"),
56
+ [u"ah", u"\u535A", u"\u63A8", u"zz"])
57
+
58
+ def test_basic_tokenizer_lower(self):
59
+ tokenizer = tokenization.BasicTokenizer(do_lower_case=True)
60
+
61
+ self.assertAllEqual(
62
+ tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "),
63
+ ["hello", "!", "how", "are", "you", "?"])
64
+ self.assertAllEqual(tokenizer.tokenize(u"H\u00E9llo"), ["hello"])
65
+
66
+ def test_basic_tokenizer_no_lower(self):
67
+ tokenizer = tokenization.BasicTokenizer(do_lower_case=False)
68
+
69
+ self.assertAllEqual(
70
+ tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "),
71
+ ["HeLLo", "!", "how", "Are", "yoU", "?"])
72
+
73
+ def test_wordpiece_tokenizer(self):
74
+ vocab_tokens = [
75
+ "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
76
+ "##ing"
77
+ ]
78
+
79
+ vocab = {}
80
+ for (i, token) in enumerate(vocab_tokens):
81
+ vocab[token] = i
82
+ tokenizer = tokenization.WordpieceTokenizer(vocab=vocab)
83
+
84
+ self.assertAllEqual(tokenizer.tokenize(""), [])
85
+
86
+ self.assertAllEqual(
87
+ tokenizer.tokenize("unwanted running"),
88
+ ["un", "##want", "##ed", "runn", "##ing"])
89
+
90
+ self.assertAllEqual(
91
+ tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
92
+
93
+ def test_convert_tokens_to_ids(self):
94
+ vocab_tokens = [
95
+ "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
96
+ "##ing"
97
+ ]
98
+
99
+ vocab = {}
100
+ for (i, token) in enumerate(vocab_tokens):
101
+ vocab[token] = i
102
+
103
+ self.assertAllEqual(
104
+ tokenization.convert_tokens_to_ids(
105
+ vocab, ["un", "##want", "##ed", "runn", "##ing"]), [7, 4, 5, 8, 9])
106
+
107
+ def test_is_whitespace(self):
108
+ self.assertTrue(tokenization._is_whitespace(u" "))
109
+ self.assertTrue(tokenization._is_whitespace(u"\t"))
110
+ self.assertTrue(tokenization._is_whitespace(u"\r"))
111
+ self.assertTrue(tokenization._is_whitespace(u"\n"))
112
+ self.assertTrue(tokenization._is_whitespace(u"\u00A0"))
113
+
114
+ self.assertFalse(tokenization._is_whitespace(u"A"))
115
+ self.assertFalse(tokenization._is_whitespace(u"-"))
116
+
117
+ def test_is_control(self):
118
+ self.assertTrue(tokenization._is_control(u"\u0005"))
119
+
120
+ self.assertFalse(tokenization._is_control(u"A"))
121
+ self.assertFalse(tokenization._is_control(u" "))
122
+ self.assertFalse(tokenization._is_control(u"\t"))
123
+ self.assertFalse(tokenization._is_control(u"\r"))
124
+ self.assertFalse(tokenization._is_control(u"\U0001F4A9"))
125
+
126
+ def test_is_punctuation(self):
127
+ self.assertTrue(tokenization._is_punctuation(u"-"))
128
+ self.assertTrue(tokenization._is_punctuation(u"$"))
129
+ self.assertTrue(tokenization._is_punctuation(u"`"))
130
+ self.assertTrue(tokenization._is_punctuation(u"."))
131
+
132
+ self.assertFalse(tokenization._is_punctuation(u"A"))
133
+ self.assertFalse(tokenization._is_punctuation(u" "))
134
+
135
+
136
+ if __name__ == "__main__":
137
+ tf.test.main()
RIS-DMMI/bert/tokenization_utils.py ADDED
@@ -0,0 +1,723 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Tokenization classes for python tokenizers.
16
+ For fast tokenizers (provided by HuggingFace's tokenizers library) see tokenization_utils_fast.py
17
+ """
18
+
19
+ import itertools
20
+ import logging
21
+ import re
22
+ import unicodedata
23
+ from typing import Dict, List, Optional, Tuple, Union
24
+
25
+ from .file_utils import add_end_docstrings
26
+ from .tokenization_utils_base import (
27
+ ENCODE_KWARGS_DOCSTRING,
28
+ ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING,
29
+ AddedToken,
30
+ BatchEncoding,
31
+ EncodedInput,
32
+ EncodedInputPair,
33
+ PaddingStrategy,
34
+ PreTokenizedInput,
35
+ PreTokenizedInputPair,
36
+ PreTrainedTokenizerBase,
37
+ TensorType,
38
+ TextInput,
39
+ TextInputPair,
40
+ TruncationStrategy,
41
+ )
42
+
43
+
44
+ logger = logging.getLogger(__name__)
45
+
46
+
47
+ def _is_whitespace(char):
48
+ """Checks whether `char` is a whitespace character."""
49
+ # \t, \n, and \r are technically control characters but we treat them
50
+ # as whitespace since they are generally considered as such.
51
+ if char == " " or char == "\t" or char == "\n" or char == "\r":
52
+ return True
53
+ cat = unicodedata.category(char)
54
+ if cat == "Zs":
55
+ return True
56
+ return False
57
+
58
+
59
+ def _is_control(char):
60
+ """Checks whether `char` is a control character."""
61
+ # These are technically control characters but we count them as whitespace
62
+ # characters.
63
+ if char == "\t" or char == "\n" or char == "\r":
64
+ return False
65
+ cat = unicodedata.category(char)
66
+ if cat.startswith("C"):
67
+ return True
68
+ return False
69
+
70
+
71
+ def _is_punctuation(char):
72
+ """Checks whether `char` is a punctuation character."""
73
+ cp = ord(char)
74
+ # We treat all non-letter/number ASCII as punctuation.
75
+ # Characters such as "^", "$", and "`" are not in the Unicode
76
+ # Punctuation class but we treat them as punctuation anyways, for
77
+ # consistency.
78
+ if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
79
+ return True
80
+ cat = unicodedata.category(char)
81
+ if cat.startswith("P"):
82
+ return True
83
+ return False
84
+
85
+
86
+ def _is_end_of_word(text):
87
+ """Checks whether the last character in text is a punctuation, control, or whitespace character."""
88
+ last_char = text[-1]
89
+ return bool(_is_control(last_char) | _is_punctuation(last_char) | _is_whitespace(last_char))
90
+
91
+
92
+ def _is_start_of_word(text):
93
+ """Checks whether the first character in text is a punctuation, control, or whitespace character."""
94
+ first_char = text[0]
95
+ return bool(_is_control(first_char) | _is_punctuation(first_char) | _is_whitespace(first_char))
96
+
97
+
98
+ class PreTrainedTokenizer(PreTrainedTokenizerBase):
99
+ """ Base class for all slow tokenizers.
100
+
101
+ Handles all the shared methods for tokenization and special tokens, as well as methods for
102
+ downloading/caching/loading pretrained tokenizers and adding tokens to the vocabulary.
103
+
104
+ This class also contains the added tokens in a unified way on top of all tokenizers so we don't
105
+ have to handle the specific vocabulary augmentation methods of the various underlying
106
+ dictionary structures (BPE, sentencepiece...).
107
+
108
+ Class attributes (overridden by derived classes):
109
+
110
+ - ``vocab_files_names``: a python ``dict`` with, as keys, the ``__init__`` keyword name of each vocabulary file
111
+ required by the model, and as associated values, the filename for saving the associated file (string).
112
+ - ``pretrained_vocab_files_map``: a python ``dict of dict`` the high-level keys
113
+ being the ``__init__`` keyword name of each vocabulary file required by the model, the low-level being the
114
+ `short-cut-names` (string) of the pretrained models with, as associated values, the `url` (string) to the
115
+ associated pretrained vocabulary file.
116
+ - ``max_model_input_sizes``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the pretrained
117
+ models, and as associated values, the maximum length of the sequence inputs of this model, or None if the
118
+ model has no maximum input size.
119
+ - ``pretrained_init_configuration``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the
120
+ pretrained models, and as associated values, a dictionnary of specific arguments to pass to the
121
+ ``__init__``method of the tokenizer class for this pretrained model when loading the tokenizer with the
122
+ ``from_pretrained()`` method.
123
+
124
+ Args:
125
+ - ``model_max_length``: (`Optional`) int: the maximum length in number of tokens for the inputs to the transformer model.
126
+ When the tokenizer is loaded with `from_pretrained`, this will be set to the value stored for the associated
127
+ model in ``max_model_input_sizes`` (see above). If no value is provided, or if no associated
128
+ max_length can be found in ``max_model_input_sizes``, this will default to VERY_LARGE_INTEGER (`int(1e30)`).
129
+ - ``padding_side``: (`Optional`) string: the side on which the model should have padding applied.
130
+ Should be selected between ['right', 'left']
131
+ - ``model_input_names``: (`Optional`) List[string]: the list of the forward pass inputs accepted by the
132
+ model ("token_type_ids", "attention_mask"...).
133
+ - ``bos_token``: (`Optional`) string: a beginning of sentence token.
134
+ Will be associated to ``self.bos_token`` and ``self.bos_token_id``
135
+ - ``eos_token``: (`Optional`) string: an end of sentence token.
136
+ Will be associated to ``self.eos_token`` and ``self.eos_token_id``
137
+ - ``unk_token``: (`Optional`) string: an unknown token.
138
+ Will be associated to ``self.unk_token`` and ``self.unk_token_id``
139
+ - ``sep_token``: (`Optional`) string: a separation token (e.g. to separate context and query in an input sequence).
140
+ Will be associated to ``self.sep_token`` and ``self.sep_token_id``
141
+ - ``pad_token``: (`Optional`) string: a padding token.
142
+ Will be associated to ``self.pad_token`` and ``self.pad_token_id``
143
+ - ``cls_token``: (`Optional`) string: a classification token (e.g. to extract a summary of an input sequence
144
+ leveraging self-attention along the full depth of the model).
145
+ Will be associated to ``self.cls_token`` and ``self.cls_token_id``
146
+ - ``mask_token``: (`Optional`) string: a masking token (e.g. when training a model with masked-language
147
+ modeling). Will be associated to ``self.mask_token`` and ``self.mask_token_id``
148
+ - ``additional_special_tokens``: (`Optional`) list: a list of additional special tokens.
149
+ Adding all special tokens here ensure they won't be split by the tokenization process.
150
+ Will be associated to ``self.additional_special_tokens`` and ``self.additional_special_tokens_ids``
151
+
152
+
153
+ .. automethod:: __call__
154
+ """
155
+
156
+ def __init__(self, **kwargs):
157
+ super().__init__(**kwargs)
158
+
159
+ # Added tokens - We store this for both slow and fast tokenizers
160
+ # until the serialization of Fast tokenizers is updated
161
+ self.added_tokens_encoder: Dict[str, int] = {}
162
+ self.added_tokens_decoder: Dict[int, str] = {}
163
+ self.unique_no_split_tokens: List[str] = []
164
+
165
+ @property
166
+ def is_fast(self) -> bool:
167
+ return False
168
+
169
+ @property
170
+ def vocab_size(self) -> int:
171
+ """ Size of the base vocabulary (without the added tokens) """
172
+ raise NotImplementedError
173
+
174
+ def get_vocab(self):
175
+ """ Returns the vocabulary as a dict of {token: index} pairs. `tokenizer.get_vocab()[token]` is equivalent to `tokenizer.convert_tokens_to_ids(token)` when `token` is in the vocab. """
176
+ raise NotImplementedError()
177
+
178
+ def get_added_vocab(self) -> Dict[str, int]:
179
+ return self.added_tokens_encoder
180
+
181
+ def __len__(self):
182
+ """ Size of the full vocabulary with the added tokens """
183
+ return self.vocab_size + len(self.added_tokens_encoder)
184
+
185
+ def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens=False) -> int:
186
+ """
187
+ Add a list of new tokens to the tokenizer class. If the new tokens are not in the
188
+ vocabulary, they are added to it with indices starting from length of the current vocabulary.
189
+
190
+ Args:
191
+ new_tokens: string or list of string. Each string is a token to add. Tokens are only added if they are not
192
+ already in the vocabulary (tested by checking if the tokenizer assigns the index of the ``unk_token`` to them).
193
+
194
+ Returns:
195
+ Number of tokens added to the vocabulary.
196
+
197
+ Examples::
198
+
199
+ # Let's see how to increase the vocabulary of Bert model and tokenizer
200
+ tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
201
+ model = BertModel.from_pretrained('bert-base-uncased')
202
+
203
+ num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2'])
204
+ print('We have added', num_added_toks, 'tokens')
205
+ model.resize_token_embeddings(len(tokenizer)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
206
+ """
207
+ new_tokens = [str(tok) for tok in new_tokens]
208
+
209
+ tokens_to_add = []
210
+ for token in new_tokens:
211
+ assert isinstance(token, str)
212
+ if not special_tokens and self.init_kwargs.get("do_lower_case", False):
213
+ token = token.lower()
214
+ if (
215
+ token != self.unk_token
216
+ and self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk_token)
217
+ and token not in tokens_to_add
218
+ ):
219
+ tokens_to_add.append(token)
220
+ if self.verbose:
221
+ logger.info("Adding %s to the vocabulary", token)
222
+
223
+ added_tok_encoder = dict((tok, len(self) + i) for i, tok in enumerate(tokens_to_add))
224
+ added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
225
+ self.added_tokens_encoder.update(added_tok_encoder)
226
+ self.added_tokens_decoder.update(added_tok_decoder)
227
+
228
+ # Make sure we don't split on any special tokens (even they were already in the vocab before e.g. for Albert)
229
+ if special_tokens:
230
+ self.unique_no_split_tokens = list(set(self.unique_no_split_tokens).union(set(new_tokens)))
231
+ else:
232
+ # Or on the newly added tokens
233
+ self.unique_no_split_tokens = list(set(self.unique_no_split_tokens).union(set(tokens_to_add)))
234
+
235
+ return len(tokens_to_add)
236
+
237
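[Editor's note] A quick sanity check of the bookkeeping above, as a sketch: it assumes the upstream `transformers` package (this repo vendors the same `BertTokenizer`) and access to the standard `bert-base-uncased` checkpoint; the token string `new_tok1` is arbitrary::

    from transformers import BertTokenizer

    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    base_size = tokenizer.vocab_size           # size of the base vocabulary
    num_added = tokenizer.add_tokens(['new_tok1'])
    assert num_added == 1
    assert len(tokenizer) == base_size + 1     # __len__ counts added tokens too
    # New tokens get indices starting at the end of the current vocabulary
    assert tokenizer.convert_tokens_to_ids('new_tok1') == base_size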
+     def num_special_tokens_to_add(self, pair: bool = False) -> int:
+         """
+         Returns the number of added tokens when encoding a sequence with special tokens.
+ 
+         Note:
+             This encodes a dummy input and checks the number of added tokens, and is therefore not
+             efficient. Do not put this inside your training loop.
+ 
+         Args:
+             pair: Returns the number of added tokens in the case of a sequence pair if set to True, returns the
+                 number of added tokens in the case of a single sequence if set to False.
+ 
+         Returns:
+             Number of special tokens added to sequences.
+         """
+         token_ids_0 = []
+         token_ids_1 = []
+         return len(self.build_inputs_with_special_tokens(token_ids_0, token_ids_1 if pair else None))
+ 
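[Editor's note] For instance, a BERT-style tokenizer wraps a single sequence as ``[CLS] A [SEP]`` and a pair as ``[CLS] A [SEP] B [SEP]``, so one would expect (sketch, assuming `bert-base-uncased`)::

    from transformers import BertTokenizer

    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    assert tokenizer.num_special_tokens_to_add(pair=False) == 2  # [CLS], [SEP]
    assert tokenizer.num_special_tokens_to_add(pair=True) == 3   # [CLS], [SEP], [SEP]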
+     def tokenize(self, text: TextInput, **kwargs):
+         """ Converts a string into a sequence of tokens (strings), using the tokenizer.
+         Splits into words for word-based vocabularies or into sub-words for sub-word-based
+         vocabularies (BPE/SentencePiece/WordPiece).
+ 
+         Takes care of added tokens.
+ 
+         Args:
+             text (:obj:`string`): The sequence to be encoded.
+             **kwargs (:obj:`dict`): Arguments passed to the model-specific `prepare_for_tokenization` preprocessing method.
+         """
+         # Simple mapping string => AddedToken for special tokens with specific tokenization behaviors
+         all_special_tokens_extended = dict(
+             (str(t), t) for t in self.all_special_tokens_extended if isinstance(t, AddedToken)
+         )
+ 
+         text, kwargs = self.prepare_for_tokenization(text, **kwargs)
+ 
+         if kwargs:
+             logger.warning(f"Keyword arguments {kwargs} not recognized.")
+ 
+         # TODO: should this be in the base class?
+         if self.init_kwargs.get("do_lower_case", False):
+             # convert non-special tokens to lowercase
+             escaped_special_toks = [re.escape(s_tok) for s_tok in self.all_special_tokens]
+             pattern = r"(" + r"|".join(escaped_special_toks) + r")|" + r"(.+?)"
+             text = re.sub(pattern, lambda m: m.groups()[0] or m.groups()[1].lower(), text)
+ 
+         def split_on_token(tok, text):
+             result = []
+             tok_extended = all_special_tokens_extended.get(tok, None)
+             split_text = text.split(tok)
+             full_word = ""
+             for i, sub_text in enumerate(split_text):
+                 # AddedToken can control whitespace stripping around them.
+                 # We use them for GPT2 and Roberta to have different behavior depending on the special token
+                 # Cf. https://github.com/huggingface/transformers/pull/2778
+                 # and https://github.com/huggingface/transformers/issues/3788
+                 if isinstance(tok_extended, AddedToken):
+                     if tok_extended.single_word:
+                         # Try to avoid splitting on token
+                         if (
+                             i < len(split_text) - 1
+                             and not _is_end_of_word(sub_text)
+                             and not _is_start_of_word(split_text[i + 1])
+                         ):
+                             # Don't extract the special token
+                             full_word += sub_text + tok
+                         elif full_word:
+                             full_word += sub_text
+                             result += [full_word]
+                             full_word = ""
+                             continue
+                     # Strip white spaces on the right
+                     if tok_extended.rstrip and i > 0:
+                         # A bit counter-intuitive but we strip the left of the string
+                         # since tok_extended.rstrip means the special token is eating all white spaces on its right
+                         sub_text = sub_text.lstrip()
+                     # Strip white spaces on the left
+                     if tok_extended.lstrip and i < len(split_text) - 1:
+                         sub_text = sub_text.rstrip()  # Opposite here
+                 else:
+                     # We strip left and right by default
+                     if i < len(split_text) - 1:
+                         sub_text = sub_text.rstrip()
+                     if i > 0:
+                         sub_text = sub_text.lstrip()
+ 
+                 if i == 0 and not sub_text:
+                     result += [tok]
+                 elif i == len(split_text) - 1:
+                     if sub_text:
+                         result += [sub_text]
+                 else:
+                     if sub_text:
+                         result += [sub_text]
+                     result += [tok]
+             return result
+ 
+         def split_on_tokens(tok_list, text):
+             if not text.strip():
+                 return []
+             if not tok_list:
+                 return self._tokenize(text)
+ 
+             tokenized_text = []
+             text_list = [text]
+             for tok in tok_list:
+                 tokenized_text = []
+                 for sub_text in text_list:
+                     if sub_text not in self.unique_no_split_tokens:
+                         tokenized_text += split_on_token(tok, sub_text)
+                     else:
+                         tokenized_text += [sub_text]
+                 text_list = tokenized_text
+ 
+             return list(
+                 itertools.chain.from_iterable(
+                     (
+                         self._tokenize(token) if token not in self.unique_no_split_tokens else [token]
+                         for token in tokenized_text
+                     )
+                 )
+             )
+ 
+         no_split_token = self.unique_no_split_tokens
+         tokenized_text = split_on_tokens(no_split_token, text)
+         return tokenized_text
+ 
+     def _tokenize(self, text, **kwargs):
+         """ Converts a string into a sequence of tokens (strings), using the tokenizer.
+         Splits into words for word-based vocabularies or into sub-words for sub-word-based
+         vocabularies (BPE/SentencePiece/WordPiece).
+ 
+         Does NOT take care of added tokens.
+         """
+         raise NotImplementedError
+ 
+     def convert_tokens_to_ids(self, tokens):
+         """ Converts a token string (or a sequence of tokens) into a single integer id
+         (or a sequence of ids), using the vocabulary.
+         """
+         if tokens is None:
+             return None
+ 
+         if isinstance(tokens, str):
+             return self._convert_token_to_id_with_added_voc(tokens)
+ 
+         ids = []
+         for token in tokens:
+             ids.append(self._convert_token_to_id_with_added_voc(token))
+         return ids
+ 
+     def _convert_token_to_id_with_added_voc(self, token):
+         if token is None:
+             return None
+ 
+         # Added tokens are looked up first, then the base vocabulary
+         if token in self.added_tokens_encoder:
+             return self.added_tokens_encoder[token]
+         return self._convert_token_to_id(token)
+ 
+     def _convert_token_to_id(self, token):
+         raise NotImplementedError
+ 
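[Editor's note] A lookup sketch, assuming `bert-base-uncased` (whose `[CLS]`/`[SEP]` ids are the well-known 101/102); the out-of-vocabulary string is arbitrary::

    from transformers import BertTokenizer

    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    assert tokenizer.convert_tokens_to_ids('[CLS]') == 101
    assert tokenizer.convert_tokens_to_ids(['[CLS]', '[SEP]']) == [101, 102]
    # Unknown strings fall back to the [UNK] id
    assert tokenizer.convert_tokens_to_ids('never-in-vocab-xyz') == tokenizer.unk_token_id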
+     def _encode_plus(
+         self,
+         text: Union[TextInput, PreTokenizedInput, EncodedInput],
+         text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
+         add_special_tokens: bool = True,
+         padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+         truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+         max_length: Optional[int] = None,
+         stride: int = 0,
+         is_pretokenized: bool = False,
+         pad_to_multiple_of: Optional[int] = None,
+         return_tensors: Optional[Union[str, TensorType]] = None,
+         return_token_type_ids: Optional[bool] = None,
+         return_attention_mask: Optional[bool] = None,
+         return_overflowing_tokens: bool = False,
+         return_special_tokens_mask: bool = False,
+         return_offsets_mapping: bool = False,
+         return_length: bool = False,
+         verbose: bool = True,
+         **kwargs
+     ) -> BatchEncoding:
+         def get_input_ids(text):
+             if isinstance(text, str):
+                 tokens = self.tokenize(text, **kwargs)
+                 return self.convert_tokens_to_ids(tokens)
+             elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str):
+                 if is_pretokenized:
+                     tokens = list(itertools.chain(*(self.tokenize(t, is_pretokenized=True, **kwargs) for t in text)))
+                     return self.convert_tokens_to_ids(tokens)
+                 else:
+                     return self.convert_tokens_to_ids(text)
+             elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
+                 return text
+             else:
+                 if is_pretokenized:
+                     raise ValueError(
+                         f"Input {text} is not valid. Should be a string or a list/tuple of strings when `is_pretokenized=True`."
+                     )
+                 else:
+                     raise ValueError(
+                         f"Input {text} is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
+                     )
+ 
+         if return_offsets_mapping:
+             raise NotImplementedError(
+                 "return_offset_mapping is not available when using Python tokenizers. "
+                 "To use this feature, change your tokenizer to one deriving from "
+                 "transformers.PreTrainedTokenizerFast. "
+                 "More information on available tokenizers at "
+                 "https://github.com/huggingface/transformers/pull/2674"
+             )
+ 
+         first_ids = get_input_ids(text)
+         second_ids = get_input_ids(text_pair) if text_pair is not None else None
+ 
+         return self.prepare_for_model(
+             first_ids,
+             pair_ids=second_ids,
+             add_special_tokens=add_special_tokens,
+             padding=padding_strategy.value,
+             truncation=truncation_strategy.value,
+             max_length=max_length,
+             stride=stride,
+             pad_to_multiple_of=pad_to_multiple_of,
+             return_tensors=return_tensors,
+             prepend_batch_axis=True,
+             return_attention_mask=return_attention_mask,
+             return_token_type_ids=return_token_type_ids,
+             return_overflowing_tokens=return_overflowing_tokens,
+             return_special_tokens_mask=return_special_tokens_mask,
+             return_length=return_length,
+             verbose=verbose,
+         )
+ 
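[Editor's note] This private method backs the public `encode_plus`. A sketch of the single-pair path (only the output keys are asserted, since exact ids depend on the checkpoint)::

    from transformers import BertTokenizer

    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    enc = tokenizer.encode_plus("hello world", "how are you", add_special_tokens=True)
    # prepare_for_model assembles these from the two id sequences
    for key in ('input_ids', 'token_type_ids', 'attention_mask'):
        assert key in enc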
+     def _batch_encode_plus(
+         self,
+         batch_text_or_text_pairs: Union[
+             List[TextInput],
+             List[TextInputPair],
+             List[PreTokenizedInput],
+             List[PreTokenizedInputPair],
+             List[EncodedInput],
+             List[EncodedInputPair],
+         ],
+         add_special_tokens: bool = True,
+         padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+         truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+         max_length: Optional[int] = None,
+         stride: int = 0,
+         is_pretokenized: bool = False,
+         pad_to_multiple_of: Optional[int] = None,
+         return_tensors: Optional[Union[str, TensorType]] = None,
+         return_token_type_ids: Optional[bool] = None,
+         return_attention_mask: Optional[bool] = None,
+         return_overflowing_tokens: bool = False,
+         return_special_tokens_mask: bool = False,
+         return_offsets_mapping: bool = False,
+         return_length: bool = False,
+         verbose: bool = True,
+         **kwargs
+     ) -> BatchEncoding:
+         def get_input_ids(text):
+             if isinstance(text, str):
+                 tokens = self.tokenize(text, **kwargs)
+                 return self.convert_tokens_to_ids(tokens)
+             elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str):
+                 if is_pretokenized:
+                     tokens = list(itertools.chain(*(self.tokenize(t, is_pretokenized=True, **kwargs) for t in text)))
+                     return self.convert_tokens_to_ids(tokens)
+                 else:
+                     return self.convert_tokens_to_ids(text)
+             elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
+                 return text
+             else:
+                 raise ValueError(
+                     "Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
+                 )
+ 
+         if return_offsets_mapping:
+             raise NotImplementedError(
+                 "return_offset_mapping is not available when using Python tokenizers. "
+                 "To use this feature, change your tokenizer to one deriving from "
+                 "transformers.PreTrainedTokenizerFast."
+             )
+ 
+         input_ids = []
+         for ids_or_pair_ids in batch_text_or_text_pairs:
+             if not isinstance(ids_or_pair_ids, (list, tuple)):
+                 ids, pair_ids = ids_or_pair_ids, None
+             elif is_pretokenized and not isinstance(ids_or_pair_ids[0], (list, tuple)):
+                 ids, pair_ids = ids_or_pair_ids, None
+             else:
+                 ids, pair_ids = ids_or_pair_ids
+ 
+             first_ids = get_input_ids(ids)
+             second_ids = get_input_ids(pair_ids) if pair_ids is not None else None
+             input_ids.append((first_ids, second_ids))
+ 
+         batch_outputs = self._batch_prepare_for_model(
+             input_ids,
+             add_special_tokens=add_special_tokens,
+             padding_strategy=padding_strategy,
+             truncation_strategy=truncation_strategy,
+             max_length=max_length,
+             stride=stride,
+             pad_to_multiple_of=pad_to_multiple_of,
+             return_attention_mask=return_attention_mask,
+             return_token_type_ids=return_token_type_ids,
+             return_overflowing_tokens=return_overflowing_tokens,
+             return_special_tokens_mask=return_special_tokens_mask,
+             return_length=return_length,
+             return_tensors=return_tensors,
+             verbose=verbose,
+         )
+ 
+         return BatchEncoding(batch_outputs)
+ 
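[Editor's note] A batch-path sketch exercising pad-in-batch through the public ``__call__`` (assuming the same checkpoint as above)::

    from transformers import BertTokenizer

    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    batch = tokenizer(["a short one", "a slightly longer example sentence"], padding=True)
    lengths = {len(ids) for ids in batch['input_ids']}
    assert len(lengths) == 1  # every row is padded to the longest sequence in the batch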
+     @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
+     def _batch_prepare_for_model(
+         self,
+         batch_ids_pairs: List[Union[PreTokenizedInputPair, Tuple[List[int], None]]],
+         add_special_tokens: bool = True,
+         padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+         truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+         max_length: Optional[int] = None,
+         stride: int = 0,
+         pad_to_multiple_of: Optional[int] = None,
+         return_tensors: Optional[str] = None,
+         return_token_type_ids: Optional[bool] = None,
+         return_attention_mask: Optional[bool] = None,
+         return_overflowing_tokens: bool = False,
+         return_special_tokens_mask: bool = False,
+         return_length: bool = False,
+         verbose: bool = True,
+     ) -> BatchEncoding:
+         """ Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by
+         the model. It adds special tokens, truncates sequences if overflowing while taking the special tokens
+         into account, and manages a moving window (with a user-defined stride) for overflowing tokens.
+ 
+         Args:
+             batch_ids_pairs: list of tokenized input ids or input ids pairs
+         """
+ 
+         batch_outputs = {}
+         for first_ids, second_ids in batch_ids_pairs:
+             outputs = self.prepare_for_model(
+                 first_ids,
+                 second_ids,
+                 add_special_tokens=add_special_tokens,
+                 padding=PaddingStrategy.DO_NOT_PAD.value,  # we pad in batch afterward
+                 truncation=truncation_strategy.value,
+                 max_length=max_length,
+                 stride=stride,
+                 pad_to_multiple_of=None,  # we pad in batch afterward
+                 return_attention_mask=False,  # we pad in batch afterward
+                 return_token_type_ids=return_token_type_ids,
+                 return_overflowing_tokens=return_overflowing_tokens,
+                 return_special_tokens_mask=return_special_tokens_mask,
+                 return_length=return_length,
+                 return_tensors=None,  # We convert the whole batch to tensors at the end
+                 prepend_batch_axis=False,
+                 verbose=verbose,
+             )
+ 
+             for key, value in outputs.items():
+                 if key not in batch_outputs:
+                     batch_outputs[key] = []
+                 batch_outputs[key].append(value)
+ 
+         batch_outputs = self.pad(
+             batch_outputs,
+             padding=padding_strategy.value,
+             max_length=max_length,
+             pad_to_multiple_of=pad_to_multiple_of,
+         )
+ 
+         batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
+ 
+         return batch_outputs
+ 
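[Editor's note] The three ``# we pad in batch afterward`` comments implement dynamic padding: each pair is encoded unpadded, and a single ``pad`` call then aligns the whole batch. A minimal standalone sketch of that idea (``pad_id=0`` matches BERT's ``[PAD]``; the helper name is made up)::

    def pad_batch(batch_ids, pad_id=0):
        # Pad every id sequence to the length of the longest one in the batch
        max_len = max(len(ids) for ids in batch_ids)
        input_ids = [ids + [pad_id] * (max_len - len(ids)) for ids in batch_ids]
        attention_mask = [[1] * len(ids) + [0] * (max_len - len(ids)) for ids in batch_ids]
        return {"input_ids": input_ids, "attention_mask": attention_mask}

    assert pad_batch([[5, 6], [7]]) == {
        "input_ids": [[5, 6], [7, 0]],
        "attention_mask": [[1, 1], [1, 0]],
    }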
+     def prepare_for_tokenization(self, text: str, is_pretokenized: bool = False, **kwargs) -> Tuple[str, dict]:
+         """ Performs any necessary transformations before tokenization.
+ 
+         This method should pop its own arguments from kwargs and return the remaining kwargs as well.
+         We test kwargs at the end of the encoding process to be sure all the arguments have been used.
+         """
+         return (text, kwargs)
+ 
+     def get_special_tokens_mask(
+         self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
+     ) -> List[int]:
+         """
+         Retrieves sequence ids from a token list that has no special tokens added. This method is called when
+         adding special tokens using the tokenizer ``prepare_for_model`` method.
+ 
+         Args:
+             token_ids_0: list of ids (must not contain special tokens)
+             token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching
+                 sequence ids for sequence pairs
+             already_has_special_tokens: (default False) Set to True if the token list is already formatted
+                 with special tokens for the model
+ 
+         Returns:
+             A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+         """
+         return [0] * ((len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0))
+ 
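[Editor's note] The base implementation above returns all zeros; model subclasses override it. For the BERT-style layout ``[CLS] A [SEP] B [SEP]``, the override yields (sketch, assuming `bert-base-uncased`; the ids 5, 6, 7 are arbitrary)::

    from transformers import BertTokenizer

    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    mask = tokenizer.get_special_tokens_mask([5, 6], [7])
    assert mask == [1, 0, 0, 1, 0, 1]  # [CLS] A A [SEP] B [SEP]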
+     def convert_ids_to_tokens(
+         self, ids: Union[int, List[int]], skip_special_tokens: bool = False
+     ) -> Union[str, List[str]]:
+         """ Converts a single index (or a sequence of indices) into a token (resp. a sequence of
+         tokens), using the vocabulary and added tokens.
+ 
+         Args:
+             skip_special_tokens: Don't decode special tokens (self.all_special_tokens). Default: False
+         """
+         if isinstance(ids, int):
+             if ids in self.added_tokens_decoder:
+                 return self.added_tokens_decoder[ids]
+             else:
+                 return self._convert_id_to_token(ids)
+         tokens = []
+         for index in ids:
+             index = int(index)
+             if skip_special_tokens and index in self.all_special_ids:
+                 continue
+             if index in self.added_tokens_decoder:
+                 tokens.append(self.added_tokens_decoder[index])
+             else:
+                 tokens.append(self._convert_id_to_token(index))
+         return tokens
+ 
+     def _convert_id_to_token(self, index: int) -> str:
+         raise NotImplementedError
+ 
+
678
+ def convert_tokens_to_string(self, tokens: List[str]) -> str:
679
+ """ Converts a sequence of tokens (string) in a single string.
680
+ The most simple way to do it is ' '.join(self.convert_ids_to_tokens(token_ids))
681
+ but we often want to remove sub-word tokenization artifacts at the same time.
682
+ """
683
+ return " ".join(self.convert_ids_to_tokens(tokens))
684
+
685
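[Editor's note] For example, BERT's WordPiece subclass overrides this method to glue ``##`` continuation pieces back onto the preceding token (sketch, assuming `bert-base-uncased`)::

    from transformers import BertTokenizer

    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    assert tokenizer.convert_tokens_to_string(['play', '##ing']) == 'playing'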
+     def decode(
+         self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = True
+     ) -> str:
+         filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
+ 
+         # To avoid mixing byte-level and unicode for byte-level BPE
+         # we need to build the string separately for added tokens and byte-level tokens
+         # cf. https://github.com/huggingface/transformers/issues/1133
+         sub_texts = []
+         current_sub_text = []
+         for token in filtered_tokens:
+             if skip_special_tokens and token in self.all_special_tokens:
+                 continue
+             if token in self.added_tokens_encoder:
+                 if current_sub_text:
+                     sub_texts.append(self.convert_tokens_to_string(current_sub_text))
+                     current_sub_text = []
+                 sub_texts.append(token)
+             else:
+                 current_sub_text.append(token)
+         if current_sub_text:
+             sub_texts.append(self.convert_tokens_to_string(current_sub_text))
+         text = " ".join(sub_texts)
+ 
+         if clean_up_tokenization_spaces:
+             clean_text = self.clean_up_tokenization(text)
+             return clean_text
+         else:
+             return text
+ 
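[Editor's note] Putting encode and decode together as a round trip (sketch; `skip_special_tokens=True` drops the `[CLS]`/`[SEP]` wrappers on the way back)::

    from transformers import BertTokenizer

    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    ids = tokenizer.encode("hello world")  # [CLS] hello world [SEP]
    assert tokenizer.decode(ids, skip_special_tokens=True) == "hello world"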
+     def save_vocabulary(self, save_directory) -> Tuple[str]:
+         """ Save the tokenizer vocabulary to a directory. This method does *NOT* save added tokens
+         and special token mappings.
+ 
+         Please use :func:`~transformers.PreTrainedTokenizer.save_pretrained` to save the full
+         tokenizer state if you want to reload it using the
+         :func:`~transformers.PreTrainedTokenizer.from_pretrained` class method.
+         """
+         raise NotImplementedError
RIS-DMMI/bert/tokenization_utils_base.py ADDED
The diff for this file is too large to render. See raw diff
 
RIS-DMMI/bert/vocab.txt ADDED
The diff for this file is too large to render. See raw diff