Upload 12 files
- .gitattributes +1 -0
- 1_Pooling/config.json +7 -0
- README.md +9 -11
- config.json +29 -0
- config_sentence_transformers.json +7 -0
- eval/binary_classification_evaluation_results.csv +11 -0
- modules.json +14 -0
- pytorch_model.bin +3 -0
- sentence_bert_config.json +4 -0
- sentencepiece.bpe.model +3 -0
- special_tokens_map.json +15 -0
- tokenizer.json +3 -0
- tokenizer_config.json +19 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
1_Pooling/config.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "word_embedding_dimension": 768,
+  "pooling_mode_cls_token": false,
+  "pooling_mode_mean_tokens": true,
+  "pooling_mode_max_tokens": false,
+  "pooling_mode_mean_sqrt_len_tokens": false
+}
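The pooling config above selects mean pooling over 768-dimensional token embeddings (`pooling_mode_mean_tokens: true`, all other modes off). A minimal sketch of that operation, using made-up placeholder tensors rather than anything from this repository:

```python
import torch

# Placeholder batch: 2 sequences, 4 tokens each, 768-dim token embeddings (illustrative values).
token_embeddings = torch.randn(2, 4, 768)
attention_mask = torch.tensor([[1, 1, 1, 0],
                               [1, 1, 0, 0]])

# Mean pooling as selected by "pooling_mode_mean_tokens": true —
# average token embeddings over real (non-padding) positions only.
mask = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
sentence_embeddings = (token_embeddings * mask).sum(dim=1) / mask.sum(dim=1).clamp(min=1e-9)

print(sentence_embeddings.shape)  # torch.Size([2, 768])
```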
README.md
CHANGED
@@ -5,16 +5,14 @@ tags:
 - feature-extraction
 - sentence-similarity
 - transformers
-
-base_model:
-- sentence-transformers/paraphrase-multilingual-mpnet-base-v2
+
 ---
 
-#
+# {MODEL_NAME}
 
-This is a
+This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.
 
-
+<!--- Describe your model here -->
 
 ## Usage (Sentence-Transformers)
 
@@ -28,7 +26,7 @@ Then you can use the model like this:
 
 ```python
 from sentence_transformers import SentenceTransformer
-sentences = ["
+sentences = ["This is an example sentence", "Each sentence is converted"]
 
 model = SentenceTransformer('{MODEL_NAME}')
 embeddings = model.encode(sentences)
@@ -53,11 +51,11 @@ def mean_pooling(model_output, attention_mask):
 
 
 # Sentences we want sentence embeddings for
-sentences = [
+sentences = ['This is an example sentence', 'Each sentence is converted']
 
 # Load model from HuggingFace Hub
-tokenizer = AutoTokenizer.from_pretrained('
-model = AutoModel.from_pretrained('
+tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}')
+model = AutoModel.from_pretrained('{MODEL_NAME}')
 
 # Tokenize sentences
 encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
@@ -79,7 +77,7 @@ print(sentence_embeddings)
 
 <!--- Describe how your model was evaluated -->
 
-
+For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})
 
 
 ## Training
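In the updated README, `{MODEL_NAME}` is a placeholder for the Hub repo id. A minimal end-to-end sketch combining the two usage snippets, with `your-username/your-model` as a hypothetical repo id (not the actual name of this repository):

```python
from sentence_transformers import SentenceTransformer, util

# Hypothetical repo id — substitute the real Hub id of this model.
model = SentenceTransformer("your-username/your-model")

sentences = ["This is an example sentence", "Each sentence is converted"]
embeddings = model.encode(sentences, convert_to_tensor=True)

# Cosine similarity between the two sentence embeddings.
print(util.cos_sim(embeddings[0], embeddings[1]))
```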
config.json
ADDED
@@ -0,0 +1,29 @@
+{
+  "_name_or_path": "/home/shaunss/.cache/torch/sentence_transformers/sentence-transformers_paraphrase-multilingual-mpnet-base-v2/",
+  "architectures": [
+    "XLMRobertaModel"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "xlm-roberta",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "output_past": true,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.29.2",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 250002
+}
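config.json identifies the backbone as a 12-layer XLM-RoBERTa encoder with 768-dim hidden states and a 250,002-token vocabulary. A quick sketch of inspecting it with transformers; the local path is an assumption for illustration (in practice the Hub repo id would be used):

```python
from transformers import AutoConfig

# Assumes the repository files have been downloaded to the current directory (illustrative path).
config = AutoConfig.from_pretrained("./")
print(config.model_type, config.num_hidden_layers, config.hidden_size, config.vocab_size)
# Per the config above: xlm-roberta 12 768 250002
```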
config_sentence_transformers.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "__version__": {
+    "sentence_transformers": "2.0.0",
+    "transformers": "4.7.0",
+    "pytorch": "1.9.0+cu102"
+  }
+}
eval/binary_classification_evaluation_results.csv
ADDED
@@ -0,0 +1,11 @@
+epoch,steps,cossim_accuracy,cossim_accuracy_threshold,cossim_f1,cossim_precision,cossim_recall,cossim_f1_threshold,cossim_ap,manhattan_accuracy,manhattan_accuracy_threshold,manhattan_f1,manhattan_precision,manhattan_recall,manhattan_f1_threshold,manhattan_ap,euclidean_accuracy,euclidean_accuracy_threshold,euclidean_f1,euclidean_precision,euclidean_recall,euclidean_f1_threshold,euclidean_ap,dot_accuracy,dot_accuracy_threshold,dot_f1,dot_precision,dot_recall,dot_f1_threshold,dot_ap
+0,-1,0.8024883879997224,0.17506900429725647,0.8063041496621397,0.7840946056943959,0.8298085446987237,0.0659412145614624,0.8620151605623863,0.8526282504622252,103.52375030517578,0.8490974497162036,0.8456404198718872,0.8525828606838858,121.41480255126953,0.9262942217350337,0.8527343725739419,4.6177239418029785,0.8522953074575396,0.8412787328163793,0.8636042353406949,6.18421745300293,0.926979495084879,0.7937766537294901,-0.2767452895641327,0.8104768167427614,0.7465210233568693,0.8864178192427855,-0.5480235815048218,0.6638662718204413
+1,-1,0.8562520640480699,0.4267902672290802,0.8541980796999714,0.8591563321343644,0.8492967277453115,0.212812602519989,0.9191552729790002,0.868453946648931,131.79901123046875,0.8684219629004211,0.8466657537438352,0.8913257709421718,142.26144409179688,0.9393029605668897,0.8714059235580557,7.088883399963379,0.8730945894025861,0.8620765232474062,0.8843979421459863,7.171298980712891,0.9410885581264281,0.8417532993040495,2.064309597015381,0.8475257385019971,0.8022907174865053,0.8981664572377763,0.8476270437240601,0.7242120395710528
+2,-1,0.8302202830409035,0.4481218755245209,0.8273452634575833,0.8277704934804792,0.8269204700961365,0.24427366256713867,0.9113411926756796,0.8404379791865646,132.42083740234375,0.838898014762301,0.8427066008936674,0.8351236993174913,135.08065795898438,0.9200413996350492,0.8410644506857567,8.616535186767578,0.8419910870603214,0.8305887241623299,0.8537108719414058,8.960345268249512,0.919906658213341,0.8087028772593816,4.547852516174316,0.8156051098421041,0.7797627370948615,0.8549012815326752,0.5787814855575562,0.6507068755138374
+3,-1,0.8650450775947058,0.3033302426338196,0.8653469548675369,0.8664464232556334,0.8642502732616685,0.29888010025024414,0.929607383130208,0.8527484141765099,95.41083526611328,0.8400213299418179,0.92367190705091,0.7702638588850924,95.61880493164062,0.9139968622029379,0.8736639212633338,9.171188354492188,0.8761585812642521,0.852817741681677,0.90081300725542,9.45157241821289,0.9451804083431754,0.8417303466844671,4.933138847351074,0.8489783734263863,0.8138135589875556,0.8873193675821291,4.865048408508301,0.7125378876944088
+4,-1,0.8713484069936902,0.6772409081459045,0.8718955615486387,0.8715616499027923,0.8722297291481982,0.6763420104980469,0.93615891570098,0.8688409008120097,73.83671569824219,0.8637921524045287,0.9017763924847725,0.8288784867758566,74.4036865234375,0.918698788792674,0.8753988692729501,6.102813720703125,0.8761979965755444,0.8732105345263459,0.8792059704447065,6.606846809387207,0.942128526085839,0.7804211994714957,4.64598274230957,0.8101076561988367,0.7143176320318051,0.9355672654037818,4.550523281097412,0.6332916924163511
+5,-1,0.8689391920299864,0.21575260162353516,0.8718192297557826,0.856114746056784,0.8881106430040709,0.2141122817993164,0.9300156584430811,0.8585594773931549,77.21778106689453,0.8477186193030853,0.9216696854619578,0.7847531823150212,77.88922119140625,0.9120666494863384,0.8727255641686386,9.299266815185547,0.874944919268427,0.8631704793076789,0.8870450300803002,9.303229331970215,0.9397909479702942,0.8596015263221991,4.576216697692871,0.8645230616415577,0.837967453035709,0.8928168759521125,4.533451557159424,0.7360157593676956
+6,-1,0.8713354455143966,0.6016895771026611,0.8703519134454197,0.8798758801897527,0.8610319178235462,0.5895531177520752,0.9275517797612493,0.8670719289192476,73.34446716308594,0.8600940705878745,0.911089329455958,0.8145048154300321,73.94377136230469,0.9121048567424115,0.876789798019648,7.987394332885742,0.8771571705878776,0.8662878809736516,0.8883026792553512,8.711860656738281,0.9391480550458041,0.7294615234436707,6.7831244468688965,0.774172549105996,0.6649606571341743,0.9263075678420505,6.602792739868164,0.6185114062007635
+7,-1,0.8649932316775314,0.6178315877914429,0.8625849580892574,0.8775699904597679,0.8481030906540206,0.49990156292915344,0.9235150902500275,0.8535995513167918,68.7736587524414,0.8425942741826457,0.9145312332718262,0.7811491406243276,70.22262573242188,0.9054550670130509,0.8681701442585642,9.46362590789795,0.8705566330550115,0.8537452397156138,0.8880434034202893,9.755147933959961,0.9353567274782016,0.8231182024804491,7.171789169311523,0.8357583843732164,0.7810905962605724,0.8986543476576957,7.055145740509033,0.7025103236184465
+8,-1,0.8694519805545406,0.6050317883491516,0.8698676469839678,0.8664736382825989,0.8732883491552557,0.5112029314041138,0.9302869990687609,0.8688282093635347,75.08961486816406,0.8650922930152832,0.8904031589945651,0.8411806410245376,80.07766723632812,0.9203894196731085,0.8713060121551672,6.7917585372924805,0.8720885745508811,0.8569864777849324,0.8877324875848832,9.010178565979004,0.9355215648070871,0.8460335578099528,14.055868148803711,0.8538792489037188,0.813056109874906,0.8990185172434568,13.523445129394531,0.7378416874030901
+9,-1,0.8710743257127936,0.5959341526031494,0.8707389005212216,0.87639529833842,0.8651550491010337,0.5940307378768921,0.9286072861137795,0.8640481238123707,74.07192993164062,0.8604433566864135,0.879785727133735,0.8419331864462213,82.67079162597656,0.9182521268955257,0.8715220368100611,7.239246368408203,0.8715577326349558,0.872882192694364,0.8702372858015819,7.660418510437012,0.9332301899565736,0.8387297642279914,16.086734771728516,0.8479424596216002,0.8031441363992143,0.8980335918202239,15.580770492553711,0.7381259874246225
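The column layout of this CSV (per-epoch accuracy, F1, precision, recall, decision thresholds, and average precision for cosine, Manhattan, Euclidean, and dot-product scores) matches what sentence-transformers' `BinaryClassificationEvaluator` writes to `binary_classification_evaluation_results.csv` during training. A minimal sketch of producing such a file; the sentence pairs and labels below are invented placeholders, not the data used to train this model:

```python
import os

from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import BinaryClassificationEvaluator

# Base checkpoint from config.json; normally the fine-tuned model would be evaluated instead.
model = SentenceTransformer("sentence-transformers/paraphrase-multilingual-mpnet-base-v2")

# Placeholder pairs: label 1 = same meaning, 0 = different meaning.
sentences1 = ["A man is eating food.", "The weather is cold today."]
sentences2 = ["A man is eating a meal.", "It is sunny and warm outside."]
labels = [1, 0]

evaluator = BinaryClassificationEvaluator(sentences1, sentences2, labels)

# Writes/updates binary_classification_evaluation_results.csv under output_path.
os.makedirs("eval", exist_ok=True)
evaluator(model, output_path="eval")
```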
modules.json
ADDED
@@ -0,0 +1,14 @@
+[
+  {
+    "idx": 0,
+    "name": "0",
+    "path": "",
+    "type": "sentence_transformers.models.Transformer"
+  },
+  {
+    "idx": 1,
+    "name": "1",
+    "path": "1_Pooling",
+    "type": "sentence_transformers.models.Pooling"
+  }
+]
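modules.json declares a two-stage sentence-transformers pipeline: the Transformer module at the repo root (module 0) followed by the Pooling module in `1_Pooling` (module 1). A rough sketch of assembling an equivalent pipeline by hand, with the base checkpoint and the 128-token limit taken from the other config files in this commit:

```python
from sentence_transformers import SentenceTransformer, models

# Module 0: the XLM-RoBERTa encoder (config.json / pytorch_model.bin at the repo root).
word_embedding_model = models.Transformer(
    "sentence-transformers/paraphrase-multilingual-mpnet-base-v2",
    max_seq_length=128,  # from sentence_bert_config.json
)

# Module 1: mean pooling, mirroring 1_Pooling/config.json.
pooling_model = models.Pooling(
    word_embedding_model.get_word_embedding_dimension(),  # 768
    pooling_mode_mean_tokens=True,
    pooling_mode_cls_token=False,
    pooling_mode_max_tokens=False,
)

model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
print(model)
```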
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5f98080704852e59f1081925563d9b9f6ee2b66e889ee457ba5666adbee01f07
+size 1112245805
sentence_bert_config.json
ADDED
@@ -0,0 +1,4 @@
+{
+  "max_seq_length": 128,
+  "do_lower_case": false
+}
sentencepiece.bpe.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+size 5069051
special_tokens_map.json
ADDED
@@ -0,0 +1,15 @@
+{
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
+}
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b60b6b43406a48bf3638526314f3d232d97058bc93472ff2de930d43686fa441
+size 17082913
tokenizer_config.json
ADDED
@@ -0,0 +1,19 @@
+{
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "__type": "AddedToken",
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "model_max_length": 512,
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "tokenizer_class": "XLMRobertaTokenizer",
+  "unk_token": "<unk>"
+}
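Together, tokenizer_config.json and special_tokens_map.json configure an XLM-RoBERTa tokenizer with the usual `<s>`/`</s>`/`<pad>`/`<unk>`/`<mask>` special tokens and a 512-token `model_max_length` (the SentenceTransformer itself truncates at 128, per sentence_bert_config.json). A quick sketch of loading it and checking those settings; the local path is illustrative and would normally be the Hub repo id:

```python
from transformers import AutoTokenizer

# Illustrative local path; in practice this would be the Hub repo id.
tokenizer = AutoTokenizer.from_pretrained("./")

print(type(tokenizer).__name__)      # XLMRobertaTokenizer (or its fast variant)
print(tokenizer.model_max_length)    # 512, from tokenizer_config.json
print(tokenizer.special_tokens_map)  # <s>, </s>, <unk>, <pad>, <mask>, ...
```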