Update README.md
Browse files
README.md
CHANGED
@@ -83,7 +83,10 @@ pip install -U FlagEmbedding
|
|
83 |
```python
|
84 |
from FlagEmbedding import BGEM3FlagModel
|
85 |
|
86 |
-
model = BGEM3FlagModel('BAAI/bge-m3',
|
|
|
|
|
|
|
87 |
|
88 |
sentences_1 = ["What is BGE M3?", "Definition of BM25"]
|
89 |
sentences_2 = ["BGE M3 is an embedding model supporting dense retrieval, lexical matching and multi-vector interaction.",
|
@@ -183,8 +186,10 @@ print(model.compute_score(sentence_pairs))
|
|
183 |

|
184 |
|
185 |
- Long Document Retrieval
|
186 |
-
|
187 |
-

|
|
|
|
|
188 |
|
189 |
|
190 |
## Training
|
|
|
83 |
```python
|
84 |
from FlagEmbedding import BGEM3FlagModel
|
85 |
|
86 |
+
model = BGEM3FlagModel('BAAI/bge-m3',
|
87 |
+
batch_size=12, # The batch size used during encoding; reduce it if you run out of memory.
|
88 |
+
max_length=8192, # If you don't need such a long length, you can set a smaller value to speed up the encoding process.
|
89 |
+
use_fp16=True) # Setting use_fp16 to True speeds up computation with a slight performance degradation
|
90 |
|
91 |
sentences_1 = ["What is BGE M3?", "Definition of BM25"]
|
92 |
sentences_2 = ["BGE M3 is an embedding model supporting dense retrieval, lexical matching and multi-vector interaction.",
|
|
|
186 |

|
187 |
|
188 |
- Long Document Retrieval
|
189 |
+
- MLDR:
|
190 |
+

|
191 |
+
- NarrativeQA:
|
192 |
+

|
193 |
|
194 |
|
195 |
## Training
|