|
|
---
|
|
|
license: cc-by-nc-4.0
|
|
|
language:
|
|
|
- ro
|
|
|
base_model:
|
|
|
- mistralai/Mistral-7B-v0.3
|
|
|
datasets:
|
|
|
- OpenLLM-Ro/ro_sft_alpaca
|
|
|
- OpenLLM-Ro/ro_sft_alpaca_gpt4
|
|
|
- OpenLLM-Ro/ro_sft_dolly
|
|
|
- OpenLLM-Ro/ro_sft_selfinstruct_gpt4
|
|
|
- OpenLLM-Ro/ro_sft_norobots
|
|
|
- OpenLLM-Ro/ro_sft_orca
|
|
|
- OpenLLM-Ro/ro_sft_camel
|
|
|
- OpenLLM-Ro/ro_sft_oasst
|
|
|
- OpenLLM-Ro/ro_sft_ultrachat
|
|
|
- OpenLLM-Ro/ro_sft_magpie_mt
|
|
|
- OpenLLM-Ro/ro_sft_magpie_reasoning
|
|
|
model-index:
|
|
|
- name: OpenLLM-Ro/RoMistral-7b-Instruct-2025-04-23
|
|
|
results:
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: RoMT-Bench
|
|
|
type: RoMT-Bench
|
|
|
metrics:
|
|
|
- name: Score
|
|
|
type: Score
|
|
|
value: 6.24
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: RoCulturaBench
|
|
|
type: RoCulturaBench
|
|
|
metrics:
|
|
|
- name: Score
|
|
|
type: Score
|
|
|
value: 4.36
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: Romanian_Academic_Benchmarks
|
|
|
type: Romanian_Academic_Benchmarks
|
|
|
metrics:
|
|
|
- name: Average accuracy
|
|
|
type: accuracy
|
|
|
value: 54.40
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: OpenLLM-Ro/ro_arc_challenge
|
|
|
type: OpenLLM-Ro/ro_arc_challenge
|
|
|
metrics:
|
|
|
- name: Average accuracy
|
|
|
type: accuracy
|
|
|
value: 52.86
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: OpenLLM-Ro/ro_mmlu
|
|
|
type: OpenLLM-Ro/ro_mmlu
|
|
|
metrics:
|
|
|
- name: Average accuracy
|
|
|
type: accuracy
|
|
|
value: 52.33
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: OpenLLM-Ro/ro_winogrande
|
|
|
type: OpenLLM-Ro/ro_winogrande
|
|
|
metrics:
|
|
|
- name: Average accuracy
|
|
|
type: accuracy
|
|
|
value: 68.57
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: OpenLLM-Ro/ro_hellaswag
|
|
|
type: OpenLLM-Ro/ro_hellaswag
|
|
|
metrics:
|
|
|
- name: Average accuracy
|
|
|
type: accuracy
|
|
|
value: 63.50
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: OpenLLM-Ro/ro_gsm8k
|
|
|
type: OpenLLM-Ro/ro_gsm8k
|
|
|
metrics:
|
|
|
- name: Average accuracy
|
|
|
type: accuracy
|
|
|
value: 38.15
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: OpenLLM-Ro/ro_truthfulqa
|
|
|
type: OpenLLM-Ro/ro_truthfulqa
|
|
|
metrics:
|
|
|
- name: Average accuracy
|
|
|
type: accuracy
|
|
|
value: 51.01
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: LaRoSeDa_binary
|
|
|
type: LaRoSeDa_binary
|
|
|
metrics:
|
|
|
- name: Average macro-f1
|
|
|
type: macro-f1
|
|
|
value: 97.67
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: LaRoSeDa_multiclass
|
|
|
type: LaRoSeDa_multiclass
|
|
|
metrics:
|
|
|
- name: Average macro-f1
|
|
|
type: macro-f1
|
|
|
value: 61.79
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: WMT_EN-RO
|
|
|
type: WMT_EN-RO
|
|
|
metrics:
|
|
|
- name: Average bleu
|
|
|
type: bleu
|
|
|
value: 28.69
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: WMT_RO-EN
|
|
|
type: WMT_RO-EN
|
|
|
metrics:
|
|
|
- name: Average bleu
|
|
|
type: bleu
|
|
|
value: 19.23
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: XQuAD
|
|
|
type: XQuAD
|
|
|
metrics:
|
|
|
- name: Average exact_match
|
|
|
type: exact_match
|
|
|
value: 49.05
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: XQuAD
|
|
|
type: XQuAD
|
|
|
metrics:
|
|
|
- name: Average f1
|
|
|
type: f1
|
|
|
value: 69.11
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: STS
|
|
|
type: STS
|
|
|
metrics:
|
|
|
- name: Average spearman
|
|
|
type: spearman
|
|
|
value: 78.67
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: STS
|
|
|
type: STS
|
|
|
metrics:
|
|
|
- name: Average pearson
|
|
|
type: pearson
|
|
|
value: 77.08
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: RoMT-Bench
|
|
|
type: RoMT-Bench
|
|
|
metrics:
|
|
|
- name: First turn
|
|
|
type: Score
|
|
|
value: 6.78
|
|
|
- name: Second turn
|
|
|
type: Score
|
|
|
value: 5.70
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: OpenLLM-Ro/ro_arc_challenge
|
|
|
type: OpenLLM-Ro/ro_arc_challenge
|
|
|
metrics:
|
|
|
- name: 0-shot
|
|
|
type: accuracy
|
|
|
value: 50.04
|
|
|
- name: 1-shot
|
|
|
type: accuracy
|
|
|
value: 50.99
|
|
|
- name: 3-shot
|
|
|
type: accuracy
|
|
|
value: 53.30
|
|
|
- name: 5-shot
|
|
|
type: accuracy
|
|
|
value: 53.73
|
|
|
- name: 10-shot
|
|
|
type: accuracy
|
|
|
value: 54.07
|
|
|
- name: 25-shot
|
|
|
type: accuracy
|
|
|
value: 55.01
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: OpenLLM-Ro/ro_mmlu
|
|
|
type: OpenLLM-Ro/ro_mmlu
|
|
|
metrics:
|
|
|
- name: 0-shot
|
|
|
type: accuracy
|
|
|
value: 51.04
|
|
|
- name: 1-shot
|
|
|
type: accuracy
|
|
|
value: 52.53
|
|
|
- name: 3-shot
|
|
|
type: accuracy
|
|
|
value: 53.22
|
|
|
- name: 5-shot
|
|
|
type: accuracy
|
|
|
value: 52.52
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: OpenLLM-Ro/ro_winogrande
|
|
|
type: OpenLLM-Ro/ro_winogrande
|
|
|
metrics:
|
|
|
- name: 0-shot
|
|
|
type: accuracy
|
|
|
value: 66.38
|
|
|
- name: 1-shot
|
|
|
type: accuracy
|
|
|
value: 68.90
|
|
|
- name: 3-shot
|
|
|
type: accuracy
|
|
|
value: 68.82
|
|
|
- name: 5-shot
|
|
|
type: accuracy
|
|
|
value: 70.17
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: OpenLLM-Ro/ro_hellaswag
|
|
|
type: OpenLLM-Ro/ro_hellaswag
|
|
|
metrics:
|
|
|
- name: 0-shot
|
|
|
type: accuracy
|
|
|
value: 62.61
|
|
|
- name: 1-shot
|
|
|
type: accuracy
|
|
|
value: 63.19
|
|
|
- name: 3-shot
|
|
|
type: accuracy
|
|
|
value: 63.46
|
|
|
- name: 5-shot
|
|
|
type: accuracy
|
|
|
value: 63.92
|
|
|
- name: 10-shot
|
|
|
type: accuracy
|
|
|
value: 64.34
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: OpenLLM-Ro/ro_gsm8k
|
|
|
type: OpenLLM-Ro/ro_gsm8k
|
|
|
metrics:
|
|
|
- name: 1-shot
|
|
|
type: accuracy
|
|
|
value: 27.98
|
|
|
- name: 3-shot
|
|
|
type: accuracy
|
|
|
value: 40.46
|
|
|
- name: 5-shot
|
|
|
type: accuracy
|
|
|
value: 46.02
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: LaRoSeDa_binary
|
|
|
type: LaRoSeDa_binary
|
|
|
metrics:
|
|
|
- name: 0-shot
|
|
|
type: macro-f1
|
|
|
value: 97.87
|
|
|
- name: 1-shot
|
|
|
type: macro-f1
|
|
|
value: 96.73
|
|
|
- name: 3-shot
|
|
|
type: macro-f1
|
|
|
value: 98.20
|
|
|
- name: 5-shot
|
|
|
type: macro-f1
|
|
|
value: 97.87
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: LaRoSeDa_multiclass
|
|
|
type: LaRoSeDa_multiclass
|
|
|
metrics:
|
|
|
- name: 0-shot
|
|
|
type: macro-f1
|
|
|
value: 45.15
|
|
|
- name: 1-shot
|
|
|
type: macro-f1
|
|
|
value: 65.77
|
|
|
- name: 3-shot
|
|
|
type: macro-f1
|
|
|
value: 66.57
|
|
|
- name: 5-shot
|
|
|
type: macro-f1
|
|
|
value: 69.66
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: WMT_EN-RO
|
|
|
type: WMT_EN-RO
|
|
|
metrics:
|
|
|
- name: 0-shot
|
|
|
type: bleu
|
|
|
value: 28.92
|
|
|
- name: 1-shot
|
|
|
type: bleu
|
|
|
value: 28.42
|
|
|
- name: 3-shot
|
|
|
type: bleu
|
|
|
value: 28.85
|
|
|
- name: 5-shot
|
|
|
type: bleu
|
|
|
value: 28.58
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: WMT_RO-EN
|
|
|
type: WMT_RO-EN
|
|
|
metrics:
|
|
|
- name: 0-shot
|
|
|
type: bleu
|
|
|
value: 3.56
|
|
|
- name: 1-shot
|
|
|
type: bleu
|
|
|
value: 9.60
|
|
|
- name: 3-shot
|
|
|
type: bleu
|
|
|
value: 29.53
|
|
|
- name: 5-shot
|
|
|
type: bleu
|
|
|
value: 34.25
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: XQuAD_EM
|
|
|
type: XQuAD_EM
|
|
|
metrics:
|
|
|
- name: 0-shot
|
|
|
type: exact_match
|
|
|
value: 45.21
|
|
|
- name: 1-shot
|
|
|
type: exact_match
|
|
|
value: 49.83
|
|
|
- name: 3-shot
|
|
|
type: exact_match
|
|
|
value: 50.34
|
|
|
- name: 5-shot
|
|
|
type: exact_match
|
|
|
value: 50.84
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: XQuAD_F1
|
|
|
type: XQuAD_F1
|
|
|
metrics:
|
|
|
- name: 0-shot
|
|
|
type: f1
|
|
|
value: 66.40
|
|
|
- name: 1-shot
|
|
|
type: f1
|
|
|
value: 68.92
|
|
|
- name: 3-shot
|
|
|
type: f1
|
|
|
value: 70.68
|
|
|
- name: 5-shot
|
|
|
type: f1
|
|
|
value: 70.44
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: STS_Spearman
|
|
|
type: STS_Spearman
|
|
|
metrics:
|
|
|
- name: 1-shot
|
|
|
type: spearman
|
|
|
value: 79.08
|
|
|
- name: 3-shot
|
|
|
type: spearman
|
|
|
value: 78.65
|
|
|
- name: 5-shot
|
|
|
type: spearman
|
|
|
value: 78.29
|
|
|
- task:
|
|
|
type: text-generation
|
|
|
dataset:
|
|
|
name: STS_Pearson
|
|
|
type: STS_Pearson
|
|
|
metrics:
|
|
|
- name: 1-shot
|
|
|
type: pearson
|
|
|
value: 77.79
|
|
|
- name: 3-shot
|
|
|
type: pearson
|
|
|
value: 76.89
|
|
|
- name: 5-shot
|
|
|
type: pearson
|
|
|
value: 76.57
|
|
|
|
|
|
---
|
|
|
|
|
|
# Model Card for RoMistral-7b-Instruct-2025-04-23
|
|
|
|
|
|
<!-- Provide a quick summary of what the model is/does. -->
|
|
|
|
|
|
RoMistral is a family of pretrained and fine-tuned generative text models for Romanian. This is the repository for the **7B instruct model**. Links to other models can be found at the bottom of this page.
|
|
|
|
|
|
## Model Details
|
|
|
|
|
|
### Model Description
|
|
|
|
|
|
<!-- Provide a longer summary of what this model is. -->
|
|
|
OpenLLM-Ro represents the first open-source effort to build an LLM specialized for Romanian. OpenLLM-Ro develops and publicly releases a collection of Romanian LLMs, both as foundational models and as instruct and chat variants.
|
|
|
|
|
|
|
|
|
- **Developed by:** OpenLLM-Ro
|
|
|
|
|
|
|
|
|
|
|
|
- **Language(s):** Romanian
|
|
|
- **License:** cc-by-nc-4.0
|
|
|
- **Finetuned from model:** [Mistral-7B-v0.3](https://huggingface.co/mistralai/Mistral-7B-v0.3)
|
|
|
- **Trained using:** [RoAlpaca](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_alpaca), [RoAlpacaGPT4](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_alpaca_gpt4), [RoDolly](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_dolly), [RoSelfInstruct](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_selfinstruct_gpt4), [RoNoRobots](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_norobots), [RoOrca](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_orca), [RoCamel](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_camel), [RoOpenAssistant](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_oasst), [RoUltraChat](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_ultrachat), [RoMagpiePro](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_magpie_mt), [RoMagpieReasoning](https://huggingface.co/datasets/OpenLLM-Ro/ro_sft_magpie_reasoning)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
### Model Sources
|
|
|
|
|
|
<!-- Provide the basic links for the model. -->
|
|
|
|
|
|
- **Repository:** https://github.com/OpenLLM-Ro/LLaMA-Factory
|
|
|
- **Paper:** https://arxiv.org/abs/2406.18266
|
|
|
|
|
|
## Intended Use
|
|
|
|
|
|
### Intended Use Cases
|
|
|
|
|
|
RoMistral is intended for research use in Romanian. Base models can be adapted for a variety of natural language tasks, while instruction- and chat-tuned models are intended for assistant-like chat.
|
|
|
|
|
|
### Out-of-Scope Use
|
|
|
|
|
|
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
|
|
|
|
|
|
Use in any manner that violates the license or any applicable laws or regulations, and use in languages other than Romanian.
|
|
|
|
|
|
|
|
|
|
|
|
## How to Get Started with the Model
|
|
|
|
|
|
Use the code below to get started with the model.
|
|
|
|
|
|
```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("OpenLLM-Ro/RoMistral-7b-Instruct-2025-04-23")
model = AutoModelForCausalLM.from_pretrained("OpenLLM-Ro/RoMistral-7b-Instruct-2025-04-23")

# Build a single-turn conversation and render it with the model's chat template.
instruction = "Ce jocuri de societate pot juca cu prietenii mei?"  # "What board games can I play with my friends?"
chat = [
    {"role": "user", "content": instruction},
]
prompt = tokenizer.apply_chat_template(chat, tokenize=False, system_message="")

# The rendered prompt already contains the special tokens, so do not add them again.
inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
outputs = model.generate(input_ids=inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0]))
```
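
If a GPU is available, the model can also be loaded in half precision to roughly halve its memory footprint. The sketch below is illustrative only, assuming `torch` and `accelerate` are installed; the `torch_dtype` and `device_map` values are example choices, not settings prescribed by this card:

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "OpenLLM-Ro/RoMistral-7b-Instruct-2025-04-23"
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Load the weights in bfloat16 and let accelerate place them on the available device(s).
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

chat = [{"role": "user", "content": "Ce jocuri de societate pot juca cu prietenii mei?"}]
prompt = tokenizer.apply_chat_template(chat, tokenize=False, system_message="")

# Move the input ids to the same device as the model before generating.
inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt").to(model.device)
outputs = model.generate(input_ids=inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```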
|
|
|
|
|
|
## Academic Benchmarks
|
|
|
|
|
|
|
|
|
<table>
|
|
|
<tbody>
|
|
|
<tr>
|
|
|
<td><strong>Model</strong></td>
|
|
|
<td><strong><center>Average</center></strong></td>
|
|
|
<td><strong><center>ARC</center></strong></td>
|
|
|
<td><strong><center>MMLU</center></strong></td>
|
|
|
<td><strong><center>Winogrande</center></strong></td>
|
|
|
<td><strong><center>Hellaswag</center></strong></td>
|
|
|
<td><strong><center>GSM8k</center></strong></td>
|
|
|
<td><strong><center>TruthfulQA</center></strong></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td>Mistral-7B-Instruct-v0.2</td><td><center>47.40</center></td><td><center>46.29</center></td><td><center>47.00</center></td><td><center>58.78</center></td><td><center>54.27</center></td><td><center>13.47</center></td><td><center><strong>64.59</strong></center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td>RoMistral-7b-Instruct-2024-05-17</td><td><center>52.54</center></td><td><center>50.41</center></td><td><center>51.61</center></td><td><center>66.48</center></td><td><center>60.27</center></td><td><center>34.19</center></td><td><center>52.30</center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td>RoMistral-7b-Instruct-2024-10-09</td><td><center>52.91</center></td><td><center>52.27</center></td><td><center>49.33</center></td><td><center><strong>70.03</strong></center></td><td><center>62.88</center></td><td><center>32.42</center></td><td><center>50.51</center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td><em>RoMistral-7b-Instruct-2025-04-23</em></td><td><center><em>54.40</em></center></td><td><center><em>52.86</em></center></td><td><center><em>52.33</em></center></td><td><center><em>68.57</em></center></td><td><center><em>63.50</em></center></td><td><center><em>38.15</em></center></td><td><center><em>51.01</em></center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td>RoMistral-7b-Instruct-DPO-2024-10-09</td><td><center>51.95</center></td><td><center>50.73</center></td><td><center>47.88</center></td><td><center>68.41</center></td><td><center>62.27</center></td><td><center>32.27</center></td><td><center>50.12</center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td>RoMistral-7b-Instruct-DPO-2025-04-23</td><td><center><strong>56.62</strong></center></td><td><center><strong>55.51</strong></center></td><td><center><strong>52.61</strong></center></td><td><center>68.04</center></td><td><center><strong>64.97</strong></center></td><td><center><strong>41.07</strong></center></td><td><center>57.55</center></td>
|
|
|
</tr>
|
|
|
</tbody>
|
|
|
</table>
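
The average accuracies reported above are consistent with a plain mean over the few-shot settings listed in this card's model index. A minimal sketch of this aggregation, using the ARC and GSM8k per-shot scores from the model index:

```python
# Per-shot accuracies from the model index.
arc_shots = [50.04, 50.99, 53.30, 53.73, 54.07, 55.01]  # 0/1/3/5/10/25-shot
gsm8k_shots = [27.98, 40.46, 46.02]                      # 1/3/5-shot

print(f"ARC average:   {sum(arc_shots) / len(arc_shots):.2f}")      # 52.86
print(f"GSM8k average: {sum(gsm8k_shots) / len(gsm8k_shots):.2f}")  # 38.15
```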
|
|
|
|
|
|
|
|
|
## Downstream Tasks
|
|
|
|
|
|
<table>
|
|
|
<tbody>
|
|
|
<tr>
|
|
|
<td></td>
|
|
|
<td colspan="4"><center><strong>LaRoSeDa</strong></center></td>
|
|
|
<td colspan="4"><center><strong>WMT</strong></center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td></td>
|
|
|
<td colspan="2"><center><strong>Few-shot</strong></center></td>
|
|
|
<td colspan="2"><center><strong>Finetuned</strong></center></td>
|
|
|
<td colspan="2"><center><strong>Few-shot</strong></center></td>
|
|
|
<td colspan="2"><center><strong>Finetuned</strong></center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td><strong>Model</strong></td>
|
|
|
<td><center><strong>Binary<br>(Macro F1)</strong></center></td>
|
|
|
<td><center><strong>Multiclass<br>(Macro F1)</strong></center></td>
|
|
|
<td><center><strong>Binary<br>(Macro F1)</strong></center></td>
|
|
|
<td><center><strong>Multiclass<br>(Macro F1)</strong></center></td>
|
|
|
<td><center><strong>EN-RO<br>(Bleu)</strong></center></td>
|
|
|
<td><center><strong>RO-EN<br>(Bleu)</strong></center></td>
|
|
|
<td><center><strong>EN-RO<br>(Bleu)</strong></center></td>
|
|
|
<td><center><strong>RO-EN<br>(Bleu)</strong></center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td>Mistral-7B-Instruct-v0.2</td><td><center>96.97</center></td><td><center>56.66</center></td><td><center>98.83</center></td><td><center>87.32</center></td><td><center>18.60</center></td><td><center><strong>33.99</strong></center></td><td><center>26.19</center></td><td><center>39.88</center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td>RoMistral-7b-Instruct-2024-05-17</td><td><center>97.36</center></td><td><center>67.55</center></td><td><center>98.80</center></td><td><center><strong>88.28</strong></center></td><td><center>27.93</center></td><td><center>13.21</center></td><td><center><strong>28.72</strong></center></td><td><center><strong>40.86</strong></center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td>RoMistral-7b-Instruct-2024-10-09</td><td><center>95.56</center></td><td><center><strong>67.83</strong></center></td><td><center><strong>99.00</strong></center></td><td><center>87.57</center></td><td><center>28.28</center></td><td><center>6.10</center></td><td><center>27.70</center></td><td><center>40.36</center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td><em>RoMistral-7b-Instruct-2025-04-23</em></td><td><center><em>97.67</em></center></td><td><center><em>61.79</em></center></td><td><center><em>-</em></center></td><td><center><em>-</em></center></td><td><center><em><strong>28.69</strong></em></center></td><td><center><em>19.23</em></center></td><td><center><em>-</em></center></td><td><center><em>-</em></center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td>RoMistral-7b-Instruct-DPO-2024-10-09</td><td><center>82.13</center></td><td><center>65.24</center></td><td><center>-</center></td><td><center>-</center></td><td><center>26.25</center></td><td><center>6.09</center></td><td><center>-</center></td><td><center>-</center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td>RoMistral-7b-Instruct-DPO-2025-04-23</td><td><center><strong>97.94</strong></center></td><td><center>66.13</center></td><td><center>-</center></td><td><center>-</center></td><td><center>27.24</center></td><td><center>18.41</center></td><td><center>-</center></td><td><center>-</center></td>
|
|
|
</tr>
|
|
|
</tbody>
|
|
|
</table>
|
|
|
|
|
|
|
|
|
<table>
|
|
|
<tbody>
|
|
|
<tr>
|
|
|
<td></td>
|
|
|
<td colspan="4"><center><strong>XQuAD</strong></center></td>
|
|
|
<td colspan="4"><center><strong>STS</strong></center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td></td>
|
|
|
<td colspan="2"><center><strong>Few-shot</strong></center></td>
|
|
|
<td colspan="2"><center><strong>Finetuned</strong></center></td>
|
|
|
<td colspan="2"><center><strong>Few-shot</strong></center></td>
|
|
|
<td colspan="2"><center><strong>Finetuned</strong></center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td><strong>Model</strong></td>
|
|
|
<td><center><strong>(EM)</strong></center></td>
|
|
|
<td><center><strong>(F1)</strong></center></td>
|
|
|
<td><center><strong>(EM)</strong></center></td>
|
|
|
<td><center><strong>(F1)</strong></center></td>
|
|
|
<td><center><strong>(Spearman)</strong></center></td>
|
|
|
<td><center><strong>(Pearson)</strong></center></td>
|
|
|
<td><center><strong>(Spearman)</strong></center></td>
|
|
|
<td><center><strong>(Pearson)</strong></center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td>Mistral-7B-Instruct-v0.2</td><td><center>27.92</center></td><td><center>50.71</center></td><td><center><strong>65.46</strong></center></td><td><center><strong>79.73</strong></center></td><td><center>62.62</center></td><td><center>60.86</center></td><td><center>84.92</center></td><td><center>85.44</center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td>RoMistral-7b-Instruct-2024-05-17</td><td><center>43.66</center></td><td><center>63.70</center></td><td><center>55.04</center></td><td><center>72.31</center></td><td><center>77.43</center></td><td><center><strong>78.43</strong></center></td><td><center>87.25</center></td><td><center>87.79</center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td>RoMistral-7b-Instruct-2024-10-09</td><td><center>41.09</center></td><td><center>63.21</center></td><td><center>47.56</center></td><td><center>62.69</center></td><td><center>78.47</center></td><td><center>77.24</center></td><td><center><strong>87.28</strong></center></td><td><center><strong>87.88</strong></center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td><em>RoMistral-7b-Instruct-2025-04-23</em></td><td><center><em><strong>49.05</strong></em></center></td><td><center><em><strong>69.11</strong></em></center></td><td><center><em>-</em></center></td><td><center><em>-</em></center></td><td><center><em><strong>78.67</strong></em></center></td><td><center><em>77.08</em></center></td><td><center><em>-</em></center></td><td><center><em>-</em></center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td>RoMistral-7b-Instruct-DPO-2024-10-09</td><td><center>23.40</center></td><td><center>45.80</center></td><td><center>-</center></td><td><center>-</center></td><td><center>77.33</center></td><td><center>76.60</center></td><td><center>-</center></td><td><center>-</center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td>RoMistral-7b-Instruct-DPO-2025-04-23</td><td><center>40.86</center></td><td><center>62.24</center></td><td><center>-</center></td><td><center>-</center></td><td><center>77.89</center></td><td><center>76.40</center></td><td><center>-</center></td><td><center>-</center></td>
|
|
|
</tr>
|
|
|
</tbody>
|
|
|
</table>
|
|
|
|
|
|
|
|
|
## MT-Bench
|
|
|
|
|
|
<table>
|
|
|
<tbody>
|
|
|
<tr>
|
|
|
<td><strong>Model</strong></td>
|
|
|
<td><strong><center>Average</center></strong></td>
|
|
|
<td><strong><center>1st turn</center></strong></td>
|
|
|
<td><strong><center>2nd turn</center></strong></td>
|
|
|
<td><strong><center>Answers in Ro</center></strong></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td>Mistral-7B-Instruct-v0.2</td><td><center>5.03</center></td><td><center>5.05</center></td><td><center>5.00</center></td><td><center>154/160</center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td>RoMistral-7b-Instruct-2024-05-17</td><td><center>4.99</center></td><td><center>5.46</center></td><td><center>4.53</center></td><td><center><strong>160/160</strong></center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td>RoMistral-7b-Instruct-2024-10-09</td><td><center>5.29</center></td><td><center>5.86</center></td><td><center>4.72</center></td><td><center><strong>160/160</strong></center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td><em>RoMistral-7b-Instruct-2025-04-23</em></td><td><center><em>6.24</em></center></td><td><center><em>6.78</em></center></td><td><center><em>5.70</em></center></td><td><center><em><strong>160/160</strong></em></center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td>RoMistral-7b-Instruct-DPO-2024-10-09</td><td><center>5.88</center></td><td><center>6.44</center></td><td><center>5.33</center></td><td><center><strong>160/160</strong></center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td>RoMistral-7b-Instruct-DPO-2025-04-23</td><td><center><strong>6.61</strong></center></td><td><center><strong>6.86</strong></center></td><td><center><strong>6.35</strong></center></td><td><center><strong>160/160</strong></center></td>
|
|
|
</tr>
|
|
|
</tbody>
|
|
|
</table>
|
|
|
|
|
|
|
|
|
## RoCulturaBench
|
|
|
|
|
|
<table>
|
|
|
<tbody>
|
|
|
<tr>
|
|
|
<td><strong>Model</strong></td>
|
|
|
<td><strong><center>Average</center></strong></td>
|
|
|
<td><strong><center>Answers in Ro</center></strong></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td>Mistral-7B-Instruct-v0.2</td><td><center>3.68</center></td><td><center>97/100</center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td>RoMistral-7b-Instruct-2024-05-17</td><td><center>3.38</center></td><td><center><strong>100/100</strong></center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td>RoMistral-7b-Instruct-2024-10-09</td><td><center>3.99</center></td><td><center><strong>100/100</strong></center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td><em>RoMistral-7b-Instruct-2025-04-23</em></td><td><center><em>4.36</em></center></td><td><center><em><strong>100/100</strong></em></center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td>RoMistral-7b-Instruct-DPO-2024-10-09</td><td><center>4.72</center></td><td><center><strong>100/100</strong></center></td>
|
|
|
</tr>
|
|
|
<tr>
|
|
|
<td>RoMistral-7b-Instruct-DPO-2025-04-23</td><td><center><strong>4.93</strong></center></td><td><center><strong>100/100</strong></center></td>
|
|
|
</tr>
|
|
|
</tbody>
|
|
|
</table>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
## RoMistral Model Family
|
|
|
|
|
|
| Model | Link |
|
|
|
|--------------------|:--------:|
|
|
|
|RoMistral-7b-Instruct-2024-05-17| [link](https://huggingface.co/OpenLLM-Ro/RoMistral-7b-Instruct-2024-05-17) |
|
|
|
|RoMistral-7b-Instruct-2024-10-09| [link](https://huggingface.co/OpenLLM-Ro/RoMistral-7b-Instruct-2024-10-09) |
|
|
|
|*RoMistral-7b-Instruct-2025-04-23*| [link](https://huggingface.co/OpenLLM-Ro/RoMistral-7b-Instruct-2025-04-23) |
|
|
|
|RoMistral-7b-Instruct-DPO-2024-10-09| [link](https://huggingface.co/OpenLLM-Ro/RoMistral-7b-Instruct-DPO-2024-10-09) |
|
|
|
|RoMistral-7b-Instruct-DPO-2025-04-23| [link](https://huggingface.co/OpenLLM-Ro/RoMistral-7b-Instruct-DPO-2025-04-23) |
|
|
|
|
|
|
|
|
|
|
|
|
## Citation
|
|
|
|
|
|
```
|
|
|
@misc{masala2024vorbecstiromanecsterecipetrain,
|
|
|
title={"Vorbe\c{s}ti Rom\^ane\c{s}te?" A Recipe to Train Powerful Romanian LLMs with English Instructions},
|
|
|
author={Mihai Masala and Denis C. Ilie-Ablachim and Alexandru Dima and Dragos Corlatescu and Miruna Zavelca and Ovio Olaru and Simina Terian-Dan and Andrei Terian-Dan and Marius Leordeanu and Horia Velicu and Marius Popescu and Mihai Dascalu and Traian Rebedea},
|
|
|
year={2024},
|
|
|
eprint={2406.18266},
|
|
|
archivePrefix={arXiv},
|
|
|
primaryClass={cs.CL},
|
|
|
url={https://arxiv.org/abs/2406.18266},
|
|
|
}
|
|
|
```
|
|
|