Upload fine-tuned Llama 3.1 8B QLoRA model

- README.md +74 -64
- adapter_config.json +4 -4
- adapter_model.safetensors +1 -1
- checkpoint-24/adapter_config.json +4 -4
- checkpoint-24/adapter_model.safetensors +1 -1
- checkpoint-24/optimizer.pt +1 -1
- checkpoint-24/trainer_state.json +25 -25
- checkpoint-24/training_args.bin +1 -1
- checkpoint-25/adapter_config.json +4 -4
- checkpoint-25/adapter_model.safetensors +1 -1
- checkpoint-25/optimizer.pt +1 -1
- checkpoint-25/trainer_state.json +31 -31
- checkpoint-25/training_args.bin +1 -1
- training_args.bin +1 -1
README.md
CHANGED

---
library_name: peft
base_model: meta-llama/Llama-3.1-8B
tags:
- llama
- lora
- qlora
- fine-tuned
- robotics
- task-planning
- construction
license: llama3.1
language:
- en
pipeline_tag: text-generation
---

# Llama 3.1 8B - Robot Task Planning (QLoRA Fine-tuned)

This model is a QLoRA fine-tuned version of [meta-llama/Llama-3.1-8B](https://huggingface.co/meta-llama/Llama-3.1-8B) specialized for **robot task planning** in construction environments.

The model converts natural language commands into structured task sequences for construction robots including excavators and dump trucks.

## Model Details

- **Base Model**: meta-llama/Llama-3.1-8B
- **Fine-tuning Method**: QLoRA (4-bit quantization + LoRA)
- **LoRA Rank**: 16
- **LoRA Alpha**: 32
- **Target Modules**: q_proj, k_proj, v_proj, o_proj, gate_proj, up_proj, down_proj
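
For reference, these hyperparameters correspond to a `peft.LoraConfig` along the following lines. This is a sketch reconstructed from the settings above; the dropout value is an assumption, since it is not listed in this card:

```python
from peft import LoraConfig

# Sketch of the adapter configuration implied by the details above.
lora_config = LoraConfig(
    r=16,                    # LoRA rank
    lora_alpha=32,           # scaling factor (alpha / r = 2.0)
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",
        "gate_proj", "up_proj", "down_proj",
    ],
    lora_dropout=0.05,       # assumption: not stated in this card
    bias="none",
    task_type="CAUSAL_LM",   # matches adapter_config.json in this commit
)
```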

## Usage

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel

# Load tokenizer and base model
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.1-8B")
base_model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-3.1-8B",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

# Load the LoRA adapter on top of the base model
model = PeftModel.from_pretrained(base_model, "YongdongWang/llama-3.1-8b-dart-qlora")

# Generate a robot task sequence from a natural language command
command = "Deploy Excavator 1 to Soil Area 1 for excavation"
inputs = tokenizer(command, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=512)
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(response)
```
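
Because the adapter was trained with QLoRA, the base model can also be loaded in 4-bit to cut memory use. The sketch below uses `bitsandbytes` via `transformers.BitsAndBytesConfig`; the exact quantization settings used during training are not listed in this card, so the values shown are typical NF4 defaults, not confirmed:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel

# Assumed NF4 settings -- common QLoRA defaults, not confirmed by this card.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
)

base_model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-3.1-8B",
    quantization_config=bnb_config,
    device_map="auto",
)
model = PeftModel.from_pretrained(base_model, "YongdongWang/llama-3.1-8b-dart-qlora")
```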

## Training Details

- **Training Data**: DART LLM Tasks - robot command and task planning dataset
- **Domain**: Construction robotics (excavators, dump trucks, soil/rock areas)
- **Training Epochs**: 5
- **Batch Size**: 16 (with gradient accumulation)
- **Learning Rate**: 2e-4
- **Optimizer**: paged_adamw_8bit
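
As a rough sketch, these settings map onto `transformers.TrainingArguments` as follows. The per-device batch size and gradient accumulation split are assumptions (only their product of 16 is stated above); the output directory is taken from `trainer_state.json` in this commit:

```python
from transformers import TrainingArguments

# Sketch only: the 4 x 4 split of the effective batch size 16 is an assumption.
training_args = TrainingArguments(
    output_dir="./outputs/llama3.1-8b-lora-qlora-dart-llm",
    num_train_epochs=5,
    per_device_train_batch_size=4,   # assumed split
    gradient_accumulation_steps=4,   # 4 x 4 = effective batch size 16
    learning_rate=2e-4,
    optim="paged_adamw_8bit",        # 8-bit paged AdamW (bitsandbytes)
)
```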

## Capabilities

- **Multi-robot coordination**: Handles multiple excavators and dump trucks
- **Task dependencies**: Generates task sequences with correct dependency ordering
- **Spatial reasoning**: Understands soil areas, rock areas, puddles, and navigation
- **Action planning**: Converts commands to structured JSON task definitions

## Example Output

The model generates structured task sequences in JSON format for robot execution.
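
The exact schema is defined by the DART LLM Tasks dataset and is not reproduced in this card. The snippet below is only a hypothetical illustration of what a dependency-ordered task sequence for the command above might look like; all field names are invented for illustration:

```json
{
  "tasks": [
    {
      "task_id": "move_excavator_1",
      "robot": "excavator_1",
      "action": "navigate",
      "target": "soil_area_1",
      "dependencies": []
    },
    {
      "task_id": "excavate_soil_area_1",
      "robot": "excavator_1",
      "action": "excavate",
      "target": "soil_area_1",
      "dependencies": ["move_excavator_1"]
    }
  ]
}
```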

## Limitations

This model is specifically trained for construction robotics scenarios and may not generalize to other domains without additional fine-tuning.
adapter_config.json
CHANGED
@@ -25,12 +25,12 @@
   "revision": null,
   "target_modules": [
     "k_proj",
-    "…",
-    "gate_proj",
+    "up_proj",
     "v_proj",
+    "gate_proj",
     "o_proj",
-    "…",
-    "…"
+    "q_proj",
+    "down_proj"
   ],
   "task_type": "CAUSAL_LM",
   "trainable_token_indices": null,
adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
+oid sha256:0db982df9cb0f4027019591b4dbed63879790c994db46e7854dc9c28a18915fd
 size 167832240
checkpoint-24/adapter_config.json
CHANGED
@@ -25,12 +25,12 @@
   "revision": null,
   "target_modules": [
     "k_proj",
-    "…",
-    "gate_proj",
+    "up_proj",
     "v_proj",
+    "gate_proj",
     "o_proj",
-    "…",
-    "…"
+    "q_proj",
+    "down_proj"
   ],
   "task_type": "CAUSAL_LM",
   "trainable_token_indices": null,
checkpoint-24/adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
+oid sha256:147edd45999f9fd86edaa853b1974dff9b4b86f3dc23bbf9aebbc796ef8e4782
 size 167832240
checkpoint-24/optimizer.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
+oid sha256:bb5da80ca3998ac6feb84f67496a67e5dc7c7b0dc77a7c882b36663fd205e22c
 size 85728532
checkpoint-24/trainer_state.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "best_global_step": 24,
-  "best_metric": 0.…,
+  "best_metric": 0.02759450115263462,
   "best_model_checkpoint": "./outputs/llama3.1-8b-lora-qlora-dart-llm/checkpoint-24",
   "epoch": 4.0,
   "eval_steps": 500,
@@ -11,62 +11,62 @@
   "log_history": [
     {
       "epoch": 0.8791208791208791,
-      "grad_norm": 0.…,
+      "grad_norm": 0.5126284956932068,
       "learning_rate": 0.00019594929736144976,
-      "loss": 0.…,
+      "loss": 0.6177,
       "step": 5
     },
     {
       "epoch": 1.0,
-      "eval_loss": 0.…,
-      "eval_runtime": …,
-      "eval_samples_per_second": …,
-      "eval_steps_per_second": …,
+      "eval_loss": 0.13462547957897186,
+      "eval_runtime": 1.9882,
+      "eval_samples_per_second": 5.533,
+      "eval_steps_per_second": 5.533,
       "step": 6
     },
     {
       "epoch": 1.7032967032967035,
-      "grad_norm": …,
+      "grad_norm": 4.223966598510742,
       "learning_rate": 0.00015406408174555976,
-      "loss": 0.…,
+      "loss": 0.113,
       "step": 10
     },
     {
       "epoch": 2.0,
-      "eval_loss": 0.…,
-      "eval_runtime": …,
-      "eval_samples_per_second": …,
-      "eval_steps_per_second": …,
+      "eval_loss": 0.041809309273958206,
+      "eval_runtime": 1.9899,
+      "eval_samples_per_second": 5.528,
+      "eval_steps_per_second": 5.528,
       "step": 12
     },
     {
       "epoch": 2.5274725274725274,
-      "grad_norm": 0.…,
+      "grad_norm": 0.15816885232925415,
       "learning_rate": 8.57685161726715e-05,
-      "loss": 0.…,
+      "loss": 0.0253,
       "step": 15
     },
     {
       "epoch": 3.0,
-      "eval_loss": 0.…,
-      "eval_runtime": 2.…,
-      "eval_samples_per_second": …,
-      "eval_steps_per_second": …,
+      "eval_loss": 0.029711483046412468,
+      "eval_runtime": 2.0074,
+      "eval_samples_per_second": 5.48,
+      "eval_steps_per_second": 5.48,
       "step": 18
     },
     {
       "epoch": 3.3516483516483517,
-      "grad_norm": 0.…,
+      "grad_norm": 0.10277726501226425,
       "learning_rate": 2.4425042564574184e-05,
-      "loss": 0.…,
+      "loss": 0.0245,
       "step": 20
     },
     {
       "epoch": 4.0,
-      "eval_loss": 0.…,
-      "eval_runtime": …,
-      "eval_samples_per_second": …,
-      "eval_steps_per_second": …,
+      "eval_loss": 0.02759450115263462,
+      "eval_runtime": 1.9936,
+      "eval_samples_per_second": 5.518,
+      "eval_steps_per_second": 5.518,
       "step": 24
     }
   ],
checkpoint-24/training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
+oid sha256:2c7263e7d5295b74152f3cc3f85217bffc73db5e4dd4a80e6aeb76028cabe1f6
 size 5432
checkpoint-25/adapter_config.json
CHANGED
@@ -25,12 +25,12 @@
   "revision": null,
   "target_modules": [
     "k_proj",
-    "…",
-    "gate_proj",
+    "up_proj",
     "v_proj",
+    "gate_proj",
     "o_proj",
-    "…",
-    "…"
+    "q_proj",
+    "down_proj"
   ],
   "task_type": "CAUSAL_LM",
   "trainable_token_indices": null,
checkpoint-25/adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
+oid sha256:0db982df9cb0f4027019591b4dbed63879790c994db46e7854dc9c28a18915fd
 size 167832240
checkpoint-25/optimizer.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
+oid sha256:ca872ede0234294689c0eb7b7851107caef12b43187810d77b7973fa2d241b01
 size 85728532
checkpoint-25/trainer_state.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "best_global_step": 25,
-  "best_metric": 0.…,
+  "best_metric": 0.0275871679186821,
   "best_model_checkpoint": "./outputs/llama3.1-8b-lora-qlora-dart-llm/checkpoint-25",
   "epoch": 4.175824175824176,
   "eval_steps": 500,
@@ -11,77 +11,77 @@
   "log_history": [
     {
       "epoch": 0.8791208791208791,
-      "grad_norm": 0.…,
+      "grad_norm": 0.5126284956932068,
       "learning_rate": 0.00019594929736144976,
-      "loss": 0.…,
+      "loss": 0.6177,
       "step": 5
     },
     {
       "epoch": 1.0,
-      "eval_loss": 0.…,
-      "eval_runtime": …,
-      "eval_samples_per_second": …,
-      "eval_steps_per_second": …,
+      "eval_loss": 0.13462547957897186,
+      "eval_runtime": 1.9882,
+      "eval_samples_per_second": 5.533,
+      "eval_steps_per_second": 5.533,
       "step": 6
     },
     {
       "epoch": 1.7032967032967035,
-      "grad_norm": …,
+      "grad_norm": 4.223966598510742,
       "learning_rate": 0.00015406408174555976,
-      "loss": 0.…,
+      "loss": 0.113,
       "step": 10
     },
     {
       "epoch": 2.0,
-      "eval_loss": 0.…,
-      "eval_runtime": …,
-      "eval_samples_per_second": …,
-      "eval_steps_per_second": …,
+      "eval_loss": 0.041809309273958206,
+      "eval_runtime": 1.9899,
+      "eval_samples_per_second": 5.528,
+      "eval_steps_per_second": 5.528,
       "step": 12
     },
     {
       "epoch": 2.5274725274725274,
-      "grad_norm": 0.…,
+      "grad_norm": 0.15816885232925415,
       "learning_rate": 8.57685161726715e-05,
-      "loss": 0.…,
+      "loss": 0.0253,
       "step": 15
     },
     {
       "epoch": 3.0,
-      "eval_loss": 0.…,
-      "eval_runtime": 2.…,
-      "eval_samples_per_second": …,
-      "eval_steps_per_second": …,
+      "eval_loss": 0.029711483046412468,
+      "eval_runtime": 2.0074,
+      "eval_samples_per_second": 5.48,
+      "eval_steps_per_second": 5.48,
       "step": 18
     },
     {
       "epoch": 3.3516483516483517,
-      "grad_norm": 0.…,
+      "grad_norm": 0.10277726501226425,
       "learning_rate": 2.4425042564574184e-05,
-      "loss": 0.…,
+      "loss": 0.0245,
       "step": 20
     },
     {
       "epoch": 4.0,
-      "eval_loss": 0.…,
-      "eval_runtime": …,
-      "eval_samples_per_second": …,
-      "eval_steps_per_second": …,
+      "eval_loss": 0.02759450115263462,
+      "eval_runtime": 1.9936,
+      "eval_samples_per_second": 5.518,
+      "eval_steps_per_second": 5.518,
       "step": 24
     },
     {
       "epoch": 4.175824175824176,
-      "grad_norm": 0.…,
+      "grad_norm": 0.10169998556375504,
       "learning_rate": 0.0,
-      "loss": 0.…,
+      "loss": 0.0178,
       "step": 25
     },
     {
       "epoch": 4.175824175824176,
-      "eval_loss": 0.…,
-      "eval_runtime": …,
-      "eval_samples_per_second": …,
-      "eval_steps_per_second": …,
+      "eval_loss": 0.0275871679186821,
+      "eval_runtime": 1.9861,
+      "eval_samples_per_second": 5.539,
+      "eval_steps_per_second": 5.539,
       "step": 25
     }
   ],
checkpoint-25/training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
+oid sha256:2c7263e7d5295b74152f3cc3f85217bffc73db5e4dd4a80e6aeb76028cabe1f6
 size 5432
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
+oid sha256:2c7263e7d5295b74152f3cc3f85217bffc73db5e4dd4a80e6aeb76028cabe1f6
 size 5432