narekvslife committed · Commit beed70f · verified · 1 Parent(s): f5265ca

dpo_f6rfgz12
README.md CHANGED
@@ -17,15 +17,15 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model was trained from scratch on the None dataset.
 It achieves the following results on the evaluation set:
- - Loss: 0.7021
- - Rewards/chosen: 0.5480
- - Rewards/rejected: 0.4854
- - Rewards/accuracies: 0.1242
- - Rewards/margins: 0.0626
- - Logps/rejected: -26.1813
- - Logps/chosen: -28.6110
- - Logits/rejected: -1.0475
- - Logits/chosen: -1.0554
+ - Loss: 0.7471
+ - Rewards/chosen: 1.4869
+ - Rewards/rejected: 1.1641
+ - Rewards/accuracies: 0.4383
+ - Rewards/margins: 0.3229
+ - Logps/rejected: -140.3850
+ - Logps/chosen: -153.8973
+ - Logits/rejected: -0.6448
+ - Logits/chosen: -0.6851
 
 ## Model description
 
@@ -48,10 +48,12 @@ The following hyperparameters were used during training:
 - train_batch_size: 1
 - eval_batch_size: 1
 - seed: 0
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 2
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: cosine
 - lr_scheduler_warmup_steps: 100
- - training_steps: 2500
+ - training_steps: 2000
 
 ### Training results
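For context on the metric columns above: in DPO-style training, the Rewards/* values are the implicit rewards, i.e. β-scaled log-probability ratios between the policy and a frozen reference model, and Rewards/accuracies is the fraction of pairs where the chosen completion out-scores the rejected one. A minimal sketch of how these quantities relate; β and the reference log-probs below are illustrative assumptions, not values recorded in this commit:

```python
import torch
import torch.nn.functional as F

# Per-sequence summed token log-probs. The policy values match the
# Logps/* entries in the card; the reference values are made up.
policy_chosen_logps = torch.tensor([-153.8973])
policy_rejected_logps = torch.tensor([-140.3850])
ref_chosen_logps = torch.tensor([-168.77])    # assumption
ref_rejected_logps = torch.tensor([-152.03])  # assumption

beta = 0.1  # common DPO default; not recorded in this diff

# Implicit DPO rewards: beta * (policy logp - reference logp).
rewards_chosen = beta * (policy_chosen_logps - ref_chosen_logps)
rewards_rejected = beta * (policy_rejected_logps - ref_rejected_logps)

margins = rewards_chosen - rewards_rejected    # Rewards/margins
accuracies = (margins > 0).float().mean()      # Rewards/accuracies
loss = -F.logsigmoid(margins).mean()           # DPO loss
```

The two new hyperparameter lines are related by simple arithmetic: total_train_batch_size = train_batch_size × gradient_accumulation_steps (× number of devices), i.e. 1 × 2 = 2 here.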
adapter_config.json CHANGED
@@ -16,17 +16,17 @@
 "megatron_core": "megatron.core",
 "modules_to_save": null,
 "peft_type": "LORA",
- "r": 64,
+ "r": 32,
 "rank_pattern": {},
 "revision": null,
 "target_modules": [
- "k_proj",
- "wte",
- "q_proj",
 "fc_out",
+ "k_proj",
+ "fc_in",
 "out_proj",
+ "q_proj",
 "v_proj",
- "fc_in"
+ "wte"
 ],
 "task_type": "CAUSAL_LM",
 "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:37c713439897aefab1d63b1f1938ec6a3ff714264132976bebe1d9780fd5dfe3
- size 75523504
+ oid sha256:d3336b30496cedb477a68def19166b41054123f3cbd0f48e23a18bc33dafcf79
+ size 37774720
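The new adapter file is almost exactly half the size of the old one, which is what the rank change predicts: each adapted module carries an A (r × d_in) and a B (d_out × r) matrix, so LoRA parameter count is linear in r. A quick sanity check:

```python
old_size = 75_523_504  # bytes at r=64
new_size = 37_774_720  # bytes at r=32

# Parameter count (and hence safetensors payload) scales ~linearly
# with the LoRA rank, so halving r should roughly halve the file.
print(new_size / old_size)  # ~0.5002
```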
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:3b116eedb32c45e3728095b6935fbdfd4c2aa57a3764d638aa33e5a1d3237a04
+ oid sha256:2e8d935741839e6606b91e10fcc75f5571638a97b5871cec541b7b522e6905d6
 size 5560