Commit ab0da83 (verified) by sejaljn · 1 Parent(s): 61b6e79

Model save
README.md CHANGED
@@ -17,8 +17,6 @@ should probably proofread and complete it, then remove this comment. -->
 # Mistral-7B-3
 
 This model is a fine-tuned version of [mistralai/Mistral-7B-Instruct-v0.3](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3) on the None dataset.
-It achieves the following results on the evaluation set:
-- Loss: 0.0596
 
 ## Model description
 
@@ -51,9 +49,6 @@ The following hyperparameters were used during training:
 
 ### Training results
 
-| Training Loss | Epoch | Step | Validation Loss |
-|:-------------:|:-----:|:----:|:---------------:|
-| 0.5257        | 1.48  | 10   | 0.0596          |
 
 
 ### Framework versions
adapter_config.json CHANGED
@@ -19,13 +19,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "gate_proj",
-    "o_proj",
     "v_proj",
     "down_proj",
     "q_proj",
+    "k_proj",
+    "gate_proj",
     "up_proj",
-    "k_proj"
+    "o_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_rslora": false
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b451a42491549fdf165ed94ed3795bd0b119cd7632dbcb4903480e06417a8986
+oid sha256:3b649b9a60539ef3b29e4f5638657d2240535e1de7229025f7bd39d5eaada153
 size 2684416208
runs/Jan29_12-11-31_ip-172-31-10-206.us-east-2.compute.internal/events.out.tfevents.1738152754.ip-172-31-10-206.us-east-2.compute.internal.94585.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c8b648f6d9c4229dfa0016efcc463bff714c574b70648be9c5bca3b7fe976c14
+size 4184
runs/Jan29_12-13-44_ip-172-31-10-206.us-east-2.compute.internal/events.out.tfevents.1738152832.ip-172-31-10-206.us-east-2.compute.internal.94585.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:791854437cd957a15a4dba8d4da74f17e939be94c1325d4c4bb5fc18fcff1fd3
+size 4184
runs/Jan29_12-15-10_ip-172-31-10-206.us-east-2.compute.internal/events.out.tfevents.1738152911.ip-172-31-10-206.us-east-2.compute.internal.98020.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9506d392e7fae723d7a5c2c794f99f41c4bbe29e4a4c8dd5746835cac51b0f8c
+size 5386
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:317d83bbbafd845ee730787d479fdbb37fa6cb48424889fe64902c4d56ffa35b
+oid sha256:3d80a6acbc156e21739b01ad7eae4d3c9c2ac36c1f837de8711787270243c2b0
 size 4920
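For completeness, a hedged usage sketch of how the saved `adapter_model.safetensors` could be applied on top of the base model named in README.md, assuming the repo is a PEFT LoRA adapter. The adapter repo id `sejaljn/Mistral-7B-3` is a hypothetical placeholder inferred from the committer name and model title above, not confirmed by this diff.

```python
# Minimal loading sketch, assuming a PEFT LoRA adapter for
# mistralai/Mistral-7B-Instruct-v0.3. The adapter repo id is a
# hypothetical placeholder; substitute the actual repository path.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "mistralai/Mistral-7B-Instruct-v0.3"
adapter_id = "sejaljn/Mistral-7B-3"  # hypothetical repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.bfloat16)
model = PeftModel.from_pretrained(base_model, adapter_id)

inputs = tokenizer("Hello, how are you?", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```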