End of training
README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-base_model: HuggingFaceTB/SmolLM2-
+base_model: HuggingFaceTB/SmolLM2-360M
 library_name: transformers
 model_name: SmolLM2-FT-MyDataset
 tags:
@@ -13,7 +13,7 @@ licence: license
 
 # Model Card for SmolLM2-FT-MyDataset
 
-This model is a fine-tuned version of [HuggingFaceTB/SmolLM2-
+This model is a fine-tuned version of [HuggingFaceTB/SmolLM2-360M](https://huggingface.co/HuggingFaceTB/SmolLM2-360M).
 It has been trained using [TRL](https://github.com/huggingface/trl).
 
 ## Quick start
@@ -29,7 +29,7 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/goutham_city/huggingface/runs/
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/goutham_city/huggingface/runs/l4yyyqt7)
 
 
 This model was trained with SFT.
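The diff only shows the tail of the README's "Quick start" snippet (the context line `print(output["generated_text"])`). For context, here is a minimal sketch following the quick-start template TRL typically generates for SFT model cards; the Hub repo id is a placeholder, since the actual id is not visible in this diff.

```python
# Sketch of the README's "Quick start", following the usual TRL-generated
# template. The repo id below is hypothetical; substitute the real one.
from transformers import pipeline

question = "If you had a time machine, would you visit the past or the future?"
generator = pipeline(
    "text-generation",
    model="your-username/SmolLM2-FT-MyDataset",  # hypothetical repo id
)
output = generator(
    [{"role": "user", "content": question}],
    max_new_tokens=128,
    return_full_text=False,
)[0]
print(output["generated_text"])
```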
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "HuggingFaceTB/SmolLM2-
+  "_name_or_path": "HuggingFaceTB/SmolLM2-360M",
   "architectures": [
     "LlamaForCausalLM"
   ],
@@ -9,16 +9,16 @@
   "eos_token_id": 2,
   "head_dim": 64,
   "hidden_act": "silu",
-  "hidden_size":
-  "initializer_range": 0.
-  "intermediate_size":
+  "hidden_size": 960,
+  "initializer_range": 0.02,
+  "intermediate_size": 2560,
   "is_llama_config": true,
   "max_position_embeddings": 8192,
   "mlp_bias": false,
   "model_type": "llama",
-  "num_attention_heads":
-  "num_hidden_layers":
-  "num_key_value_heads":
+  "num_attention_heads": 15,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 5,
   "pad_token_id": 2,
   "pretraining_tp": 1,
   "rms_norm_eps": 1e-05,
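The updated values are the SmolLM2-360M geometry: head_dim stays at 64 because hidden_size / num_attention_heads = 960 / 15 = 64, and 15 query heads over 5 KV heads gives 3-way grouped-query attention. A quick sanity check against the base model's config on the Hub:

```python
# Re-derive head_dim from the values in this diff; the config is fetched from
# the base model on the Hub.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("HuggingFaceTB/SmolLM2-360M")
assert config.hidden_size == 960
assert config.intermediate_size == 2560
assert config.num_attention_heads == 15
assert config.num_key_value_heads == 5  # 15 / 5 = 3 query heads per KV head
assert config.hidden_size // config.num_attention_heads == config.head_dim == 64
```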
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:1938eb01384ea35f9cb817ac4e110d3f4d8dd22ff964c74ca3b41012a53878d0
+size 1447317080
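The new pointer's 1,447,317,080 bytes (~1.45 GB) is roughly what a ~362M-parameter model stored in float32 should weigh at 4 bytes per parameter. A back-of-the-envelope check; the parameter count here is the published figure for SmolLM2-360M, so treat it as approximate:

```python
# Back-of-the-envelope: ~362M params * 4 bytes (float32) vs. the actual
# safetensors size from this diff. Exact counts depend on embeddings, norms, etc.
params = 362_000_000   # published SmolLM2-360M parameter count (approximate)
expected = params * 4  # float32 -> 4 bytes per weight
actual = 1_447_317_080  # size of model.safetensors above
print(f"expected ~{expected / 1e9:.2f} GB, actual {actual / 1e9:.2f} GB")
# expected ~1.45 GB, actual 1.45 GB
```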
runs/Jan01_13-54-51_315e11a7424e/events.out.tfevents.1735739697.315e11a7424e.1369.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05a5c04f99c296ccb3c8680944443bce8495380d1f4bb5ed2a7516cfd4f99347
+size 5502
runs/Jan01_14-02-01_315e11a7424e/events.out.tfevents.1735740126.315e11a7424e.3894.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e0ba47dbb03e7f67a84bb7b166ee6e017693b6b24f5a49acc5c4b2e16eeeb475
+size 32372
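The two `runs/` entries are TensorBoard event logs from two training launches about seven minutes apart (per the Unix timestamps in the filenames), uploaded as git-lfs pointers. A sketch for reading them locally with TensorBoard's event reader; the scalar tag name is an assumption, since the actual tags depend on what the trainer logged:

```python
# Inspect an uploaded TensorBoard event file. "train/loss" is an assumed tag;
# print the available tags first to see what this run actually recorded.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("runs/Jan01_14-02-01_315e11a7424e")
ea.Reload()                   # parse event files found under that path
print(ea.Tags()["scalars"])   # e.g. ['train/loss', 'train/learning_rate', ...]
for scalar in ea.Scalars("train/loss"):  # assumed tag name
    print(scalar.step, scalar.value)
```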
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:09b5e7574e177bdf55476c5db3762481fac76e879a5e741546f98f791ace1bca
 size 5624
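training_args.bin is the pickled training-arguments object that the transformers Trainer (and TRL's SFTTrainer on top of it) saves next to the weights; here the pointer's hash changed while the size stayed at 5,624 bytes. A sketch for inspecting it, assuming a trusted file: since it is an arbitrary pickle, recent PyTorch needs weights_only=False to load it.

```python
# Load the pickled training arguments saved by the trainer. This is a pickle,
# not a tensor file, so weights_only=False is required on recent PyTorch --
# only do this with files you trust.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)  # e.g. SFTConfig (TRL) or TrainingArguments
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```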