[INFO|2025-01-23 17:01:29] configuration_utils.py:679 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/config.json
[INFO|2025-01-23 17:01:29] configuration_utils.py:746 >> Model config Qwen2Config {
  "_name_or_path": "Qwen/Qwen2.5-0.5B-Instruct",
  "architectures": [
    "Qwen2ForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "eos_token_id": 151645,
  "hidden_act": "silu",
  "hidden_size": 896,
  "initializer_range": 0.02,
  "intermediate_size": 4864,
  "max_position_embeddings": 32768,
  "max_window_layers": 21,
  "model_type": "qwen2",
  "num_attention_heads": 14,
  "num_hidden_layers": 24,
  "num_key_value_heads": 2,
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 1000000.0,
  "sliding_window": null,
  "tie_word_embeddings": true,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.46.1",
  "use_cache": true,
  "use_sliding_window": false,
  "vocab_size": 151936
}
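The config above can be inspected programmatically without downloading the full weights; a minimal sketch using the standard transformers API (not part of this run's code):

```python
from transformers import AutoConfig

# Pull only config.json (cached above) and print a few of the fields shown in the log.
config = AutoConfig.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
print(config.hidden_size, config.num_hidden_layers, config.num_attention_heads, config.num_key_value_heads)
# Per the log: 896 24 14 2 (grouped-query attention: 14 query heads sharing 2 KV heads)
```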
[INFO|2025-01-23 17:01:30] tokenization_utils_base.py:2211 >> loading file vocab.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/vocab.json
[INFO|2025-01-23 17:01:30] tokenization_utils_base.py:2211 >> loading file merges.txt from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/merges.txt
[INFO|2025-01-23 17:01:30] tokenization_utils_base.py:2211 >> loading file tokenizer.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/tokenizer.json
[INFO|2025-01-23 17:01:30] tokenization_utils_base.py:2211 >> loading file added_tokens.json from cache at None
[INFO|2025-01-23 17:01:30] tokenization_utils_base.py:2211 >> loading file special_tokens_map.json from cache at None
[INFO|2025-01-23 17:01:30] tokenization_utils_base.py:2211 >> loading file tokenizer_config.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/tokenizer_config.json
[INFO|2025-01-23 17:01:31] tokenization_utils_base.py:2475 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
[INFO|2025-01-23 17:01:31] configuration_utils.py:679 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/config.json
[INFO|2025-01-23 17:01:31] configuration_utils.py:746 >> Model config Qwen2Config {
  "_name_or_path": "Qwen/Qwen2.5-0.5B-Instruct",
  "architectures": [
    "Qwen2ForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "eos_token_id": 151645,
  "hidden_act": "silu",
  "hidden_size": 896,
  "initializer_range": 0.02,
  "intermediate_size": 4864,
  "max_position_embeddings": 32768,
  "max_window_layers": 21,
  "model_type": "qwen2",
  "num_attention_heads": 14,
  "num_hidden_layers": 24,
  "num_key_value_heads": 2,
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 1000000.0,
  "sliding_window": null,
  "tie_word_embeddings": true,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.46.1",
  "use_cache": true,
  "use_sliding_window": false,
  "vocab_size": 151936
}
[INFO|2025-01-23 17:01:32] tokenization_utils_base.py:2211 >> loading file vocab.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/vocab.json
[INFO|2025-01-23 17:01:32] tokenization_utils_base.py:2211 >> loading file merges.txt from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/merges.txt
[INFO|2025-01-23 17:01:32] tokenization_utils_base.py:2211 >> loading file tokenizer.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/tokenizer.json
[INFO|2025-01-23 17:01:32] tokenization_utils_base.py:2211 >> loading file added_tokens.json from cache at None
[INFO|2025-01-23 17:01:32] tokenization_utils_base.py:2211 >> loading file special_tokens_map.json from cache at None
[INFO|2025-01-23 17:01:32] tokenization_utils_base.py:2211 >> loading file tokenizer_config.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/tokenizer_config.json
[INFO|2025-01-23 17:01:32] tokenization_utils_base.py:2475 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
[INFO|2025-01-23 17:01:32] logging.py:157 >> Add <|im_end|> to stop words.
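<|im_end|> is the ChatML end-of-turn token used by Qwen2.5's chat template, and it corresponds to the eos_token_id (151645) in the config above. A quick illustrative check with the public tokenizer API:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")

# <|im_end|> closes each chat turn; it should map to the eos_token_id printed in the model config.
print(tokenizer.convert_tokens_to_ids("<|im_end|>"))     # expected 151645 (eos_token_id)
print(tokenizer.convert_tokens_to_ids("<|endoftext|>"))   # expected 151643 (bos_token_id / pad)
```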
[INFO|2025-01-23 17:01:32] logging.py:157 >> Loading dataset alpaca_zh_xiaosui.json...
[INFO|2025-01-23 17:01:33] logging.py:157 >> Loading dataset identity.json...
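The two files above are the datasets configured for this run; their names suggest Alpaca-style instruction records. A hypothetical record in that common schema (field names are standard, the content below is invented for illustration):

```python
# One hypothetical record in the Alpaca instruction format.
example_record = {
    "instruction": "Introduce yourself.",
    "input": "",  # optional extra context, often left empty
    "output": "I am an assistant fine-tuned from Qwen2.5-0.5B-Instruct.",
}
```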
[INFO|2025-01-23 17:01:42] configuration_utils.py:679 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/config.json
[INFO|2025-01-23 17:01:42] configuration_utils.py:746 >> Model config Qwen2Config {
  "_name_or_path": "Qwen/Qwen2.5-0.5B-Instruct",
  "architectures": [
    "Qwen2ForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "eos_token_id": 151645,
  "hidden_act": "silu",
  "hidden_size": 896,
  "initializer_range": 0.02,
  "intermediate_size": 4864,
  "max_position_embeddings": 32768,
  "max_window_layers": 21,
  "model_type": "qwen2",
  "num_attention_heads": 14,
  "num_hidden_layers": 24,
  "num_key_value_heads": 2,
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 1000000.0,
  "sliding_window": null,
  "tie_word_embeddings": true,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.46.1",
  "use_cache": true,
  "use_sliding_window": false,
  "vocab_size": 151936
}
[INFO|2025-01-23 17:02:06] modeling_utils.py:3937 >> loading weights file model.safetensors from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/model.safetensors
[INFO|2025-01-23 17:02:06] modeling_utils.py:1670 >> Instantiating Qwen2ForCausalLM model under default dtype torch.bfloat16.
[INFO|2025-01-23 17:02:06] configuration_utils.py:1096 >> Generate config GenerationConfig {
  "bos_token_id": 151643,
  "eos_token_id": 151645
}
[INFO|2025-01-23 17:02:07] modeling_utils.py:4800 >> All model checkpoint weights were used when initializing Qwen2ForCausalLM.
[INFO|2025-01-23 17:02:07] modeling_utils.py:4808 >> All the weights of Qwen2ForCausalLM were initialized from the model checkpoint at Qwen/Qwen2.5-0.5B-Instruct.
If your task is similar to the task the model of the checkpoint was trained on, you can already use Qwen2ForCausalLM for predictions without further training.
[INFO|2025-01-23 17:02:07] configuration_utils.py:1051 >> loading configuration file generation_config.json from cache at /root/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/generation_config.json
[INFO|2025-01-23 17:02:07] configuration_utils.py:1096 >> Generate config GenerationConfig {
  "bos_token_id": 151643,
  "do_sample": true,
  "eos_token_id": [
    151645,
    151643
  ],
  "pad_token_id": 151643,
  "repetition_penalty": 1.1,
  "temperature": 0.7,
  "top_k": 20,
  "top_p": 0.8
}
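These sampling defaults come from the generation_config.json shipped with Qwen2.5-0.5B-Instruct, loaded just above. An illustrative sketch of applying the same settings at inference time with the standard transformers API:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct", torch_dtype="auto")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")

# Mirror the GenerationConfig fields printed above.
gen_cfg = GenerationConfig(
    do_sample=True,
    temperature=0.7,
    top_p=0.8,
    top_k=20,
    repetition_penalty=1.1,
    pad_token_id=151643,
    eos_token_id=[151645, 151643],
)

inputs = tokenizer("Hello!", return_tensors="pt")
output = model.generate(**inputs, generation_config=gen_cfg, max_new_tokens=32)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```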
[INFO|2025-01-23 17:02:07] logging.py:157 >> Gradient checkpointing enabled.
[INFO|2025-01-23 17:02:07] logging.py:157 >> Using torch SDPA for faster training and inference.
[INFO|2025-01-23 17:02:07] logging.py:157 >> Upcasting trainable params to float32.
[INFO|2025-01-23 17:02:07] logging.py:157 >> Fine-tuning method: Full
[INFO|2025-01-23 17:02:07] logging.py:157 >> trainable params: 494,032,768 || all params: 494,032,768 || trainable%: 100.0000
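With full fine-tuning nothing is frozen, so the trainable and total counts are identical. The count itself is just a sum over parameters that require gradients; a minimal sketch:

```python
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")

trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
total = sum(p.numel() for p in model.parameters())
print(f"trainable params: {trainable:,} || all params: {total:,} || trainable%: {100 * trainable / total:.4f}")
# Full fine-tuning of Qwen2.5-0.5B: both counts are 494,032,768 and trainable% is 100.0000.
```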
[INFO|2025-01-23 17:02:07] trainer.py:698 >> Using auto half precision backend
[INFO|2025-01-23 17:02:07] trainer.py:2313 >> ***** Running training *****
[INFO|2025-01-23 17:02:07] trainer.py:2314 >> Num examples = 3,199
[INFO|2025-01-23 17:02:07] trainer.py:2315 >> Num Epochs = 3
[INFO|2025-01-23 17:02:07] trainer.py:2316 >> Instantaneous batch size per device = 2
[INFO|2025-01-23 17:02:07] trainer.py:2319 >> Total train batch size (w. parallel, distributed & accumulation) = 32
[INFO|2025-01-23 17:02:07] trainer.py:2320 >> Gradient Accumulation steps = 8
[INFO|2025-01-23 17:02:07] trainer.py:2321 >> Total optimization steps = 300
[INFO|2025-01-23 17:02:07] trainer.py:2322 >> Number of trainable parameters = 494,032,768
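The numbers above are internally consistent: an effective batch size of 32 with 2 per device and 8 accumulation steps implies two devices (inferred from the arithmetic, not stated in the log), and 3,199 examples at batch 32 give 100 optimizer steps per epoch, hence 300 steps over 3 epochs. A quick check:

```python
import math

per_device_batch = 2   # "Instantaneous batch size per device"
grad_accum = 8         # "Gradient Accumulation steps"
num_devices = 2        # inferred: 2 * 8 * 2 = 32 matches the reported total train batch size

total_batch = per_device_batch * grad_accum * num_devices
num_examples = 3199
num_epochs = 3

steps_per_epoch = math.ceil(num_examples / total_batch)
print(total_batch, steps_per_epoch, steps_per_epoch * num_epochs)  # 32 100 300
```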
[INFO|2025-01-23 17:02:08] integration_utils.py:812 >> Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"
[INFO|2025-01-23 17:02:31] logging.py:157 >> {'loss': 2.4671, 'learning_rate': 4.9999e-05, 'epoch': 0.05}
[INFO|2025-01-23 17:02:53] logging.py:157 >> {'loss': 2.4246, 'learning_rate': 4.9949e-05, 'epoch': 0.10}
[INFO|2025-01-23 17:03:15] logging.py:157 >> {'loss': 2.5563, 'learning_rate': 4.9830e-05, 'epoch': 0.15}
[INFO|2025-01-23 17:03:38] logging.py:157 >> {'loss': 2.5333, 'learning_rate': 4.9640e-05, 'epoch': 0.20}
[INFO|2025-01-23 17:04:00] logging.py:157 >> {'loss': 2.5209, 'learning_rate': 4.9382e-05, 'epoch': 0.25}
[INFO|2025-01-23 17:04:22] logging.py:157 >> {'loss': 2.8026, 'learning_rate': 4.9054e-05, 'epoch': 0.30}
[INFO|2025-01-23 17:04:44] logging.py:157 >> {'loss': 2.8341, 'learning_rate': 4.8659e-05, 'epoch': 0.35}
[INFO|2025-01-23 17:05:06] logging.py:157 >> {'loss': 2.7475, 'learning_rate': 4.8197e-05, 'epoch': 0.40}
[INFO|2025-01-23 17:05:29] logging.py:157 >> {'loss': 2.7298, 'learning_rate': 4.7670e-05, 'epoch': 0.45}
[INFO|2025-01-23 17:05:51] logging.py:157 >> {'loss': 2.6166, 'learning_rate': 4.7079e-05, 'epoch': 0.50}
[INFO|2025-01-23 17:06:12] logging.py:157 >> {'loss': 2.5359, 'learning_rate': 4.6426e-05, 'epoch': 0.55}
[INFO|2025-01-23 17:06:35] logging.py:157 >> {'loss': 2.4790, 'learning_rate': 4.5713e-05, 'epoch': 0.60}
[INFO|2025-01-23 17:06:56] logging.py:157 >> {'loss': 2.4882, 'learning_rate': 4.4941e-05, 'epoch': 0.65}
[INFO|2025-01-23 17:07:18] logging.py:157 >> {'loss': 2.3675, 'learning_rate': 4.4113e-05, 'epoch': 0.70}
[INFO|2025-01-23 17:07:41] logging.py:157 >> {'loss': 2.4285, 'learning_rate': 4.3231e-05, 'epoch': 0.75}
[INFO|2025-01-23 17:08:03] logging.py:157 >> {'loss': 2.5075, 'learning_rate': 4.2298e-05, 'epoch': 0.80}
[INFO|2025-01-23 17:08:25] logging.py:157 >> {'loss': 2.4794, 'learning_rate': 4.1317e-05, 'epoch': 0.85}
[INFO|2025-01-23 17:08:47] logging.py:157 >> {'loss': 2.5271, 'learning_rate': 4.0289e-05, 'epoch': 0.90}
[INFO|2025-01-23 17:09:10] logging.py:157 >> {'loss': 2.4032, 'learning_rate': 3.9218e-05, 'epoch': 0.95}
[INFO|2025-01-23 17:09:34] logging.py:157 >> {'loss': 2.4891, 'learning_rate': 3.8108e-05, 'epoch': 1.00}
[INFO|2025-01-23 17:09:34] trainer.py:3801 >> Saving model checkpoint to saves/Qwen2.5-0.5B-Instruct/full/train_2025-01-23-16-59-22/checkpoint-100
[INFO|2025-01-23 17:09:34] configuration_utils.py:414 >> Configuration saved in saves/Qwen2.5-0.5B-Instruct/full/train_2025-01-23-16-59-22/checkpoint-100/config.json
[INFO|2025-01-23 17:09:34] configuration_utils.py:865 >> Configuration saved in saves/Qwen2.5-0.5B-Instruct/full/train_2025-01-23-16-59-22/checkpoint-100/generation_config.json
[INFO|2025-01-23 17:09:39] modeling_utils.py:3035 >> Model weights saved in saves/Qwen2.5-0.5B-Instruct/full/train_2025-01-23-16-59-22/checkpoint-100/model.safetensors
[INFO|2025-01-23 17:09:39] tokenization_utils_base.py:2646 >> tokenizer config file saved in saves/Qwen2.5-0.5B-Instruct/full/train_2025-01-23-16-59-22/checkpoint-100/tokenizer_config.json
[INFO|2025-01-23 17:09:39] tokenization_utils_base.py:2655 >> Special tokens file saved in saves/Qwen2.5-0.5B-Instruct/full/train_2025-01-23-16-59-22/checkpoint-100/special_tokens_map.json
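Each checkpoint directory written here is a complete transformers model folder (config, generation config, safetensors weights, tokenizer files), so it can be loaded back directly; a minimal sketch:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the intermediate checkpoint saved above.
ckpt = "saves/Qwen2.5-0.5B-Instruct/full/train_2025-01-23-16-59-22/checkpoint-100"
model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype="auto")
tokenizer = AutoTokenizer.from_pretrained(ckpt)
```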
[INFO|2025-01-23 17:10:09] logging.py:157 >> {'loss': 1.4120, 'learning_rate': 3.6960e-05, 'epoch': 1.05}
[INFO|2025-01-23 17:10:33] logging.py:157 >> {'loss': 1.3875, 'learning_rate': 3.5779e-05, 'epoch': 1.10}
[INFO|2025-01-23 17:10:54] logging.py:157 >> {'loss': 1.3676, 'learning_rate': 3.4567e-05, 'epoch': 1.15}
[INFO|2025-01-23 17:11:16] logging.py:157 >> {'loss': 1.3438, 'learning_rate': 3.3328e-05, 'epoch': 1.20}
[INFO|2025-01-23 17:11:39] logging.py:157 >> {'loss': 1.3895, 'learning_rate': 3.2066e-05, 'epoch': 1.25}
[INFO|2025-01-23 17:12:01] logging.py:157 >> {'loss': 1.3399, 'learning_rate': 3.0785e-05, 'epoch': 1.30}
[INFO|2025-01-23 17:12:23] logging.py:157 >> {'loss': 1.3101, 'learning_rate': 2.9486e-05, 'epoch': 1.35}
[INFO|2025-01-23 17:12:45] logging.py:157 >> {'loss': 1.4303, 'learning_rate': 2.8175e-05, 'epoch': 1.40}
[INFO|2025-01-23 17:13:07] logging.py:157 >> {'loss': 1.3833, 'learning_rate': 2.6856e-05, 'epoch': 1.45}
[INFO|2025-01-23 17:13:30] logging.py:157 >> {'loss': 1.3684, 'learning_rate': 2.5531e-05, 'epoch': 1.50}
[INFO|2025-01-23 17:13:53] logging.py:157 >> {'loss': 1.4123, 'learning_rate': 2.4204e-05, 'epoch': 1.55}
[INFO|2025-01-23 17:14:15] logging.py:157 >> {'loss': 1.3913, 'learning_rate': 2.2880e-05, 'epoch': 1.60}
[INFO|2025-01-23 17:14:36] logging.py:157 >> {'loss': 1.4338, 'learning_rate': 2.1562e-05, 'epoch': 1.65}
[INFO|2025-01-23 17:14:58] logging.py:157 >> {'loss': 1.3506, 'learning_rate': 2.0253e-05, 'epoch': 1.70}
[INFO|2025-01-23 17:15:22] logging.py:157 >> {'loss': 1.4525, 'learning_rate': 1.8958e-05, 'epoch': 1.75}
[INFO|2025-01-23 17:15:45] logging.py:157 >> {'loss': 1.3534, 'learning_rate': 1.7679e-05, 'epoch': 1.80}
[INFO|2025-01-23 17:16:07] logging.py:157 >> {'loss': 1.3220, 'learning_rate': 1.6422e-05, 'epoch': 1.85}
[INFO|2025-01-23 17:16:29] logging.py:157 >> {'loss': 1.3326, 'learning_rate': 1.5188e-05, 'epoch': 1.90}
[INFO|2025-01-23 17:16:53] logging.py:157 >> {'loss': 1.3804, 'learning_rate': 1.3982e-05, 'epoch': 1.95}
[INFO|2025-01-23 17:17:15] logging.py:157 >> {'loss': 1.3938, 'learning_rate': 1.2808e-05, 'epoch': 2.00}
[INFO|2025-01-23 17:17:15] trainer.py:3801 >> Saving model checkpoint to saves/Qwen2.5-0.5B-Instruct/full/train_2025-01-23-16-59-22/checkpoint-200
[INFO|2025-01-23 17:17:15] configuration_utils.py:414 >> Configuration saved in saves/Qwen2.5-0.5B-Instruct/full/train_2025-01-23-16-59-22/checkpoint-200/config.json
[INFO|2025-01-23 17:17:15] configuration_utils.py:865 >> Configuration saved in saves/Qwen2.5-0.5B-Instruct/full/train_2025-01-23-16-59-22/checkpoint-200/generation_config.json
[INFO|2025-01-23 17:17:20] modeling_utils.py:3035 >> Model weights saved in saves/Qwen2.5-0.5B-Instruct/full/train_2025-01-23-16-59-22/checkpoint-200/model.safetensors
[INFO|2025-01-23 17:17:20] tokenization_utils_base.py:2646 >> tokenizer config file saved in saves/Qwen2.5-0.5B-Instruct/full/train_2025-01-23-16-59-22/checkpoint-200/tokenizer_config.json
[INFO|2025-01-23 17:17:20] tokenization_utils_base.py:2655 >> Special tokens file saved in saves/Qwen2.5-0.5B-Instruct/full/train_2025-01-23-16-59-22/checkpoint-200/special_tokens_map.json
[INFO|2025-01-23 17:17:52] logging.py:157 >> {'loss': 0.7320, 'learning_rate': 1.1667e-05, 'epoch': 2.05}
[INFO|2025-01-23 17:18:13] logging.py:157 >> {'loss': 0.6639, 'learning_rate': 1.0564e-05, 'epoch': 2.10}
[INFO|2025-01-23 17:18:35] logging.py:157 >> {'loss': 0.5978, 'learning_rate': 9.5018e-06, 'epoch': 2.15}
[INFO|2025-01-23 17:18:56] logging.py:157 >> {'loss': 0.6269, 'learning_rate': 8.4831e-06, 'epoch': 2.20}
[INFO|2025-01-23 17:19:19] logging.py:157 >> {'loss': 0.6189, 'learning_rate': 7.5109e-06, 'epoch': 2.25}
[INFO|2025-01-23 17:19:42] logging.py:157 >> {'loss': 0.6096, 'learning_rate': 6.5880e-06, 'epoch': 2.30}
[INFO|2025-01-23 17:20:04] logging.py:157 >> {'loss': 0.5799, 'learning_rate': 5.7169e-06, 'epoch': 2.35}
[INFO|2025-01-23 17:20:28] logging.py:157 >> {'loss': 0.6156, 'learning_rate': 4.9001e-06, 'epoch': 2.40}
[INFO|2025-01-23 17:20:50] logging.py:157 >> {'loss': 0.6387, 'learning_rate': 4.1398e-06, 'epoch': 2.45}
[INFO|2025-01-23 17:21:12] logging.py:157 >> {'loss': 0.6032, 'learning_rate': 3.4384e-06, 'epoch': 2.50}
[INFO|2025-01-23 17:21:34] logging.py:157 >> {'loss': 0.5976, 'learning_rate': 2.7976e-06, 'epoch': 2.55}
[INFO|2025-01-23 17:21:56] logging.py:157 >> {'loss': 0.5687, 'learning_rate': 2.2193e-06, 'epoch': 2.60}
[INFO|2025-01-23 17:22:19] logging.py:157 >> {'loss': 0.5843, 'learning_rate': 1.7052e-06, 'epoch': 2.65}
[INFO|2025-01-23 17:22:41] logging.py:157 >> {'loss': 0.5979, 'learning_rate': 1.2566e-06, 'epoch': 2.70}
[INFO|2025-01-23 17:23:04] logging.py:157 >> {'loss': 0.5601, 'learning_rate': 8.7490e-07, 'epoch': 2.75}
[INFO|2025-01-23 17:23:26] logging.py:157 >> {'loss': 0.6004, 'learning_rate': 5.6112e-07, 'epoch': 2.80}
[INFO|2025-01-23 17:23:49] logging.py:157 >> {'loss': 0.6026, 'learning_rate': 3.1615e-07, 'epoch': 2.85}
[INFO|2025-01-23 17:24:11] logging.py:157 >> {'loss': 0.6162, 'learning_rate': 1.4068e-07, 'epoch': 2.90}
[INFO|2025-01-23 17:24:33] logging.py:157 >> {'loss': 0.6627, 'learning_rate': 3.5194e-08, 'epoch': 2.95}
[INFO|2025-01-23 17:24:55] logging.py:157 >> {'loss': 0.5781, 'learning_rate': 0.0000e+00, 'epoch': 3.00}
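The training loss drops from roughly 2.5 in the first epoch to about 1.4 in the second and about 0.6 in the third; sharp drops at epoch boundaries are typical when a small instruction set is repeated for several epochs. If this log is saved to a file, the per-step entries can be pulled out for plotting; a throwaway sketch (the file name is hypothetical):

```python
import ast
import re

# Collect the {'loss': ..., 'learning_rate': ..., 'epoch': ...} dicts from a saved copy of this log.
records = []
with open("running_log.txt", encoding="utf-8") as f:  # hypothetical path to this log
    for line in f:
        match = re.search(r">> (\{'loss'.*\})", line)
        if match:
            records.append(ast.literal_eval(match.group(1)))

for record in records:
    print(record["epoch"], record["loss"])
```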
[INFO|2025-01-23 17:24:55] trainer.py:3801 >> Saving model checkpoint to saves/Qwen2.5-0.5B-Instruct/full/train_2025-01-23-16-59-22/checkpoint-300
[INFO|2025-01-23 17:24:55] configuration_utils.py:414 >> Configuration saved in saves/Qwen2.5-0.5B-Instruct/full/train_2025-01-23-16-59-22/checkpoint-300/config.json
[INFO|2025-01-23 17:24:55] configuration_utils.py:865 >> Configuration saved in saves/Qwen2.5-0.5B-Instruct/full/train_2025-01-23-16-59-22/checkpoint-300/generation_config.json
[INFO|2025-01-23 17:25:00] modeling_utils.py:3035 >> Model weights saved in saves/Qwen2.5-0.5B-Instruct/full/train_2025-01-23-16-59-22/checkpoint-300/model.safetensors
[INFO|2025-01-23 17:25:00] tokenization_utils_base.py:2646 >> tokenizer config file saved in saves/Qwen2.5-0.5B-Instruct/full/train_2025-01-23-16-59-22/checkpoint-300/tokenizer_config.json
[INFO|2025-01-23 17:25:00] tokenization_utils_base.py:2655 >> Special tokens file saved in saves/Qwen2.5-0.5B-Instruct/full/train_2025-01-23-16-59-22/checkpoint-300/special_tokens_map.json
[INFO|2025-01-23 17:25:08] trainer.py:2584 >>
Training completed. Do not forget to share your model on huggingface.co/models =)
[INFO|2025-01-23 17:25:08] trainer.py:3801 >> Saving model checkpoint to saves/Qwen2.5-0.5B-Instruct/full/train_2025-01-23-16-59-22
[INFO|2025-01-23 17:25:08] configuration_utils.py:414 >> Configuration saved in saves/Qwen2.5-0.5B-Instruct/full/train_2025-01-23-16-59-22/config.json
[INFO|2025-01-23 17:25:08] configuration_utils.py:865 >> Configuration saved in saves/Qwen2.5-0.5B-Instruct/full/train_2025-01-23-16-59-22/generation_config.json
[INFO|2025-01-23 17:25:14] modeling_utils.py:3035 >> Model weights saved in saves/Qwen2.5-0.5B-Instruct/full/train_2025-01-23-16-59-22/model.safetensors
[INFO|2025-01-23 17:25:14] tokenization_utils_base.py:2646 >> tokenizer config file saved in saves/Qwen2.5-0.5B-Instruct/full/train_2025-01-23-16-59-22/tokenizer_config.json
[INFO|2025-01-23 17:25:14] tokenization_utils_base.py:2655 >> Special tokens file saved in saves/Qwen2.5-0.5B-Instruct/full/train_2025-01-23-16-59-22/special_tokens_map.json
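The final export in train_2025-01-23-16-59-22 is again a standard transformers model folder and can be used for chat inference directly. A minimal sketch using the tokenizer's chat template (prompt text is illustrative):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "saves/Qwen2.5-0.5B-Instruct/full/train_2025-01-23-16-59-22"
model = AutoModelForCausalLM.from_pretrained(path, torch_dtype="auto")
tokenizer = AutoTokenizer.from_pretrained(path)

messages = [{"role": "user", "content": "Who are you?"}]
input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
output = model.generate(input_ids, max_new_tokens=64)
# Decode only the newly generated tokens after the prompt.
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```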
[WARNING|2025-01-23 17:25:15] logging.py:162 >> No metric eval_loss to plot.
[WARNING|2025-01-23 17:25:15] logging.py:162 >> No metric eval_accuracy to plot.
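Both warnings are expected here: the run was configured without a validation split, so no eval_loss or eval_accuracy values were ever logged and the plotting step has nothing to draw. Configuring a held-out split (for example via LLaMA-Factory's val_size option, if available in your version) would populate these metrics on future runs.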
[INFO|2025-01-23 17:25:15] modelcard.py:449 >> Dropping the following result as it does not have all the necessary fields:
{'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}}