{ "architectures": [ "CustomGPTPreTrainedModel" ], "block_size": 1024, "model_type": "custom_gpt", "n_embd": 768, "n_head": 6, "n_layer": 12, "tokenizer_class": "GPT2Tokenizer", "torch_dtype": "float32", "transformers_version": "4.48.1", "vocab_size": 50304 }