{ "architectures": [ "GPTLanguageModel" ], "vocab_size": 32000, "n_embd": 384, "n_head": 6, "n_layer": 6, "block_size": 256, "dropout": 0.2, "tokenizer_class": "PreTrainedTokenizerFast", "tokenizer_file": "spm_model.model", "_name_or_path": "Duino/Darija-LM", "model_type": "gpt2" }