vinucmer-small / config.json
Training a RoBERTa structure with 4,808,259 training samples, 2,406 test samples, vocab size 500, 3 hidden layers, hidden size 256, 4 attention heads, MLM probability 0.15, 10 processes, max length 512, train/test split 0.0005, min sub-sequence length 50, max sub-sequence length 2000, seed 42.
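For orientation, a minimal sketch of how a masked-language-modeling setup with the hyperparameters above could look in transformers; the tokenizer path "./tokenizer" is a placeholder assumption, not a path from this repository:

from transformers import (
    PreTrainedTokenizerFast,
    DataCollatorForLanguageModeling,
    set_seed,
)

set_seed(42)  # seed noted in the commit message

# Placeholder path: the 500-token vocabulary referenced above is assumed to
# live in a tokenizer directory alongside this config.
tokenizer = PreTrainedTokenizerFast.from_pretrained("./tokenizer")

# Dynamic masking with the 0.15 MLM probability from the commit message;
# input sequences are expected to already be chunked to at most 512 tokens.
collator = DataCollatorForLanguageModeling(
    tokenizer=tokenizer,
    mlm=True,
    mlm_probability=0.15,
)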
{
  "architectures": [
    "RobertaForMaskedLM"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "classifier_dropout": null,
  "eos_token_id": 2,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 256,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "roberta",
  "num_attention_heads": 4,
  "num_hidden_layers": 3,
  "pad_token_id": 1,
  "position_embedding_type": "absolute",
  "torch_dtype": "float32",
  "transformers_version": "4.35.1",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 500
}
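To reproduce the architecture described by this file, the config can be loaded directly and used to instantiate an untrained RobertaForMaskedLM; a sketch, assuming config.json has been downloaded to the working directory:

from transformers import RobertaConfig, RobertaForMaskedLM

# Build the 3-layer, 256-hidden, 4-head RoBERTa defined above
# (weights are randomly initialised, not the trained checkpoint).
config = RobertaConfig.from_json_file("config.json")
model = RobertaForMaskedLM(config)

print(f"{sum(p.numel() for p in model.parameters()):,} parameters")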