{
  "model_type": "seq2seq",
  "num_encoder_layers": 6,
  "num_decoder_layers": 6,
  "d_model": 512,
  "d_ff": 2048,
  "num_heads": 8,
  "dropout": 0.1,
  "src_vocab_size": 10000,
  "tgt_vocab_size": 10000
}