{
    "type": "vqbet",
    "n_obs_steps": 5,
    "device": "cuda",
    "use_amp": false,
    "normalization_mapping": {
        "VISUAL": "IDENTITY",
        "STATE": "MIN_MAX",
        "ACTION": "MIN_MAX"
    },
    "input_features": {
        "observation.image": {
            "type": "VISUAL",
            "shape": [
                3,
                96,
                96
            ]
        },
        "observation.state": {
            "type": "STATE",
            "shape": [
                2
            ]
        }
    },
    "output_features": {
        "action": {
            "type": "ACTION",
            "shape": [
                2
            ]
        }
    },
    "n_action_pred_token": 3,
    "action_chunk_size": 5,
    "vision_backbone": "resnet18",
    "crop_shape": [
        84,
        84
    ],
    "crop_is_random": true,
    "pretrained_backbone_weights": null,
    "use_group_norm": true,
    "spatial_softmax_num_keypoints": 32,
    "n_vqvae_training_steps": 20000,
    "vqvae_n_embed": 16,
    "vqvae_embedding_dim": 256,
    "vqvae_enc_hidden_dim": 128,
    "gpt_block_size": 500,
    "gpt_input_dim": 512,
    "gpt_output_dim": 512,
    "gpt_n_layer": 8,
    "gpt_n_head": 8,
    "gpt_hidden_dim": 512,
    "dropout": 0.1,
    "mlp_hidden_dim": 1024,
    "offset_loss_weight": 10000.0,
    "primary_code_loss_weight": 5.0,
    "secondary_code_loss_weight": 0.5,
    "bet_softmax_temperature": 0.1,
    "sequentially_select": false,
    "optimizer_lr": 0.0001,
    "optimizer_betas": [
        0.95,
        0.999
    ],
    "optimizer_eps": 1e-08,
    "optimizer_weight_decay": 1e-06,
    "optimizer_vqvae_lr": 0.001,
    "optimizer_vqvae_weight_decay": 0.0001,
    "scheduler_warmup_steps": 500
}
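
The JSON above is a VQ-BeT policy configuration in LeRobot's config.json format. As a minimal sketch of loading and sanity-checking it with only the Python standard library (the filename "vqbet_config.json" is a hypothetical local copy of this file, not something the config itself defines):

import json

# Load a local copy of the config above ("vqbet_config.json" is a
# hypothetical filename used for illustration).
with open("vqbet_config.json") as f:
    cfg = json.load(f)

# VQ-BeT predicts n_action_pred_token action tokens, each covering
# action_chunk_size low-level steps, conditioned on n_obs_steps past
# observations.
horizon = cfg["n_action_pred_token"] * cfg["action_chunk_size"]
print(f"observation window: {cfg['n_obs_steps']} steps")
print(f"predicted action horizon: {horizon} steps")  # 3 * 5 = 15

# The 84x84 crop (randomized at train time, per crop_is_random) must
# fit inside the 3x96x96 input image.
_, h, w = cfg["input_features"]["observation.image"]["shape"]
ch, cw = cfg["crop_shape"]
assert ch <= h and cw <= w, "crop_shape must fit inside the image"

With these values the policy consumes a 5-step observation window and emits 3 tokens of 5 actions each, i.e. a 15-step action horizon per prediction.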