carpit680 committed
Commit 1d99133 · verified · 1 Parent(s): ad48cfa

Upload folder using huggingface_hub

Files changed (3):
  1. config.json +54 -0
  2. model.safetensors +3 -0
  3. train_config.json +172 -0
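
For context on the commit message above ("Upload folder using huggingface_hub"), here is a minimal sketch of how a checkpoint folder like this one can be pushed to the Hub with huggingface_hub. The local folder path and target repo id are illustrative assumptions, not values recorded in this commit.

from huggingface_hub import HfApi

api = HfApi()  # authenticates via `huggingface-cli login` or the HF_TOKEN env var
api.upload_folder(
    folder_path="outputs/train/act_giraffe_sock_demo_2",  # assumed local checkpoint folder
    repo_id="carpit680/act_giraffe_sock_demo_2",          # hypothetical model repo id
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
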
config.json ADDED
@@ -0,0 +1,54 @@
+ {
+     "type": "act",
+     "n_obs_steps": 1,
+     "normalization_mapping": {
+         "VISUAL": "MEAN_STD",
+         "STATE": "MEAN_STD",
+         "ACTION": "MEAN_STD"
+     },
+     "input_features": {
+         "observation.state": {
+             "type": "STATE",
+             "shape": [
+                 6
+             ]
+         },
+         "observation.images.webcam": {
+             "type": "VISUAL",
+             "shape": [
+                 3,
+                 480,
+                 640
+             ]
+         }
+     },
+     "output_features": {
+         "action": {
+             "type": "ACTION",
+             "shape": [
+                 6
+             ]
+         }
+     },
+     "chunk_size": 100,
+     "n_action_steps": 100,
+     "vision_backbone": "resnet18",
+     "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
+     "replace_final_stride_with_dilation": false,
+     "pre_norm": false,
+     "dim_model": 512,
+     "n_heads": 8,
+     "dim_feedforward": 3200,
+     "feedforward_activation": "relu",
+     "n_encoder_layers": 4,
+     "n_decoder_layers": 1,
+     "use_vae": true,
+     "latent_dim": 32,
+     "n_vae_encoder_layers": 4,
+     "temporal_ensemble_coeff": null,
+     "dropout": 0.1,
+     "kl_weight": 10.0,
+     "optimizer_lr": 1e-05,
+     "optimizer_weight_decay": 0.0001,
+     "optimizer_lr_backbone": 1e-05
+ }
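
config.json describes an ACT policy: one observation step of a 6-dimensional robot state plus a 3×480×640 webcam image, mapped to 100-step chunks of 6-dimensional actions through a ResNet-18 vision backbone and a transformer with a VAE latent. A hedged sketch of fetching and inspecting it, assuming the checkpoint is hosted under a hypothetical repo id:

import json
from huggingface_hub import hf_hub_download

config_path = hf_hub_download(
    repo_id="carpit680/act_giraffe_sock_demo_2",  # hypothetical repo id
    filename="config.json",
)
with open(config_path) as f:
    cfg = json.load(f)

# e.g. the policy emits 100-step action chunks over a 6-DoF action space
print(cfg["type"], cfg["chunk_size"], cfg["output_features"]["action"]["shape"])
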
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8c709fe5133a1608ca9369267f44f907708a190844424d298ac7c2ff98d322b
+ size 206700800
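
What Git stores for model.safetensors is only the LFS pointer above; the actual ~207 MB weight file lives on the Hub. Once downloaded, it can be inspected with the safetensors library. A minimal sketch, again assuming a hypothetical repo id:

from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

weights_path = hf_hub_download(
    repo_id="carpit680/act_giraffe_sock_demo_2",  # hypothetical repo id
    filename="model.safetensors",
)
state_dict = load_file(weights_path)  # maps tensor names to torch.Tensor objects
total_params = sum(t.numel() for t in state_dict.values())
print(f"{len(state_dict)} tensors, {total_params:,} parameters")
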
train_config.json ADDED
@@ -0,0 +1,172 @@
+ {
+     "dataset": {
+         "repo_id": "carpit680/giraffe_sock_demo_2",
+         "episodes": null,
+         "image_transforms": {
+             "enable": false,
+             "max_num_transforms": 3,
+             "random_order": false,
+             "tfs": {
+                 "brightness": {
+                     "weight": 1.0,
+                     "type": "ColorJitter",
+                     "kwargs": {
+                         "brightness": [
+                             0.8,
+                             1.2
+                         ]
+                     }
+                 },
+                 "contrast": {
+                     "weight": 1.0,
+                     "type": "ColorJitter",
+                     "kwargs": {
+                         "contrast": [
+                             0.8,
+                             1.2
+                         ]
+                     }
+                 },
+                 "saturation": {
+                     "weight": 1.0,
+                     "type": "ColorJitter",
+                     "kwargs": {
+                         "saturation": [
+                             0.5,
+                             1.5
+                         ]
+                     }
+                 },
+                 "hue": {
+                     "weight": 1.0,
+                     "type": "ColorJitter",
+                     "kwargs": {
+                         "hue": [
+                             -0.05,
+                             0.05
+                         ]
+                     }
+                 },
+                 "sharpness": {
+                     "weight": 1.0,
+                     "type": "SharpnessJitter",
+                     "kwargs": {
+                         "sharpness": [
+                             0.5,
+                             1.5
+                         ]
+                     }
+                 }
+             }
+         },
+         "local_files_only": false,
+         "use_imagenet_stats": true,
+         "video_backend": "pyav"
+     },
+     "env": null,
+     "policy": {
+         "type": "act",
+         "n_obs_steps": 1,
+         "normalization_mapping": {
+             "VISUAL": "MEAN_STD",
+             "STATE": "MEAN_STD",
+             "ACTION": "MEAN_STD"
+         },
+         "input_features": {
+             "observation.state": {
+                 "type": "STATE",
+                 "shape": [
+                     6
+                 ]
+             },
+             "observation.images.webcam": {
+                 "type": "VISUAL",
+                 "shape": [
+                     3,
+                     480,
+                     640
+                 ]
+             }
+         },
+         "output_features": {
+             "action": {
+                 "type": "ACTION",
+                 "shape": [
+                     6
+                 ]
+             }
+         },
+         "chunk_size": 100,
+         "n_action_steps": 100,
+         "vision_backbone": "resnet18",
+         "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
+         "replace_final_stride_with_dilation": false,
+         "pre_norm": false,
+         "dim_model": 512,
+         "n_heads": 8,
+         "dim_feedforward": 3200,
+         "feedforward_activation": "relu",
+         "n_encoder_layers": 4,
+         "n_decoder_layers": 1,
+         "use_vae": true,
+         "latent_dim": 32,
+         "n_vae_encoder_layers": 4,
+         "temporal_ensemble_coeff": null,
+         "dropout": 0.1,
+         "kl_weight": 10.0,
+         "optimizer_lr": 1e-05,
+         "optimizer_weight_decay": 0.0001,
+         "optimizer_lr_backbone": 1e-05
+     },
+     "output_dir": "outputs/train/act_giraffe_sock_demo_2",
+     "job_name": "act_giraffe_sock_demo_2",
+     "resume": false,
+     "device": "cuda",
+     "use_amp": false,
+     "seed": 1000,
+     "num_workers": 4,
+     "batch_size": 8,
+     "eval_freq": 20000,
+     "log_freq": 200,
+     "save_checkpoint": true,
+     "save_freq": 20000,
+     "offline": {
+         "steps": 100000
+     },
+     "online": {
+         "steps": 0,
+         "rollout_n_episodes": 1,
+         "rollout_batch_size": 1,
+         "steps_between_rollouts": null,
+         "sampling_ratio": 0.5,
+         "env_seed": null,
+         "buffer_capacity": null,
+         "buffer_seed_size": 0,
+         "do_rollout_async": false
+     },
+     "use_policy_training_preset": true,
+     "optimizer": {
+         "type": "adamw",
+         "lr": 1e-05,
+         "weight_decay": 0.0001,
+         "grad_clip_norm": 10.0,
+         "betas": [
+             0.9,
+             0.999
+         ],
+         "eps": 1e-08
+     },
+     "scheduler": null,
+     "eval": {
+         "n_episodes": 50,
+         "batch_size": 50,
+         "use_async_envs": false
+     },
+     "wandb": {
+         "enable": true,
+         "disable_artifact": false,
+         "project": "lerobot",
+         "entity": null,
+         "notes": null
+     }
+ }
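
train_config.json records the full training run: the carpit680/giraffe_sock_demo_2 dataset, 100,000 offline steps at batch size 8 on CUDA, AdamW at lr 1e-5, and logging to the "lerobot" wandb project. A small sketch that reads the file and prints that summary (it assumes train_config.json sits in the working directory):

import json
from pathlib import Path

cfg = json.loads(Path("train_config.json").read_text())

print("dataset:   ", cfg["dataset"]["repo_id"])
print("policy:    ", cfg["policy"]["type"], "| chunk_size =", cfg["policy"]["chunk_size"])
print("steps:     ", cfg["offline"]["steps"], "offline /", cfg["online"]["steps"], "online")
print("batch size:", cfg["batch_size"], "| seed =", cfg["seed"], "| device =", cfg["device"])
print("optimizer: ", cfg["optimizer"]["type"], "@ lr", cfg["optimizer"]["lr"])
print("logging:   wandb project =", cfg["wandb"]["project"])
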