Initial commit
- README.md +12 -4
- args.yml +11 -5
- config.yml +5 -1
- replay.mp4 +2 -2
- results.json +1 -1
- sac-seals-Hopper-v0.zip +2 -2
- sac-seals-Hopper-v0/_stable_baselines3_version +1 -1
- sac-seals-Hopper-v0/actor.optimizer.pth +1 -1
- sac-seals-Hopper-v0/critic.optimizer.pth +1 -1
- sac-seals-Hopper-v0/data +27 -25
- sac-seals-Hopper-v0/ent_coef_optimizer.pth +2 -2
- sac-seals-Hopper-v0/policy.pth +1 -1
- sac-seals-Hopper-v0/pytorch_variables.pth +1 -1
- sac-seals-Hopper-v0/system_info.txt +2 -2
- train_eval_metrics.zip +2 -2
README.md
CHANGED
@@ -10,7 +10,7 @@ model-index:
   results:
   - metrics:
     - type: mean_reward
-      value:
+      value: 1350.71 +/- 25.77
       name: mean_reward
     task:
       type: reinforcement-learning
@@ -37,15 +37,21 @@ SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib

 ```
 # Download model and save it into the logs/ folder
-python -m
+python -m rl_zoo3.load_from_hub --algo sac --env seals/Hopper-v0 -orga HumanCompatibleAI -f logs/
 python enjoy.py --algo sac --env seals/Hopper-v0 -f logs/
 ```

+If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do:
+```
+python -m rl_zoo3.load_from_hub --algo sac --env seals/Hopper-v0 -orga HumanCompatibleAI -f logs/
+rl_zoo3 enjoy --algo sac --env seals/Hopper-v0 -f logs/
+```
+
 ## Training (with the RL Zoo)
 ```
 python train.py --algo sac --env seals/Hopper-v0 -f logs/
 # Upload the model and generate video (when possible)
-python -m
+python -m rl_zoo3.push_to_hub --algo sac --env seals/Hopper-v0 -f logs/ -orga HumanCompatibleAI
 ```

 ## Hyperparameters
@@ -58,7 +64,9 @@ OrderedDict([('batch_size', 128),
              ('n_timesteps', 1000000.0),
              ('policy', 'MlpPolicy'),
              ('policy_kwargs',
-              '
+              {'log_std_init': -1.6829391077276037,
+               'net_arch': [256, 256],
+               'use_sde': False}),
              ('tau', 0.08),
              ('train_freq', 32),
              ('normalize', False)])
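The model card above only shows the RL Zoo command line. As a rough sketch (not part of the card), the checkpoint can also be loaded directly with stable-baselines3 through the `huggingface_sb3` helper; the repo id `HumanCompatibleAI/sac-seals-Hopper-v0` (organisation plus file name from this commit) and the `seals` package registering `seals/Hopper-v0` are assumptions here:

```
# Hedged sketch: load the uploaded checkpoint without the RL Zoo scripts.
# Assumes `pip install stable-baselines3 huggingface_sb3 seals` and that the
# repo id is HumanCompatibleAI/sac-seals-Hopper-v0.
import gym
import seals  # noqa: F401  -- registers the seals/Hopper-v0 environment
from huggingface_sb3 import load_from_hub
from stable_baselines3 import SAC

checkpoint = load_from_hub(
    repo_id="HumanCompatibleAI/sac-seals-Hopper-v0",
    filename="sac-seals-Hopper-v0.zip",
)
model = SAC.load(checkpoint)

env = gym.make("seals/Hopper-v0")
obs = env.reset()
done = False
while not done:
    action, _ = model.predict(obs, deterministic=True)
    obs, reward, done, info = env.step(action)
```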
args.yml
CHANGED
@@ -1,6 +1,8 @@
 !!python/object/apply:collections.OrderedDict
 - - - algo
     - sac
+  - - conf_file
+    - hyperparams/python/sac.py
   - - device
     - cpu
   - - env
@@ -16,7 +18,7 @@
   - - hyperparams
     - null
   - - log_folder
-    -
+    - logs
   - - log_interval
     - -1
   - - max_total_trials
@@ -41,6 +43,8 @@
     - null
   - - optimize_hyperparameters
     - false
+  - - progress
+    - false
   - - pruner
     - median
   - - sampler
@@ -50,13 +54,13 @@
   - - save_replay_buffer
     - false
   - - seed
-    -
+    - 5
   - - storage
     - null
   - - study_name
     - null
   - - tensorboard_log
-    - runs/seals/Hopper-
+    - runs/seals/Hopper-v0__sac__5__1672325329
   - - track
     - true
   - - trained_agent
@@ -70,6 +74,8 @@
   - - verbose
     - 1
   - - wandb_entity
-    -
+    - ernestum
   - - wandb_project_name
-    - seals-experts-
+    - seals-experts-normalized
+  - - yaml_file
+    - null
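args.yml uses the `!!python/object/apply:collections.OrderedDict` tag, so it cannot be parsed with `yaml.safe_load`. A minimal sketch of reading it with PyYAML's unsafe loader (only do this with files you trust):

```
# Hedged sketch: parse args.yml despite the Python-specific YAML tag.
import yaml

with open("args.yml") as f:
    # UnsafeLoader is required for !!python/object/apply; never use it on untrusted input.
    args = yaml.load(f, Loader=yaml.UnsafeLoader)

print(args["algo"])       # 'sac'
print(args["seed"])       # 5
print(args["conf_file"])  # 'hyperparams/python/sac.py'
```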
config.yml
CHANGED
@@ -14,7 +14,11 @@
   - - policy
     - MlpPolicy
   - - policy_kwargs
-    -
+    - log_std_init: -1.6829391077276037
+      net_arch:
+      - 256
+      - 256
+      use_sde: false
   - - tau
     - 0.08
   - - train_freq
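For reference, a sketch of how the hyperparameters visible in this commit (batch_size, tau, train_freq, policy_kwargs, the 1e6 timesteps from the README and the seed from args.yml) would map onto the SAC constructor. This is not the training script used here, and the values truncated out of the hunk (learning rate, buffer size, gamma, ...) are deliberately left out:

```
# Hedged sketch: only hyperparameters visible in this commit are filled in.
import gym
import seals  # noqa: F401
from stable_baselines3 import SAC

model = SAC(
    "MlpPolicy",
    gym.make("seals/Hopper-v0"),
    batch_size=128,
    tau=0.08,
    train_freq=32,
    policy_kwargs=dict(
        log_std_init=-1.6829391077276037,
        net_arch=[256, 256],
        use_sde=False,
    ),
    seed=5,  # from args.yml
    verbose=1,
)
model.learn(total_timesteps=1_000_000)
```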
replay.mp4
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:0320c2e609181e02cf1b3abcead357e24a8fc9f5b35673f1affdcc6c0aec8010
+size 1555874
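replay.mp4 (like the .zip and .pth files below) is stored through Git LFS, so the repository itself only versions a three-line pointer containing the object's sha256 and size. A small sketch for checking a downloaded copy against the pointer:

```
# Hedged sketch: verify a file fetched via `git lfs pull` against the oid in its pointer.
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "0320c2e609181e02cf1b3abcead357e24a8fc9f5b35673f1affdcc6c0aec8010"
print(sha256_of("replay.mp4") == expected)
```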
results.json
CHANGED
@@ -1 +1 @@
-{"mean_reward":
+{"mean_reward": 1350.7082415, "std_reward": 25.772142362091774, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2022-12-31T18:49:05.850426"}
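The fields in results.json are the kind of summary stable-baselines3's `evaluate_policy` produces for 10 deterministic episodes. A sketch of re-running that evaluation (the exact numbers depend on the evaluation seed):

```
# Hedged sketch: reproduce the kind of evaluation summarised in results.json.
import gym
import seals  # noqa: F401
from stable_baselines3 import SAC
from stable_baselines3.common.evaluation import evaluate_policy

model = SAC.load("sac-seals-Hopper-v0.zip")
mean_reward, std_reward = evaluate_policy(
    model, gym.make("seals/Hopper-v0"), n_eval_episodes=10, deterministic=True
)
print(f"{mean_reward:.2f} +/- {std_reward:.2f}")  # reported here as 1350.71 +/- 25.77
```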
sac-seals-Hopper-v0.zip
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:7abbba9be28b4af60b0711e5c3ae3864405922a2f5335685d891df3afa4ec0b3
+size 3142136
sac-seals-Hopper-v0/_stable_baselines3_version
CHANGED
@@ -1 +1 @@
-1.6.
+1.6.2
sac-seals-Hopper-v0/actor.optimizer.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:16fb60e9bba299acb66c1356552b3e2a02558264f971dcb594f31b4535e704a6
 size 571549
sac-seals-Hopper-v0/critic.optimizer.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:f1650bf0b2b6b18fb6da49b5bae10ae504ca74e467c83d7beb4fc4d2ed00f198
 size 1131513
sac-seals-Hopper-v0/data
CHANGED
@@ -4,17 +4,17 @@
":serialized:": "gAWVMAAAAAAAAACMHnN0YWJsZV9iYXNlbGluZXMzLnNhYy5wb2xpY2llc5SMCVNBQ1BvbGljeZSTlC4=",
"__module__": "stable_baselines3.sac.policies",
"__doc__": "\n Policy class (with both actor and critic) for SAC.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param sde_net_arch: Network architecture for extracting features\n when using gSDE. If None, the latent features from the policy will be used.\n Pass an empty list to use the states as features.\n :param use_expln: Use ``expln()`` function instead of ``exp()`` when using gSDE to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param clip_mean: Clip the mean output when using gSDE to avoid numerical instability.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n :param n_critics: Number of critic networks to create.\n :param share_features_extractor: Whether to share or not the features extractor\n between the actor and the critic (this saves computation time)\n ",
-"__init__": "<function SACPolicy.__init__ at
-"_build": "<function SACPolicy._build at
-"_get_constructor_parameters": "<function SACPolicy._get_constructor_parameters at
-"reset_noise": "<function SACPolicy.reset_noise at
-"make_actor": "<function SACPolicy.make_actor at
-"make_critic": "<function SACPolicy.make_critic at
-"forward": "<function SACPolicy.forward at
-"_predict": "<function SACPolicy._predict at
-"set_training_mode": "<function SACPolicy.set_training_mode at
"__abstractmethods__": "frozenset()",
-"_abc_impl": "<_abc_data object at
},
"verbose": 1,
"policy_kwargs": {
@@ -40,7 +40,7 @@
},
"action_space": {
":type:": "<class 'gym.spaces.box.Box'>",
-
":serialized:": "gAWV9QsAAAAAAACMDmd5bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lGgFk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMBl9zaGFwZZRLA4WUjANsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWDAAAAAAAAAAAAIC/AACAvwAAgL+UaApLA4WUjAFDlHSUUpSMBGhpZ2iUaBIolgwAAAAAAAAAAACAPwAAgD8AAIA/lGgKSwOFlGgVdJRSlIwNYm91bmRlZF9iZWxvd5RoEiiWAwAAAAAAAAABAQGUaAeMAmIxlImIh5RSlChLA4wBfJROTk5K/////0r/////
"dtype": "float32",
"_shape": [
3
@@ -55,17 +55,17 @@
"num_timesteps": 1000000,
"_total_timesteps": 1000000,
"_num_timesteps_at_start": 0,
-"seed":
"action_noise": null,
-"start_time":
"learning_rate": {
":type:": "<class 'function'>",
-":serialized:": "
},
-"tensorboard_log": "runs/seals/Hopper-
"lr_schedule": {
":type:": "<class 'function'>",
-":serialized:": "
},
"_last_obs": null,
"_last_episode_starts": {
@@ -74,7 +74,7 @@
},
"_last_original_obs": {
":type:": "<class 'numpy.ndarray'>",
-":serialized:": "
},
"_episode_num": 1000,
"use_sde": false,
@@ -82,7 +82,7 @@
"_current_progress_remaining": 0.0,
"ep_info_buffer": {
":type:": "<class 'collections.deque'>",
-":serialized:": "gAWVgRAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpSMFW51bXB5LmNvcmUubXVsdGlhcnJheZSMBnNjYWxhcpSTlIwFbnVtcHmUjAVkdHlwZZSTlIwCZjiUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////
},
"ep_success_buffer": {
":type:": "<class 'collections.deque'>",
@@ -100,13 +100,13 @@
":type:": "<class 'abc.ABCMeta'>",
":serialized:": "gAWVNQAAAAAAAACMIHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5idWZmZXJzlIwMUmVwbGF5QnVmZmVylJOULg==",
"__module__": "stable_baselines3.common.buffers",
-"__doc__": "\n Replay buffer used in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device
-"__init__": "<function ReplayBuffer.__init__ at
-"add": "<function ReplayBuffer.add at
-"sample": "<function ReplayBuffer.sample at
-"_get_samples": "<function ReplayBuffer._get_samples at
"__abstractmethods__": "frozenset()",
-"_abc_impl": "<_abc_data object at
},
"replay_buffer_kwargs": {},
"train_freq": {
@@ -116,5 +116,7 @@
"use_sde_at_warmup": false,
"target_entropy": -3.0,
"ent_coef": "auto",
-"target_update_interval": 1
}

":serialized:": "gAWVMAAAAAAAAACMHnN0YWJsZV9iYXNlbGluZXMzLnNhYy5wb2xpY2llc5SMCVNBQ1BvbGljeZSTlC4=",
"__module__": "stable_baselines3.sac.policies",
"__doc__": "\n Policy class (with both actor and critic) for SAC.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param sde_net_arch: Network architecture for extracting features\n when using gSDE. If None, the latent features from the policy will be used.\n Pass an empty list to use the states as features.\n :param use_expln: Use ``expln()`` function instead of ``exp()`` when using gSDE to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param clip_mean: Clip the mean output when using gSDE to avoid numerical instability.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n :param n_critics: Number of critic networks to create.\n :param share_features_extractor: Whether to share or not the features extractor\n between the actor and the critic (this saves computation time)\n ",
+"__init__": "<function SACPolicy.__init__ at 0x7f2ac127cee0>",
+"_build": "<function SACPolicy._build at 0x7f2ac127cf70>",
+"_get_constructor_parameters": "<function SACPolicy._get_constructor_parameters at 0x7f2ac1205040>",
+"reset_noise": "<function SACPolicy.reset_noise at 0x7f2ac12050d0>",
+"make_actor": "<function SACPolicy.make_actor at 0x7f2ac1205160>",
+"make_critic": "<function SACPolicy.make_critic at 0x7f2ac12051f0>",
+"forward": "<function SACPolicy.forward at 0x7f2ac1205280>",
+"_predict": "<function SACPolicy._predict at 0x7f2ac1205310>",
+"set_training_mode": "<function SACPolicy.set_training_mode at 0x7f2ac12053a0>",
"__abstractmethods__": "frozenset()",
+"_abc_impl": "<_abc_data object at 0x7f2ac127bcf0>"
},
"verbose": 1,
"policy_kwargs": {

},
"action_space": {
":type:": "<class 'gym.spaces.box.Box'>",
+
":serialized:": "gAWV9QsAAAAAAACMDmd5bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lGgFk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMBl9zaGFwZZRLA4WUjANsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWDAAAAAAAAAAAAIC/AACAvwAAgL+UaApLA4WUjAFDlHSUUpSMBGhpZ2iUaBIolgwAAAAAAAAAAACAPwAAgD8AAIA/lGgKSwOFlGgVdJRSlIwNYm91bmRlZF9iZWxvd5RoEiiWAwAAAAAAAAABAQGUaAeMAmIxlImIh5RSlChLA4wBfJROTk5K/////0r/////SwB0lGJLA4WUaBV0lFKUjA1ib3VuZGVkX2Fib3ZllGgSKJYDAAAAAAAAAAEBAZRoIUsDhZRoFXSUUpSMCl9ucF9yYW5kb22UjBRudW1weS5yYW5kb20uX3BpY2tsZZSMEl9fcmFuZG9tc3RhdGVfY3RvcpSTlIwHTVQxOTkzN5SFlFKUfZQojA1iaXRfZ2VuZXJhdG9ylGgwjAVzdGF0ZZR9lCiMA2tleZRoEiiWwAkAAAAAAAAAAACA6tN/xWH5wD0oAa1E8OERvEjDYHkfreNmqh4QKt4HW7JZN7H00QJoY2idx8vw52ILeV09wf7H9uSOspEY6SeEKg4LS00Ddxk7dv8QQctbl0qMB+rcHr+TWXcQcbAq35aY5P7OxpdMQRlbjJbbbfnmF1Hv43cL/nHbV1u4nUUjWFla8ouYUvfnLoE63QquxZMPaU4t5xfsVlVZEJitFjDnZAN8CmzaBN8BvxmdUYf/qFE1vlkhpFk4XZTv9UHpfbhwllQFJtkqXTULW1VsByAmZ1OFo5fpJCy0Mv5/U4jfrLFCNLFHmgtwHd+lkxeGKSScibI5vWrl0U161lQhpLgLZZEVQpVKoguvbBXMaTPYJlfu0BFSgoN/2yWossX+9AS1tclZqSU8SC47dMOBZUX3Q8zUe9EET2+7+DPHL2zzsqoKPTri33+Xrh2qtYokX18RehQ9kkLHQ8j2IZV8nshrU82myYzGkmBOUk/N7PSTjxXdhoAm6NKcvKcCjYF+LM1/zf1pwLtF1rZQH/S9J5btwynyaP+DfnBmdI/OKzBgzv5yFq9Rjc+S/g7AAllaFPZYKcLyxdGv6x+4tcVXsnQ4rmf2/DXa/9HtNjmUQ/mmCad+ooEatARFaJK6Vdkjr4xNhvV+HlUlDTD9B4c69tbif8dT6Xp3QYNzsHVgrNcKFtXJM3gg/4ymO97ZtAY908wcKiQn9fcXX+jz53zqaSYdrJwKjNqOMYgEm8vBaMIG8hrYKhY/6TF+6z8UAkv+4o9/SsPMmoemXYAefM8jf/QNc9GfWnFOhJ8j5/0Vs77smpx7ELrcj8Y2ERAUu3FZ9b3YtQFQGhwzgcwCH0yY6Ig04ersEOmGJp00Ejl+V5kNeG7rc/nMk9QBTFVqqBQ7TQjgUTjZyqVNPhEkiaLyhdw1bpw5wl24C9vUK6xIxXMDLA2PEu/XWZ/mrklBUGohRNOHmXjPJFxaVT1h0X7LTkP0bEZHOeOx334toT6fR0rx8oP28gFglY7LohEaM2ERFHYGsA9BVF2/Yc58iou71Vh6MOOkaj+aRx9roaYYmjASvXleLXdYaDNmUUato4bLxkS8oC89wu9SBBViVrowjYAIMAhLs6brp7IjIBqiyNiIPvT2roNPimRpFGt1q+9zwWmWcB2k83KGDYoAybWNVtx9gSD+qOtM5YefZU9hnC81EUXymxeOU3PqNew1uN/Xjrl9jOrbQ7rU/A5MHHEIsCfY88LaRr2Z7GZZioCgemcv66XEBQRxyRqGkJicgCt8pQkOBZICaNOBvLNMSRylajx9R/EMpho+5Csx1c05XIOEV661JWxrPAcgtAzriNDWA+Vp+bk1heFvFhO30GtFqhDpxbGsBbDbp9mx5PksBDYUeYLRzER8V4mIHqjO5ziQLmYeQa4Qg0D4GcEF62dTBrD1n420bx9C+/OwmIPa1eEOwc2EFQh5JX+cVA5IgNeLcyX36mrWhuUEMYFZHz/zzRr27dmFCUZcsr0bP8D1pvLNJsZ09G2rYmFaq6UXqiCHa8Q6/MaqHrQwo18aluCLh/6avB9bPlN7F1v90xwCLHkFFla3Paakp+NwZfTz5zLDLCMrKX9Mu6Ke3QUR8ZlckvxKV7Hw+z2OntpCfVfDh0160gQAAHBS5qhEBsuyVQ3P8TVuBzFIrgsEFg/k2DoL3LEhz/nfhybKHB9BNJwvnlE/AtmkoEdBkFKSfsFjWzdINaN/HlNI5ZdKRhYrMRqj9WLJ36WJj26RF8dTTkVzlYJcRMPqiHz02VD2v/YIclDfISll5JtNDQHAcde4vGF4tkVdm/6SSmd1Ca58AzO/vUTRm0ewnwgAmBe528CpHpQyinIehsj6ab2uRk5bn6yCtoRzMYURXxLhyr6aYgr40LZ9srkuU9aTxdd0pzmXkvY/DVoxsmBM8cZjgG3HNhKlrYOVbOXtJsHGmPdnBdJTGfMXfjwRRPO04FVEDkFVmhlj5eZLZiG2Hc6GTDp0RD2rtrrMKZUfCViSoNvlEBNhmKL+wkaVWJyc2L/traE6bMd13hB007gsQ100pwpVkAzveKf8vEWiC1KRCRFpsouPP6w2/FdW5aKAPxUQQWuBm6uLojNxPcMLigD+mxJ2UtR4dJQ8u6Mg0H++Pazi2Dad/+2hB+x4IrohYhcC8Iyp3c1zt2Gddp5U+xR1tSZH1q1XDd7nuaC6eSsvtdCeShv13GLmR/AaeHlTWN+gzholHKKOYR4MdvCFBPOpIO/vWoqAlX9qf3K3k+Z5PPzE1E5IZbWNb9OEF5owiMBdIU9X12IrESH47szKCX5U7i/3SyDAkW11hFZtw5Ez7aKdzveU8K24QUa5GlAFnuq/abt790qkVfTmk5xD5XmHrjeV1fXK2ydiCcHcfHHSlYYWCpyNKVAcT0XVrx5l4/21HWs1KuboY+07aLtGO8LWFhTe90oKRh4dUAMUqmVlahO++B0/m/wkTFZYMTxE7/FxL3CICtMzxq45kzD56h4/F7jVFZEEotD5oh4m2nZWQAVvYgRJ5ZygY/yKGqa1/WFjRSuX75cnQcSUNgMJAOtYb7Xk/9x25mv+NMk3MsE/nfEW5++J60cHLBrpR/4DLoaznQNyeBxpnhbZ+2DKiLhqW1ennLder6LxlRZIRZbZtpknrP5D3emYhfUMIfuA3MJowL73IQOYxodNGBH4bQZsqZWsIVPCJVIKb3ydqK3ktqjdi2w6y9o4lMFFu/0yQH3L0wBC1C3Dp8u9AGnHx3Obq/WHKtrFqxu0+JAKD4ulBujifw99/VSZCadmz8qOpqcyYeys/z4t7aowfJ8E+diddt1UEW+mY3PSyanIXtptsiEja3umEClp3Ev+lYsthI/PfbZpkHHlwWQiuzZ0fH6vZb
bC8ImoNQTraXNbppwag86MJKFttkOFerkl2iSyVScEYKmC+pAt988ddz0TbCIkJUXNhHjZPNXhw4xvXZ/jjLO61U2/WeV6vj75Jdf7yPl5/GOQJAja6p68DLmmVJn1BURa05iwhL3jc+9WeW/0ZJs6pQRoDtuwZMeGogotgxsusha14t4WXbYJupB7fxIKSKO2dS+TQMnKDgangIRiYZTAPeTMifmChFb4E0aG+wuJzbMhHzLAFvfW4218KXyFkiqCB9w+kj9I/CS941KoazHt/9FE+MG1tTJtXiyLwPEcIbQeaAnTG4CHMwNTUJS0ILtX2siBj/oI/WZffZz9IOEpt0E/55ZCB82x8SrLzu19Q6dL7Wp0FfVASHqw/FMBOgcbZ/0KpMIIlaQ2z6Ca0o8Bd6eiw5nRJ/KSpXhE0m2MFDJ9RzSUaAeMAnU0lImIh5RSlChLA2gLTk5OSv////9K/////0sAdJRiTXAChZRoFXSUUpSMA3Bvc5RNcAJ1jAloYXNfZ2F1c3OUSwCMBWdhdXNzlEcAAAAAAAAAAHVidWIu",
"dtype": "float32",
"_shape": [
3

"num_timesteps": 1000000,
"_total_timesteps": 1000000,
"_num_timesteps_at_start": 0,
+"seed": 9,
"action_noise": null,
+"start_time": 1672325346434471552,
"learning_rate": {
":type:": "<class 'function'>",
+
":serialized:": "gAWVhQIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMUy9ob21lL21heGltaWxpYW4vdmVudi9saWIvcHl0aG9uMy44L3NpdGUtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lIwEZnVuY5RLgEMCAAGUjAN2YWyUhZQpdJRSlH2UKIwLX19wYWNrYWdlX1+UjBhzdGFibGVfYmFzZWxpbmVzMy5jb21tb26UjAhfX25hbWVfX5SMHnN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi51dGlsc5SMCF9fZmlsZV9flGgMdU5OaACMEF9tYWtlX2VtcHR5X2NlbGyUk5QpUpSFlHSUUpSMHGNsb3VkcGlja2xlLmNsb3VkcGlja2xlX2Zhc3SUjBJfZnVuY3Rpb25fc2V0c3RhdGWUk5RoHn2UfZQoaBZoDYwMX19xdWFsbmFtZV9flIwZY29uc3RhbnRfZm4uPGxvY2Fscz4uZnVuY5SMD19fYW5ub3RhdGlvbnNfX5R9lIwOX19rd2RlZmF1bHRzX1+UTowMX19kZWZhdWx0c19flE6MCl9fbW9kdWxlX1+UaBeMB19fZG9jX1+UTowLX19jbG9zdXJlX1+UaACMCl9tYWtlX2NlbGyUk5RHP1wDdAZveNiFlFKUhZSMF19jbG91ZHBpY2tsZV9zdWJtb2R1bGVzlF2UjAtfX2dsb2JhbHNfX5R9lHWGlIZSMC4="
},
+"tensorboard_log": "runs/seals/Hopper-v0__sac__5__1672325329/seals-Hopper-v0",
"lr_schedule": {
":type:": "<class 'function'>",
+
":serialized:": "gAWVhQIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMUy9ob21lL21heGltaWxpYW4vdmVudi9saWIvcHl0aG9uMy44L3NpdGUtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lIwEZnVuY5RLgEMCAAGUjAN2YWyUhZQpdJRSlH2UKIwLX19wYWNrYWdlX1+UjBhzdGFibGVfYmFzZWxpbmVzMy5jb21tb26UjAhfX25hbWVfX5SMHnN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi51dGlsc5SMCF9fZmlsZV9flGgMdU5OaACMEF9tYWtlX2VtcHR5X2NlbGyUk5QpUpSFlHSUUpSMHGNsb3VkcGlja2xlLmNsb3VkcGlja2xlX2Zhc3SUjBJfZnVuY3Rpb25fc2V0c3RhdGWUk5RoHn2UfZQoaBZoDYwMX19xdWFsbmFtZV9flIwZY29uc3RhbnRfZm4uPGxvY2Fscz4uZnVuY5SMD19fYW5ub3RhdGlvbnNfX5R9lIwOX19rd2RlZmF1bHRzX1+UTowMX19kZWZhdWx0c19flE6MCl9fbW9kdWxlX1+UaBeMB19fZG9jX1+UTowLX19jbG9zdXJlX1+UaACMCl9tYWtlX2NlbGyUk5RHP1wDdAZveNiFlFKUhZSMF19jbG91ZHBpY2tsZV9zdWJtb2R1bGVzlF2UjAtfX2dsb2JhbHNfX5R9lHWGlIZSMC4="
},
"_last_obs": null,
"_last_episode_starts": {

},
"_last_original_obs": {
":type:": "<class 'numpy.ndarray'>",
+
":serialized:": "gAWV1QAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJZgAAAAAAAAAMtLi4FMGyJAUcEg4nVLzz/ijz8elPz3P/Binrx3/ci//ydGT5dLdT8+AvYhNSPcP6w7tYxOwOQ/FgX0jtB09r+Ei6hbdNXeP7HxFxdT1+o/5NAfO2emv79IyymI4AAEwJSMBW51bXB5lIwFZHR5cGWUk5SMAmY4lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGJLAUsMhpSMAUOUdJRSlC4="
},
"_episode_num": 1000,
"use_sde": false,

"_current_progress_remaining": 0.0,
"ep_info_buffer": {
":type:": "<class 'collections.deque'>",
+
":serialized:": "gAWVgRAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpSMFW51bXB5LmNvcmUubXVsdGlhcnJheZSMBnNjYWxhcpSTlIwFbnVtcHmUjAVkdHlwZZSTlIwCZjiUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYkMIAknYt4PJlECUhpRSlIwBbJRN6AOMAXSUR0Cf6gCu2Zy/dX2UKGgGaAloD0MIv56vWW4AlUCUhpRSlGgVTegDaBZHQJ/wcVFhG6R1fZQoaAZoCWgPQwhrm+JxgS+UQJSGlFKUaBVN6ANoFkdAn/a0L2HtW3V9lChoBmgJaA9DCKbwoNm1xZRAlIaUUpRoFU3oA2gWR0Cf/MvZh8YydX2UKGgGaAloD0MI9Q8iGZISlECUhpRSlGgVTegDaBZHQKABhga3qiZ1fZQoaAZoCWgPQwitNCkFDUCVQJSGlFKUaBVN6ANoFkdAoAST7j1f3XV9lChoBmgJaA9DCHnou1vJYpVAlIaUUpRoFU3oA2gWR0CgB6MMRYigdX2UKGgGaAloD0MI1e3sK3+xlUCUhpRSlGgVTegDaBZHQKAK2Q5FPSF1fZQoaAZoCWgPQwixaaUQmFWUQJSGlFKUaBVN6ANoFkdAoA4O8K5TZXV9lChoBmgJaA9DCIoFvqLLfZJAlIaUUpRoFU3oA2gWR0CgEQstTUAldX2UKGgGaAloD0MICmr4FqYvk0CUhpRSlGgVTegDaBZHQKAUHaxoqTd1fZQoaAZoCWgPQwjTakjcszWVQJSGlFKUaBVN6ANoFkdAoBcyFZgXuXV9lChoBmgJaA9DCMFwrmFGIJVAlIaUUpRoFU3oA2gWR0CgGk08mrsCdX2UKGgGaAloD0MIN/3Zj8RIk0CUhpRSlGgVTegDaBZHQKAdfMRHww11fZQoaAZoCWgPQwjexmZHyp6TQJSGlFKUaBVN6ANoFkdAoCCR+OOsDHV9lChoBmgJaA9DCFSPNLj99JJAlIaUUpRoFU3oA2gWR0CgI630wrUcdX2UKGgGaAloD0MIrYcvEyXmlUCUhpRSlGgVTegDaBZHQKAm1LaEi+t1fZQoaAZoCWgPQwjyIhPwaz2UQJSGlFKUaBVN6ANoFkdAoCnQlhPTHHV9lChoBmgJaA9DCNB7YwigRJVAlIaUUpRoFU3oA2gWR0CgLOxiobXIdX2UKGgGaAloD0MIdVYL7AGtlECUhpRSlGgVTegDaBZHQKAwAIBzV+Z1fZQoaAZoCWgPQwg6deWzDIeUQJSGlFKUaBVN6ANoFkdAoDNe0/nnuHV9lChoBmgJaA9DCIANiBCXkZNAlIaUUpRoFU3oA2gWR0CgNoh+fAbidX2UKGgGaAloD0MIRwINNpW8lECUhpRSlGgVTegDaBZHQKA5lobn5i51fZQoaAZoCWgPQwjbhlEQXGSVQJSGlFKUaBVN6ANoFkdAoDzSfzz3AXV9lChoBmgJaA9DCGrecYpe7JRAlIaUUpRoFU3oA2gWR0CgP/HxjJ+2dX2UKGgGaAloD0MI0V0SZ0Wuk0CUhpRSlGgVTegDaBZHQKBMQ7r9l3B1fZQoaAZoCWgPQwgdr0D01FqUQJSGlFKUaBVN6ANoFkdAoE9dW6shgXV9lChoBmgJaA9DCNPaNLZHdJVAlIaUUpRoFU3oA2gWR0CgUn8m8dxRdX2UKGgGaAloD0MID4Ejgfa0k0CUhpRSlGgVTegDaBZHQKBVwmCyyD91fZQoaAZoCWgPQwhNFYxK6rCTQJSGlFKUaBVN6ANoFkdAoFjwUlAu7HV9lChoBmgJaA9DCCvCTUZlnJJAlIaUUpRoFU3oA2gWR0CgXBCosI3SdX2UKGgGaAloD0MIY2GInN7Tk0CUhpRSlGgVTegDaBZHQKBfHU/fO2R1fZQoaAZoCWgPQwgpdck4JpGUQJSGlFKUaBVN6ANoFkdAoGJCf4AS4HV9lChoBmgJaA9DCE7RkVx+BJNAlIaUUpRoFU3oA2gWR0CgZWNxMnJDdX2UKGgGaAloD0MI+3YSEZ4VlECUhpRSlGgVTegDaBZHQKBocKWLP2R1fZQoaAZoCWgPQwjekEYFjtGTQJSGlFKUaBVN6ANoFkdAoGuCa9bosHV9lChoBmgJaA9DCE+y1eW075NAlIaUUpRoFU3oA2gWR0Cgbq8M3IdVdX2UKGgGaAloD0MIZjIczxeulUCUhpRSlGgVTegDaBZHQKBxzj81n/V1fZQoaAZoCWgPQwiVK7zLJYOVQJSGlFKUaBVN6ANoFkdAoHT7XOGCZnV9lChoBmgJaA9DCGq8dJM4pJRAlIaUUpRoFU3oA2gWR0CgeBoXTEzgdX2UKGgGaAloD0MIzTrj+1LXlECUhpRSlGgVTegDaBZHQKB7NSLIgeR1fZQoaAZoCWgPQwivCWmNEVmSQJSGlFKUaBVN6ANoFkdAoH5HTiKiwnV9lChoBmgJaA9DCGixFMkn+ZRAlIaUUpRoFU3oA2gWR0CggWcNYr8SdX2UKGgGaAloD0MIs7ES88x3lECUhpRSlGgVTegDaBZHQKCEfo3aSLZ1fZQoaAZoCWgPQwiPqiaIitySQJSGlFKUaBVN6ANoFkdAoIecL0BfbHV9lChoBmgJaA9DCBR2UfSAB5NAlIaUUpRoFU3oA2gWR0CgiqTSThYOdX2UKGgGaAloD0MIZeJWQcwZkUCUhpRSlGgVTegDaBZHQKCNozu4PPN1fZQoaAZoCWgPQwgCYhIuVPGRQJSGlFKUaBVN6ANoFkdAoJC0EvCdjHV9lChoBmgJaA9DCHvbTIVIapNAlIaUUpRoFU3oA2gWR0Cgk846fapQdX2UKGgGaAloD0MIh2wgXZy6lUCUhpRSlGgVTegDaBZHQKCW0jPfKp11fZQoaAZoCWgPQwgH8BZIUEmVQJSGlFKUaBVN6ANoFkdAoKLUDQqqfnV9lChoBmgJaA9DCM3K9iHfNpZAlIaUUpRoFU3oA2gWR0CgpeSWRigCdX2UKGgGaAloD0MIrimQ2enRlECUhpRSlGgVTegDaBZHQKCpBYU34sV1fZQoaAZoCWgPQwg1e6AV+KCUQJSGlFKUaBVN6ANoFkdAoKwkFnqVyHV9lChoBmgJaA9DCLYsX5cB5pNAlIaUUpRoFU3oA2gWR0CgrxdLxqfwdX2UKGgGaAloD0MItLCnHc7HlUCUhpRSlGgVTegDaBZHQKCyNMlkYoB1fZQoaAZoCWgPQwgstklF8yaUQJSGlFKUaBVN6ANoFkdAoLVhPhybQXV9lChoBmgJaA9DCNm0UgiU8ZVAlIaUUpRoFU3oA2gWR0CguHHW8RL9dX2UKGgGaAloD0MIK702G7ullUCUhpRSlGgVTegDaBZHQKC7dd+ocaR1fZQoaAZoCWgPQwiVRPZB1puVQJSGlFKUaBVN6ANoFkdAoL5vp6hQFnV9lChoBmgJaA9DCJmfG5pSzJVAlIaUUpRoFU3oA2gWR0CgwZ7utwJgdX2UKGgGaAloD0MICp5CrtQfl0
CUhpRSlGgVTegDaBZHQKDEvTBInSh1fZQoaAZoCWgPQwhWgVoMbi6VQJSGlFKUaBVN6ANoFkdAoMfJJGvwE3V9lChoBmgJaA9DCG4T7pXpspRAlIaUUpRoFU3oA2gWR0CgytsN2C/XdX2UKGgGaAloD0MI3X2Oj4bIk0CUhpRSlGgVTegDaBZHQKDN63pfQa91fZQoaAZoCWgPQwhzvALRQwaTQJSGlFKUaBVN6ANoFkdAoND1w71ZknV9lChoBmgJaA9DCDoDIy+L0pNAlIaUUpRoFU3oA2gWR0Cg0/RZU1htdX2UKGgGaAloD0MIPl5IhzcxlUCUhpRSlGgVTegDaBZHQKDXBd+ocaR1fZQoaAZoCWgPQwjisDTwwxuWQJSGlFKUaBVN6ANoFkdAoNo5oIv8InV9lChoBmgJaA9DCPT8aaNKI5VAlIaUUpRoFU3oA2gWR0Cg3WGALApKdX2UKGgGaAloD0MIYW2MnWAelECUhpRSlGgVTegDaBZHQKDgiEHt4Rp1fZQoaAZoCWgPQwguVz82WZOVQJSGlFKUaBVN6ANoFkdAoON+7nPmgnV9lChoBmgJaA9DCEOOrWfY2ZNAlIaUUpRoFU3oA2gWR0Cg5npTVDrrdX2UKGgGaAloD0MIcvp6vjZblECUhpRSlGgVTegDaBZHQKDpjbPhQ3x1fZQoaAZoCWgPQwhpi2t8FmSUQJSGlFKUaBVN6ANoFkdAoOyicZtNz3V9lChoBmgJaA9DCHVz8bf9lZRAlIaUUpRoFU3oA2gWR0Cg+LnlwLmZdX2UKGgGaAloD0MIu4CXGeYilUCUhpRSlGgVTegDaBZHQKD7zwQ176Z1fZQoaAZoCWgPQwgsZoS3V0SUQJSGlFKUaBVN6ANoFkdAoP7W1SflIXV9lChoBmgJaA9DCLtCHyzTBpZAlIaUUpRoFU3oA2gWR0ChAfcm0E5idX2UKGgGaAloD0MIFLAdjAhElUCUhpRSlGgVTegDaBZHQKEFHL6DXe51fZQoaAZoCWgPQwhgHccPBSaVQJSGlFKUaBVN6ANoFkdAoQgn7JnxrnV9lChoBmgJaA9DCCaMZmVrs5VAlIaUUpRoFU3oA2gWR0ChC0U+TvAodX2UKGgGaAloD0MIE7u2t1vQlUCUhpRSlGgVTegDaBZHQKEOZtBv73x1fZQoaAZoCWgPQwifHtsyYACVQJSGlFKUaBVN6ANoFkdAoRGJS9/SY3V9lChoBmgJaA9DCIif/x78J5ZAlIaUUpRoFU3oA2gWR0ChFLerdWQwdX2UKGgGaAloD0MIPdf34aDOlkCUhpRSlGgVTegDaBZHQKEX5fek56t1fZQoaAZoCWgPQwi0AG2rSeiVQJSGlFKUaBVN6ANoFkdAoRrvgpBomHV9lChoBmgJaA9DCIEhq1sNl5RAlIaUUpRoFU3oA2gWR0ChHhSQ5myxdX2UKGgGaAloD0MI98snKwZelUCUhpRSlGgVTegDaBZHQKEhQIBzV+Z1fZQoaAZoCWgPQwi3m+Cb9uWTQJSGlFKUaBVN6ANoFkdAoSRaL876pHV9lChoBmgJaA9DCNnQzf7Q+JRAlIaUUpRoFU3oA2gWR0ChJ1HPeHi4dX2UKGgGaAloD0MIABx79vxAlUCUhpRSlGgVTegDaBZHQKEqfQ79ycV1fZQoaAZoCWgPQwgzqDY4URqVQJSGlFKUaBVN6ANoFkdAoS2ZrJr+HnV9lChoBmgJaA9DCAvPS8X2PZZAlIaUUpRoFU3oA2gWR0ChMJi8FpwkdX2UKGgGaAloD0MIILjKExjQlUCUhpRSlGgVTegDaBZHQKEzquK4x1x1fZQoaAZoCWgPQwjHn6hsGLmVQJSGlFKUaBVN6ANoFkdAoTa8VHnU2HV9lChoBmgJaA9DCFPnUfGfq5VAlIaUUpRoFU3oA2gWR0ChOdmJWNm2dX2UKGgGaAloD0MImxw+6XRIlUCUhpRSlGgVTegDaBZHQKE81tpEhJR1fZQoaAZoCWgPQwhYq3ZNCO2VQJSGlFKUaBVN6ANoFkdAoT/8Xxe9jHV9lChoBmgJaA9DCJFkVu8QDZRAlIaUUpRoFU3oA2gWR0ChQxnl4keIdWUu"
},
"ep_success_buffer": {
":type:": "<class 'collections.deque'>",

":type:": "<class 'abc.ABCMeta'>",
":serialized:": "gAWVNQAAAAAAAACMIHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5idWZmZXJzlIwMUmVwbGF5QnVmZmVylJOULg==",
"__module__": "stable_baselines3.common.buffers",
+
"__doc__": "\n Replay buffer used in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Enable a memory efficient variant\n of the replay buffer which reduces by almost a factor two the memory used,\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n Cannot be used in combination with handle_timeout_termination.\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n ",
+"__init__": "<function ReplayBuffer.__init__ at 0x7f2ac12510d0>",
+"add": "<function ReplayBuffer.add at 0x7f2ac1251160>",
+"sample": "<function ReplayBuffer.sample at 0x7f2ac12511f0>",
+"_get_samples": "<function ReplayBuffer._get_samples at 0x7f2ac1251280>",
"__abstractmethods__": "frozenset()",
+"_abc_impl": "<_abc_data object at 0x7f2ac12d3330>"
},
"replay_buffer_kwargs": {},
"train_freq": {

"use_sde_at_warmup": false,
"target_entropy": -3.0,
"ent_coef": "auto",
+"target_update_interval": 1,
+"batch_norm_stats": [],
+"batch_norm_stats_target": []
}
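The `data` entry above is the JSON metadata stored inside sac-seals-Hopper-v0.zip next to the .pth tensors; the ":serialized:" fields hold base64-encoded cloudpickle payloads (the constant learning-rate schedule, the action space, the episode-info buffer). A sketch of inspecting it without instantiating the model, assuming compatible SB3 and gym versions:

```
# Hedged sketch: peek at the saved metadata and parameter groups.
from stable_baselines3.common.save_util import load_from_zip_file

data, params, pytorch_variables = load_from_zip_file("sac-seals-Hopper-v0.zip")
print(data["policy_kwargs"])    # {'log_std_init': -1.68..., 'net_arch': [256, 256], 'use_sde': False}
print(data["target_entropy"])   # -3.0
print(list(params.keys()))      # e.g. ['policy', 'actor.optimizer', 'critic.optimizer', 'ent_coef_optimizer']
```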
sac-seals-Hopper-v0/ent_coef_optimizer.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:c298c1751d5d7f1c30d98d3d71aeb9c0a7eedf164564411b6d4ba75871193a45
+size 1507
sac-seals-Hopper-v0/policy.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:30b3709d693e14f07beb0b6bf2da988e9dffdd4662a80fdf6bf57b53204ad261
 size 1415493
sac-seals-Hopper-v0/pytorch_variables.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:d55cd4d044669e9a0a071dd18d5c2e286f0b00ab494bce2727a7f97e7cc402aa
 size 747
sac-seals-Hopper-v0/system_info.txt
CHANGED
@@ -1,6 +1,6 @@
-OS: Linux-5.4.0-
+OS: Linux-5.4.0-125-generic-x86_64-with-glibc2.29 #141-Ubuntu SMP Wed Aug 10 13:42:03 UTC 2022
 Python: 3.8.10
-Stable-Baselines3: 1.6.
+Stable-Baselines3: 1.6.2
 PyTorch: 1.11.0+cu102
 GPU Enabled: False
 Numpy: 1.22.3
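system_info.txt records the environment the agent was trained and saved with. A sketch of regenerating the same summary locally, assuming `get_system_info` is available in the installed stable-baselines3 version:

```
# Hedged sketch: print the same environment summary locally.
from stable_baselines3.common.utils import get_system_info

env_info, env_info_str = get_system_info(print_info=True)
```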
train_eval_metrics.zip
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:88b097cfbcba3985668e1ecaeed3012c518f71a3b9a4e006324f2e3029219a10
+size 33677