second attempt
- README.md +7 -104
- config.json +1 -1
- ppo-LunarLander-v2.zip +2 -2
- ppo-LunarLander-v2/_stable_baselines3_version +1 -1
- ppo-LunarLander-v2/data +48 -44
- ppo-LunarLander-v2/policy.optimizer.pth +1 -1
- ppo-LunarLander-v2/policy.pth +2 -2
- ppo-LunarLander-v2/system_info.txt +8 -6
- replay.mp4 +0 -0
- results.json +1 -1
README.md
CHANGED
@@ -6,7 +6,7 @@ tags:
 - reinforcement-learning
 - stable-baselines3
 model-index:
-- name:
+- name: ppo
   results:
   - task:
       type: reinforcement-learning
@@ -16,119 +16,22 @@ model-index:
       type: LunarLander-v2
     metrics:
     - type: mean_reward
-      value:
+      value: 253.80 +/- 22.65
       name: mean_reward
       verified: false
 ---

-# **
-This is a trained model of a **
+# **ppo** Agent playing **LunarLander-v2**
+This is a trained model of a **ppo** agent playing **LunarLander-v2**
 using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

 ## Usage (with Stable-baselines3)
-
+TODO: Add your code


 ```python
-
-from
-
-virtual_display = Display(visible=0, size=(1400, 900))
-virtual_display.start()
-
-# We create our environment with gym.make("<name_of_the_environment>")
-env = gym.make("LunarLander-v2")
-env.reset()
-print("_____OBSERVATION SPACE_____ \n")
-print("Observation Space Shape", env.observation_space.shape)
-print("Sample observation", env.observation_space.sample()) # Get a random observation
-
-print("\n _____ACTION SPACE_____ \n")
-print("Action Space Shape", env.action_space.n)
-print("Action Space Sample", env.action_space.sample()) # Take a random action
-
-# Create the environment
-env = make_vec_env('LunarLander-v2', n_envs=16)
-
-# TODO: Define a PPO MlpPolicy architecture
-# We use MultiLayerPerceptron (MLPPolicy) because the input is a vector,
-# if we had frames as input we would use CnnPolicy
-# SOLUTION
-# We added some parameters to accelerate the training
-model = PPO(
-    policy = 'MlpPolicy',
-    env = env,
-    n_steps = 1024,
-    batch_size = 64,
-    n_epochs = 4,
-    gamma = 0.999,
-    gae_lambda = 0.98,
-    ent_coef = 0.01,
-    verbose=1)
-
-# Train it for 1,000,000 timesteps
-model.learn(total_timesteps=1000000)
-# Save the model
-model_name = "ppo-LunarLander-v2"
-model.save(model_name)
-
-
-# Create a new environment for evaluation
-eval_env = gym.make("LunarLander-v2")
-
-# Evaluate the model with 10 evaluation episodes and deterministic=True
-mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=10, deterministic=True)
-
-# Print the results
-print(f"mean_reward={mean_reward:.2f} +/- {std_reward}")
+from stable_baselines3 import ...
+from huggingface_sb3 import load_from_hub

 ...
 ```
-
-Then upload it using huggingFace library
-
-notebook_login()
-
-!git config --global credential.helper store
-
-and
-
-```python
-import gym
-from stable_baselines3.common.vec_env import DummyVecEnv
-from stable_baselines3.common.env_util import make_vec_env
-
-from huggingface_sb3 import package_to_hub
-
-## TODO: Define a repo_id
-## repo_id is the id of the model repository from the Hugging Face Hub (repo_id = {organization}/{repo_name} for instance ThomasSimonini/ppo-LunarLander-v2
-repo_id = "rlucasz93/ppo-LunarLander-v2"
-
-# TODO: Define the name of the environment
-env_id = "LunarLander-v2"
-
-# Create the evaluation env
-eval_env = DummyVecEnv([lambda: gym.make(env_id)])
-
-
-# TODO: Define the model architecture we used
-model_architecture = "PPO"
-
-## TODO: Define the commit message
-commit_message = "first model upload"
-
-# method save, evaluate, generate a model card and record a replay video of your agent before pushing the repo to the hub
-package_to_hub(model=model, # Our trained model
-               model_name=model_name, # The name of our trained model
-               model_architecture=model_architecture, # The model architecture we used: in our case PPO
-               env_id=env_id, # Name of the environment
-               eval_env=eval_env, # Evaluation Environment
-               repo_id=repo_id, # id of the model repository from the Hugging Face Hub (repo_id = {organization}/{repo_name} for instance ThomasSimonini/ppo-LunarLander-v2
-               commit_message=commit_message)
-
-# Note: if after running the package_to_hub function and it gives an issue of rebasing, please run the following code
-# cd <path_to_repo> && git add . && git commit -m "Add message" && git pull
-# And don't forget to do a "git push" at the end to push the change to the hub.
-...
-```
-
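The updated README leaves the usage snippet as a stub (`from stable_baselines3 import ...`). A minimal sketch of how the checkpoint pushed in this commit could be loaded and rolled out; it assumes `stable-baselines3>=2.0`, `huggingface_sb3`, and `gymnasium[box2d]` are installed, and reuses the `repo_id` from the removed README example:

```python
import gymnasium as gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# Download the checkpoint committed here and restore the PPO agent.
checkpoint = load_from_hub(
    repo_id="rlucasz93/ppo-LunarLander-v2",
    filename="ppo-LunarLander-v2.zip",
)
model = PPO.load(checkpoint)

# Roll out one episode with the deterministic policy.
env = gym.make("LunarLander-v2")
obs, info = env.reset()
done = False
while not done:
    action, _ = model.predict(obs, deterministic=True)
    obs, reward, terminated, truncated, info = env.step(action)
    done = terminated or truncated
env.close()
```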
config.json
CHANGED
@@ -1 +1 @@
-
{"policy_class": {":type:": "<class 'abc.ABCMeta'>", ":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==", "__module__": "stable_baselines3.common.policies", "__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ", "__init__": "<function ActorCriticPolicy.__init__ at 0x7fe7eeb6e1f0>", "_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at 0x7fe7eeb6e280>", "reset_noise": "<function ActorCriticPolicy.reset_noise at 0x7fe7eeb6e310>", "_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at 0x7fe7eeb6e3a0>", "_build": "<function ActorCriticPolicy._build at 0x7fe7eeb6e430>", "forward": "<function ActorCriticPolicy.forward at 0x7fe7eeb6e4c0>", "extract_features": "<function ActorCriticPolicy.extract_features at 0x7fe7eeb6e550>", "_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at 0x7fe7eeb6e5e0>", "_predict": "<function ActorCriticPolicy._predict at 0x7fe7eeb6e670>", "evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at 0x7fe7eeb6e700>", "get_distribution": "<function ActorCriticPolicy.get_distribution at 0x7fe7eeb6e790>", "predict_values": "<function ActorCriticPolicy.predict_values at 0x7fe7eeb6e820>", "__abstractmethods__": "frozenset()", "_abc_impl": "<_abc_data object at 0x7fe7eeb687e0>"}, "verbose": 1, "policy_kwargs": {}, "observation_space": {":type:": "<class 'gym.spaces.box.Box'>", ":serialized:": 
"gAWVnwEAAAAAAACMDmd5bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lGgFk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMBl9zaGFwZZRLCIWUjANsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWIAAAAAAAAAAAAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/5RoCksIhZSMAUOUdJRSlIwEaGlnaJRoEiiWIAAAAAAAAAAAAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAf5RoCksIhZRoFXSUUpSMDWJvdW5kZWRfYmVsb3eUaBIolggAAAAAAAAAAAAAAAAAAACUaAeMAmIxlImIh5RSlChLA4wBfJROTk5K/////0r/////SwB0lGJLCIWUaBV0lFKUjA1ib3VuZGVkX2Fib3ZllGgSKJYIAAAAAAAAAAAAAAAAAAAAlGghSwiFlGgVdJRSlIwKX25wX3JhbmRvbZROdWIu", "dtype": "float32", "_shape": [8], "low": "[-inf -inf -inf -inf -inf -inf -inf -inf]", "high": "[inf inf inf inf inf inf inf inf]", "bounded_below": "[False False False False False False False False]", "bounded_above": "[False False False False False False False False]", "_np_random": null}, "action_space": {":type:": "<class 'gym.spaces.discrete.Discrete'>", ":serialized:": "gAWVggAAAAAAAACME2d5bS5zcGFjZXMuZGlzY3JldGWUjAhEaXNjcmV0ZZSTlCmBlH2UKIwBbpRLBIwGX3NoYXBllCmMBWR0eXBllIwFbnVtcHmUaAeTlIwCaTiUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYowKX25wX3JhbmRvbZROdWIu", "n": 4, "_shape": [], "dtype": "int64", "_np_random": null}, "n_envs": 16, "num_timesteps": 1015808, "_total_timesteps": 1000000, "_num_timesteps_at_start": 0, "seed": null, "action_noise": null, "start_time": 1675206623555722241, "learning_rate": 0.0003, "tensorboard_log": null, "lr_schedule": {":type:": "<class 'function'>", ":serialized:": "gAWVwwIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMSC91c3IvbG9jYWwvbGliL3B5dGhvbjMuOC9kaXN0LXBhY2thZ2VzL3N0YWJsZV9iYXNlbGluZXMzL2NvbW1vbi91dGlscy5weZSMBGZ1bmOUS4JDAgABlIwDdmFslIWUKXSUUpR9lCiMC19fcGFja2FnZV9flIwYc3RhYmxlX2Jhc2VsaW5lczMuY29tbW9ulIwIX19uYW1lX1+UjB5zdGFibGVfYmFzZWxpbmVzMy5jb21tb24udXRpbHOUjAhfX2ZpbGVfX5SMSC91c3IvbG9jYWwvbGliL3B5dGhvbjMuOC9kaXN0LXBhY2thZ2VzL3N0YWJsZV9iYXNlbGluZXMzL2NvbW1vbi91dGlscy5weZR1Tk5oAIwQX21ha2VfZW1wdHlfY2VsbJSTlClSlIWUdJRSlIwcY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEl9mdW5jdGlvbl9zZXRzdGF0ZZSTlGgffZR9lChoFmgNjAxfX3F1YWxuYW1lX1+UjBljb25zdGFudF9mbi48bG9jYWxzPi5mdW5jlIwPX19hbm5vdGF0aW9uc19flH2UjA5fX2t3ZGVmYXVsdHNfX5ROjAxfX2RlZmF1bHRzX1+UTowKX19tb2R1bGVfX5RoF4wHX19kb2NfX5ROjAtfX2Nsb3N1cmVfX5RoAIwKX21ha2VfY2VsbJSTlEc/M6kqMFUyYYWUUpSFlIwXX2Nsb3VkcGlja2xlX3N1Ym1vZHVsZXOUXZSMC19fZ2xvYmFsc19flH2UdYaUhlIwLg=="}, "_last_obs": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWVdQIAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYAAgAAAAAAAM3rDj3fkLk+bkh1PREcNL5i3eo8vFuqvAAAAAAAAAAAM/QrvUjvgrr1Yxk6JsQgtshv3jrZpjK5AACAPwAAgD9mIfi8SL+gukMx7jrvkak1+qCEOtgwCboAAIA/AACAP40PAz5cCgw+N3cIvi7tQr7TIka9OlRQPQAAAAAAAAAAAP/FPMNxV7rlaCK8fjrvNRlSjTudy1W1AACAPwAAgD8z5ua89mx+um0nk7rvQGW0oHuTN0zaqzkAAIA/AACAP5rhfzt7ZpS6A+7ZOuDVqjVSWEa6Xdf7uQAAgD8AAIA/zU6QvIT9qj9rihG+lJOtvpGfA71js5u9AAAAAAAAAACaUXw82jSdPpzOhr1eaT2+2kDuvC3XFD0AAAAAAAAAAObZ0b1iny0+8SWvPpFaer45PG09veKZPAAAAAAAAAAAzXglPFw3DroYnX+7T+Ivt1gTCztagZc6AACAPwAAgD9m14Y8riGkuqiJ5zmucVe1r6qAOqMOVbQAAIA/AACAP4AZfj0f5b44BFKQOyo8IziFslC6xxezuAAAgD8AAIA/Mytcu1yjOLrJoZc6oKGSNQIqhbpywrK5AACAPwAAgD/mVT49uF7zuar977kzd+2yiEyBu88iDjkAAIA/AACAP02edT3hvKC6aP00OHFlDTOAWgm5xv5PtwAAgD8AAIA/lIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYksQSwiGlIwBQ5R0lFKULg=="}, "_last_episode_starts": {":type:": "<class 'numpy.ndarray'>", ":serialized:": 
"gAWVgwAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACUjAVudW1weZSMBWR0eXBllJOUjAJiMZSJiIeUUpQoSwOMAXyUTk5OSv////9K/////0sAdJRiSxCFlIwBQ5R0lFKULg=="}, "_last_original_obs": null, "_episode_num": 0, "use_sde": false, "sde_sample_freq": -1, "_current_progress_remaining": -0.015808000000000044, "ep_info_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWVfxAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpSMFW51bXB5LmNvcmUubXVsdGlhcnJheZSMBnNjYWxhcpSTlIwFbnVtcHmUjAVkdHlwZZSTlIwCZjiUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYkMIrrfNVIj0ZECUhpRSlIwBbJRN6AOMAXSUR0CQH4IfKZDzdX2UKGgGaAloD0MITkaVYVwIYUCUhpRSlGgVTegDaBZHQJAoolByCFt1fZQoaAZoCWgPQwiRRgVONhJlQJSGlFKUaBVN6ANoFkdAkCj2O+7DmHV9lChoBmgJaA9DCCaKkLodfGBAlIaUUpRoFU3oA2gWR0CQK3NOdoWYdX2UKGgGaAloD0MI3NYWnpcmYECUhpRSlGgVTegDaBZHQJAvnBJqZc91fZQoaAZoCWgPQwjRdeEHZxJkQJSGlFKUaBVN6ANoFkdAkDGkEC/47HV9lChoBmgJaA9DCDFAogkUEGhAlIaUUpRoFU3oA2gWR0CQMmCmMwUQdX2UKGgGaAloD0MIdc3km+2hYECUhpRSlGgVTegDaBZHQJA0ZB3Roh91fZQoaAZoCWgPQwigxr35jZpgQJSGlFKUaBVN6ANoFkdAkDih1LamGnV9lChoBmgJaA9DCBk3NdB83lxAlIaUUpRoFU3oA2gWR0CQOVJlar3kdX2UKGgGaAloD0MIGArYDkbDZECUhpRSlGgVTegDaBZHQJA5cxYaHbh1fZQoaAZoCWgPQwjmriXkg2xkQJSGlFKUaBVN6ANoFkdAkDobUkOZs3V9lChoBmgJaA9DCIS9iSE5AUBAlIaUUpRoFUvVaBZHQJA6tA/s3Q51fZQoaAZoCWgPQwithsQ9lk1iQJSGlFKUaBVN6ANoFkdAkETlMdtEX3V9lChoBmgJaA9DCKPIWkOpIFxAlIaUUpRoFU3oA2gWR0CQSLWEbo8qdX2UKGgGaAloD0MItahPcofDY0CUhpRSlGgVTegDaBZHQJBJ0FPi1iR1fZQoaAZoCWgPQwjgaMcNv7pkQJSGlFKUaBVN6ANoFkdAkGZNHtnf23V9lChoBmgJaA9DCDDZeLBFcmBAlIaUUpRoFU3oA2gWR0CQddWj4593dX2UKGgGaAloD0MILxfxnRjCakCUhpRSlGgVTWIBaBZHQJB/sK/mDDl1fZQoaAZoCWgPQwjWqfI9IwtlQJSGlFKUaBVN6ANoFkdAkH/afOD8L3V9lChoBmgJaA9DCNlCkIOS1WFAlIaUUpRoFU3oA2gWR0CQgDgeA/cGdX2UKGgGaAloD0MILgCN0iX4ZkCUhpRSlGgVTegDaBZHQJCDDK2a2F51fZQoaAZoCWgPQwg3GsBboKlhQJSGlFKUaBVN6ANoFkdAkIowdn0033V9lChoBmgJaA9DCETDYtQ1MmZAlIaUUpRoFU3oA2gWR0CQiwuJDVpcdX2UKGgGaAloD0MIzc03ontfZUCUhpRSlGgVTegDaBZHQJCNbAGjbi91fZQoaAZoCWgPQwhV3o5wWrpjQJSGlFKUaBVN6ANoFkdAkJIcVDa4+nV9lChoBmgJaA9DCEfKFkk7+mFAlIaUUpRoFU3oA2gWR0CQkuDgIhQndX2UKGgGaAloD0MIu7n42x6zY0CUhpRSlGgVTegDaBZHQJCTAlJHy3F1fZQoaAZoCWgPQwjtYprpXrhiQJSGlFKUaBVN6ANoFkdAkJO9nbqQinV9lChoBmgJaA9DCD7nbtfLWGJAlIaUUpRoFU3oA2gWR0CQlF8f3evZdX2UKGgGaAloD0MIONxHbs2WZECUhpRSlGgVTegDaBZHQJCeGG/N7jV1fZQoaAZoCWgPQwiuuaP/ZYFlQJSGlFKUaBVN6ANoFkdAkKF5b2USqXV9lChoBmgJaA9DCGe0VUlkfWJAlIaUUpRoFU3oA2gWR0CQoiskpqh2dX2UKGgGaAloD0MIM/s8Rvn6aECUhpRSlGgVTegDaBZHQJDH+ZuyeI51fZQoaAZoCWgPQwhV2uIaH71iQJSGlFKUaBVN6ANoFkdAkNGJNKyv93V9lChoBmgJaA9DCO3WMhmOqmdAlIaUUpRoFU3oA2gWR0CQ0bE3sHB2dX2UKGgGaAloD0MIb7n6sUmpY0CUhpRSlGgVTegDaBZHQJDSBpmEoOR1fZQoaAZoCWgPQwhZTdcT3YJiQJSGlFKUaBVN6ANoFkdAkNS6MrEtNHV9lChoBmgJaA9DCOVGkbUGVGBAlIaUUpRoFU3oA2gWR0CQ28KtxMnJdX2UKGgGaAloD0MIbwwBwDG+YUCUhpRSlGgVTegDaBZHQJDcsIIF/x51fZQoaAZoCWgPQwjV6UDWU51nQJSGlFKUaBVN6ANoFkdAkN8xC+lCTnV9lChoBmgJaA9DCJVIopdRjDpAlIaUUpRoFU0FAWgWR0CQ4pFUhmoSdX2UKGgGaAloD0MI+62dKIkQYkCUhpRSlGgVTegDaBZHQJDkEWdmQKd1fZQoaAZoCWgPQwizlgLS/qxmQJSGlFKUaBVN6ANoFkdAkOTIR28qWnV9lChoBmgJaA9DCGMOgo5WLWFAlIaUUpRoFU3oA2gWR0CQ5OhStNi6dX2UKGgGaAloD0MIBrggWxZaY0CUhpRSlGgVTegDaBZHQJDlj8wYced1fZQoaAZoCWgPQwjg2omSEB5kQJSGlFKUaBVN6ANoFkdAkOYmszVMEnV9lChoBmgJaA9DCKs/wjBgnF5AlIaUUpRoFU3oA2gWR0CQ8EBPsRg7dX2UKGgGaAloD0MILj2a6sm6XkCUhpRSlGgVTegDaBZHQJD0Gzw+dLB1fZQoaAZoCWgPQwhuowG8hZNiQJSGlFKUaBVN6ANoFkdAkPTlvhqCYnV9lChoBmgJaA9DCB5ssdvnXWZAlIaUUpRoFU3oA2gWR0CRHEoSL61tdX2UKGgGaAloD0MIiiDOw4nWZUCUhpRSlGgVTegDaBZHQJEmAgB91EF1fZQoaAZoCWgPQwjuJCL8i/ljQJSGlFKUaBVN6ANoFkdAkSYrm+0w8HV9lChoBmgJaA9DCINStHKvmGFAlIaUUpRoFU3oA2gWR0CRKTKfFrEcdX2UKGgGaAl
oD0MI8u1dg76wY0CUhpRSlGgVTegDaBZHQJEv6pwS8J51fZQoaAZoCWgPQwisPIGwU/ZhQJSGlFKUaBVN6ANoFkdAkTDJqM3qA3V9lChoBmgJaA9DCC6qRUQxBWVAlIaUUpRoFU3oA2gWR0CRMzg6EJ0GdX2UKGgGaAloD0MIe4SaIVU6YECUhpRSlGgVTegDaBZHQJE2cqtozvZ1fZQoaAZoCWgPQwg1mfG2UpdgQJSGlFKUaBVN6ANoFkdAkTfelKsdUHV9lChoBmgJaA9DCCHM7V7uul9AlIaUUpRoFU3oA2gWR0CROJpqREF4dX2UKGgGaAloD0MISmHe40yzYECUhpRSlGgVTegDaBZHQJE4unNxEOR1fZQoaAZoCWgPQwhKmj+mtR1lQJSGlFKUaBVN6ANoFkdAkTlrdznzQXV9lChoBmgJaA9DCDKR0mweMWRAlIaUUpRoFU3oA2gWR0CROgiqABkqdX2UKGgGaAloD0MIFa3cC8zrZECUhpRSlGgVTegDaBZHQJFE9f+jua51fZQoaAZoCWgPQwhCzCVV25VeQJSGlFKUaBVN6ANoFkdAkUj36MzdlHV9lChoBmgJaA9DCOlJmdTQ92RAlIaUUpRoFU3oA2gWR0CRScpJPIn0dX2UKGgGaAloD0MIol7waU5GbUCUhpRSlGgVTT4CaBZHQJFngNjLB9F1fZQoaAZoCWgPQwid8X1xqVRaQJSGlFKUaBVN6ANoFkdAkXIjx0+1SnV9lChoBmgJaA9DCPD5YYRw+2ZAlIaUUpRoFU3oA2gWR0CRe75JK8L8dX2UKGgGaAloD0MI0vvG1x4vZECUhpRSlGgVTegDaBZHQJF77Cm/Fit1fZQoaAZoCWgPQwgK3LqbpyNiQJSGlFKUaBVN6ANoFkdAkX7+mFaje3V9lChoBmgJaA9DCIZUUbxKFGFAlIaUUpRoFU3oA2gWR0CRhyT5ftx/dX2UKGgGaAloD0MIumddo2WmYkCUhpRSlGgVTegDaBZHQJGJ6BUaQ3h1fZQoaAZoCWgPQwhBR6taUvpiQJSGlFKUaBVN6ANoFkdAkY2r39JjD3V9lChoBmgJaA9DCGx3D9B9Z2JAlIaUUpRoFU3oA2gWR0CRjyi+cpb2dX2UKGgGaAloD0MIvY44ZAN5ZkCUhpRSlGgVTegDaBZHQJGQBdLQHA11fZQoaAZoCWgPQwhLPQtC+dxjQJSGlFKUaBVN6ANoFkdAkZAoTwlSj3V9lChoBmgJaA9DCM5Q3PEm111AlIaUUpRoFU3oA2gWR0CRkOEK3NLUdX2UKGgGaAloD0MIAWiULn1XYUCUhpRSlGgVTegDaBZHQJGReEZiuuB1fZQoaAZoCWgPQwjcLckBO2dlQJSGlFKUaBVN6ANoFkdAkZuzfvWpZXV9lChoBmgJaA9DCETbMXVXLkJAlIaUUpRoFUvoaBZHQJGezZYgaFV1fZQoaAZoCWgPQwh0t+ulKYhkQJSGlFKUaBVN6ANoFkdAkZ+NLlFMI3V9lChoBmgJaA9DCC7lfLF3HmVAlIaUUpRoFU3oA2gWR0CRoFeaa1CxdX2UKGgGaAloD0MIgsr49xkBX0CUhpRSlGgVTegDaBZHQJG8B3LV4HJ1fZQoaAZoCWgPQwgVjiCV4oJgQJSGlFKUaBVN6ANoFkdAkcVYGdI5HXV9lChoBmgJaA9DCA1xrIvbpWBAlIaUUpRoFU3oA2gWR0CRzqE8JUo8dX2UKGgGaAloD0MI0uRiDCywZkCUhpRSlGgVTegDaBZHQJHOx2ZAprl1fZQoaAZoCWgPQwiR0mweh1dfQJSGlFKUaBVN6ANoFkdAkdH9XYDkl3V9lChoBmgJaA9DCIUmiSVlx25AlIaUUpRoFU08A2gWR0CR1zgvDgqFdX2UKGgGaAloD0MIfH+D9uqTYECUhpRSlGgVTegDaBZHQJHZjmp2ll91fZQoaAZoCWgPQwiCcXDpmGhkQJSGlFKUaBVN6ANoFkdAkdvPNA1NxnV9lChoBmgJaA9DCBssnKT5aGVAlIaUUpRoFU3oA2gWR0CR3u+b3Gn5dX2UKGgGaAloD0MIUADFyBKnaECUhpRSlGgVTegDaBZHQJHgNcIJJGx1fZQoaAZoCWgPQwhMGTigpTNmQJSGlFKUaBVN6ANoFkdAkeD9CZ4Oc3V9lChoBmgJaA9DCNOgaB7AyWJAlIaUUpRoFU3oA2gWR0CR4aTg2qDLdX2UKGgGaAloD0MI9UpZhriLZ0CUhpRSlGgVTegDaBZHQJHsPvc8DCB1fZQoaAZoCWgPQwhN2H4yRiFlQJSGlFKUaBVN6ANoFkdAke8IOtnwonV9lChoBmgJaA9DCBdH5Sbqw2BAlIaUUpRoFU3oA2gWR0CR77yyUs4DdX2UKGgGaAloD0MITaJe8GnOXUCUhpRSlGgVTegDaBZHQJHwdzIV/MJ1fZQoaAZoCWgPQwigFoOH6XJkQJSGlFKUaBVN6ANoFkdAkfiq/mDDj3VlLg=="}, "ep_success_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="}, "_n_updates": 248, "n_steps": 1024, "gamma": 0.999, "gae_lambda": 0.98, "ent_coef": 0.01, "vf_coef": 0.5, "max_grad_norm": 0.5, "batch_size": 64, "n_epochs": 4, "clip_range": {":type:": "<class 'function'>", ":serialized:": 
"gAWVwwIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMSC91c3IvbG9jYWwvbGliL3B5dGhvbjMuOC9kaXN0LXBhY2thZ2VzL3N0YWJsZV9iYXNlbGluZXMzL2NvbW1vbi91dGlscy5weZSMBGZ1bmOUS4JDAgABlIwDdmFslIWUKXSUUpR9lCiMC19fcGFja2FnZV9flIwYc3RhYmxlX2Jhc2VsaW5lczMuY29tbW9ulIwIX19uYW1lX1+UjB5zdGFibGVfYmFzZWxpbmVzMy5jb21tb24udXRpbHOUjAhfX2ZpbGVfX5SMSC91c3IvbG9jYWwvbGliL3B5dGhvbjMuOC9kaXN0LXBhY2thZ2VzL3N0YWJsZV9iYXNlbGluZXMzL2NvbW1vbi91dGlscy5weZR1Tk5oAIwQX21ha2VfZW1wdHlfY2VsbJSTlClSlIWUdJRSlIwcY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEl9mdW5jdGlvbl9zZXRzdGF0ZZSTlGgffZR9lChoFmgNjAxfX3F1YWxuYW1lX1+UjBljb25zdGFudF9mbi48bG9jYWxzPi5mdW5jlIwPX19hbm5vdGF0aW9uc19flH2UjA5fX2t3ZGVmYXVsdHNfX5ROjAxfX2RlZmF1bHRzX1+UTowKX19tb2R1bGVfX5RoF4wHX19kb2NfX5ROjAtfX2Nsb3N1cmVfX5RoAIwKX21ha2VfY2VsbJSTlEc/yZmZmZmZmoWUUpSFlIwXX2Nsb3VkcGlja2xlX3N1Ym1vZHVsZXOUXZSMC19fZ2xvYmFsc19flH2UdYaUhlIwLg=="}, "clip_range_vf": null, "normalize_advantage": true, "target_kl": null, "system_info": {"OS": "Linux-5.10.147+-x86_64-with-glibc2.29 # 1 SMP Sat Dec 10 16:00:40 UTC 2022", "Python": "3.8.10", "Stable-Baselines3": "1.7.0", "PyTorch": "1.13.1+cu116", "GPU Enabled": "True", "Numpy": "1.21.6", "Gym": "0.21.0"}}
+
{"policy_class": {":type:": "<class 'abc.ABCMeta'>", ":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==", "__module__": "stable_baselines3.common.policies", "__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ", "__init__": "<function ActorCriticPolicy.__init__ at 0x7ff2ee0dd5a0>", "_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at 0x7ff2ee0dd630>", "reset_noise": "<function ActorCriticPolicy.reset_noise at 0x7ff2ee0dd6c0>", "_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at 0x7ff2ee0dd750>", "_build": "<function ActorCriticPolicy._build at 0x7ff2ee0dd7e0>", "forward": "<function ActorCriticPolicy.forward at 0x7ff2ee0dd870>", "extract_features": "<function ActorCriticPolicy.extract_features at 0x7ff2ee0dd900>", "_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at 0x7ff2ee0dd990>", "_predict": "<function ActorCriticPolicy._predict at 0x7ff2ee0dda20>", "evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at 0x7ff2ee0ddab0>", "get_distribution": "<function ActorCriticPolicy.get_distribution at 0x7ff2ee0ddb40>", "predict_values": "<function ActorCriticPolicy.predict_values at 0x7ff2ee0ddbd0>", "__abstractmethods__": "frozenset()", "_abc_impl": "<_abc._abc_data object at 0x7ff2ee0e7cc0>"}, "verbose": 1, "policy_kwargs": {}, "num_timesteps": 1015808, "_total_timesteps": 1000000, "_num_timesteps_at_start": 0, "seed": null, "action_noise": null, "start_time": 1684361343558260766, "learning_rate": 0.0003, "tensorboard_log": null, "_last_obs": {":type:": "<class 'numpy.ndarray'>", ":serialized:": 
"gAWVdQIAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYAAgAAAAAAAE3BNb32/Hi6dRQwua9mOrRHAuQ6g5xNOAAAgD8AAIA/zUyFOeHsiLoiA3I8R51AtrsjozqJ0DW1AACAPwAAgD/Nmww9ezKYugXX6TnqsBA1lx5ROn9kBrkAAIA/AACAP5qWvrzDoXK6Wh7GOxZCVzguY8+62pqHuAAAgD8AAIA/ZtaXuvY0LLrrimu7RmBiNdxYhLhK58y0AACAPwAAgD8zi8284RCVugoJRDjgfzwzxgtbOhjlYrcAAIA/AACAP5qhUTsUaKK6Fe2fO+tHHzifFo86ZV09uAAAgD8AAIA/zcJHvFz3dLqz50k7kQwZNRpHHLuYS2a6AACAPwAAgD8A6K47KZgnus1/EDwlS/s10RSRutbf9TQAAIA/AACAP5o9fD0pREi6h0M/utrSnLVI4RM7Ri5fOQAAgD8AAIA/M4Meu0gPh7oVNeU5QszkNBU8SzvyXwW5AACAPwAAgD89bJA+WGthP6gfSz4QTNa+dsqNPj3GXr0AAAAAAAAAAJqRMLzhXJ267ihiOUZ5VTRLcGg23oSCuAAAgD8AAIA/jR6+Pfa8CrrnLDg4hwDNM34JOTvzeVe3AACAPwAAgD8AwkI9wwlrum9aGjnN59Az/XFYu0pdNLgAAIA/AACAP0A2rj24Ps65vnR/OYHAizRH1ik7EHSWuAAAgD8AAAAAlIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYksQSwiGlIwBQ5R0lFKULg=="}, "_last_episode_starts": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWVgwAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACUjAVudW1weZSMBWR0eXBllJOUjAJiMZSJiIeUUpQoSwOMAXyUTk5OSv////9K/////0sAdJRiSxCFlIwBQ5R0lFKULg=="}, "_last_original_obs": null, "_episode_num": 0, "use_sde": false, "sde_sample_freq": -1, "_current_progress_remaining": -0.015808000000000044, "_stats_window_size": 100, "ep_info_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWVQwwAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpRHQGZYgW8AaNyMAWyUTegDjAF0lEdAkcOAMtsen3V9lChoBkdAZDAn1FpfyGgHTegDaAhHQJHELjaPCEZ1fZQoaAZHQGElnPmgam5oB03oA2gIR0CR3lpeNT99dX2UKGgGR0Bkb6KxcE/0aAdN6ANoCEdAkeBokRjBmHV9lChoBkdAZdCD3/Pw/mgHTegDaAhHQJHlhW7voeR1fZQoaAZHQGKP1FQVKwpoB03oA2gIR0CR5rnrY5DJdX2UKGgGR0BmxPuVopQUaAdN6ANoCEdAkekAZCOWB3V9lChoBkdAYltEFW4mTmgHTegDaAhHQJHsOCmMwUR1fZQoaAZHQGVtTQmeDnNoB03oA2gIR0CR7zDCgsbvdX2UKGgGR0BmeqKxcE/0aAdN6ANoCEdAkfEf7aZhKHV9lChoBkdAZzVcMVk+YGgHTegDaAhHQJHxrKnvUjN1fZQoaAZHQGWaCqQzUI9oB03oA2gIR0CR9fALiMo+dX2UKGgGR0BjSvkaMrEtaAdN6ANoCEdAkgEcDwH7g3V9lChoBkdAYZoY4yXUpmgHTegDaAhHQJIHeRcNYr91fZQoaAZHQGK+qJuVHFxoB03oA2gIR0CSEc8uSOindX2UKGgGR0Bh5DDl5nlGaAdN6ANoCEdAkhNaL0jC53V9lChoBkdAYuaRJVbRnmgHTegDaAhHQJITWCBf8dh1fZQoaAZHQGWjd6LOzIFoB03oA2gIR0CSFAlDneSCdX2UKGgGR0BegIkNWluWaAdN6ANoCEdAkjJk5p8F6nV9lChoBkdAZja/gR9PUWgHTegDaAhHQJIz+6asp5N1fZQoaAZHQGFJr3bmEGtoB03oA2gIR0CSN7iV0Lc9dX2UKGgGR0BknpllK9PDaAdN6ANoCEdAkjihxYJVsHV9lChoBkdAYzxtVJcxCmgHTegDaAhHQJI6Z1s+FDh1fZQoaAZHQGCDxfWtlqdoB03oA2gIR0CSPNuGKyfMdX2UKGgGR0BkIhr56+nJaAdN6ANoCEdAkj/P1QIldHV9lChoBkdAZjVD8cdYGWgHTegDaAhHQJJCGoS+QEJ1fZQoaAZHQGM+00m+j/NoB03oA2gIR0CSQsFcpsoEdX2UKGgGR0Bjkq9CeEqUaAdN6ANoCEdAkkegOSW7e3V9lChoBkdAX+sz3yqdYmgHTegDaAhHQJJVoALiMpB1fZQoaAZHQGVWkVFhG6RoB03oA2gIR0CSXsbzbvgFdX2UKGgGR0Bfbv8AJb+taAdN6ANoCEdAkmoMCT2WZHV9lChoBkdAY97kS26TXGgHTegDaAhHQJJrjkgfU4J1fZQoaAZHQGOGLfcer+5oB03oA2gIR0CSa4x8D0UXdX2UKGgGR0BlSljG1hLHaAdN6ANoCEdAkmxAZKnNxHV9lChoBkdAYvmSXdCVr2gHTegDaAhHQJKFw0vXbud1fZQoaAZHQGYnkIgNgBtoB03oA2gIR0CSh2H5JsfrdX2UKGgGR0BkOYDeTFERaAdN6ANoCEdAkouJcHGCI3V9lChoBkdAYRspqASWaGgHTegDaAhHQJKM7iHZbpx1fZQoaAZHQGdzeSSvC/JoB03oA2gIR0CSj0LhJiAldX2UKGgGR0BdJybx3FDOaAdN6ANoCEdAkpJjzAeq73V9lChoBkdAaCJKzRhMJ2gHTegDaAhHQJKWiOOsDGN1fZQoaAZHQGBoKr7wazhoB03oA2gIR0CSma4h2W6cdX2UKGgGR0BksjBVMmF8aAdN6ANoCEdAkpqpEDyOJnV9lChoBkdAYubjurp7kWgHTegDaAhHQJKh23LFGXp1fZQoaAZHQGZQyYPXkHVoB03oA2gIR0CSryc9GI9DdX2UKGgGR0BevQkxASnMaAdN6ANoCEdAkrSnr2QGOnV9lChoBkdAaCEcNH6MzmgHTegDaAhHQJK9/lHSWqt1fZQoaAZHQGS5scp9ZzRoB03oA2gIR0CSv2rtmcvvdX2UKGgGR0Bj4q1Vo6CEaAdN6ANoCEdAkr9oWLxZuHV9lChoBkdAaEQ4gA6uGWgHTegDaAhHQJLAE10knkV1fZQoaAZHQGTz6QFLWZtoB03oA2gIR0CSyLA8Swnqd
X2UKGgGR0BhU89wFTvRaAdN6ANoCEdAkt98xKxs23V9lChoBkdAXZl44ZMtb2gHTegDaAhHQJLjHUb1h9d1fZQoaAZHQGWrWK/EfkpoB03oA2gIR0CS5APi1iOOdX2UKGgGR0BlbdlRP421aAdN6ANoCEdAkuW4ZdfLLnV9lChoBkdAY64XYUWVNmgHTegDaAhHQJLoMA0bcXZ1fZQoaAZHQHBfF6/qPfdoB02WA2gIR0CS6H3QD3dsdX2UKGgGR0BlrMm4RVZLaAdN6ANoCEdAkurMMy8BdXV9lChoBkdAYdmqebutwWgHTegDaAhHQJLtT6dlNDd1fZQoaAZHQGWQdXko4MpoB03oA2gIR0CS8Z8+A3DOdX2UKGgGR0BmGLmQr+YMaAdN6ANoCEdAkvxJU5uIh3V9lChoBkdAZPY2ETQE6mgHTegDaAhHQJMB6Wkadc11fZQoaAZHQGKzyuhbnoxoB03oA2gIR0CTDVguh9LIdX2UKGgGR0BmGXlKbrkbaAdN6ANoCEdAkw88h1Tzd3V9lChoBkdAYordrO7g9GgHTegDaAhHQJMPPNSqEOB1fZQoaAZHQGjH6QNkOI9oB03oA2gIR0CTEDV8kUsWdX2UKGgGR0Blh6o60Y0maAdN6ANoCEdAkxqob83uNXV9lChoBkdAZni9B8hLXmgHTegDaAhHQJMb6/oJRfp1fZQoaAZHQGDpZLqUu+RoB03oA2gIR0CTLvfNzKcNdX2UKGgGR0BknycoYvWZaAdN6ANoCEdAky+1sDW9UXV9lChoBkdAZN0eCCjDbmgHTegDaAhHQJMxEFY+0PZ1fZQoaAZHQGOXlwT/Q0JoB03oA2gIR0CTMwM495hSdX2UKGgGR0Bkit/H5rP/aAdN6ANoCEdAkzNDxPO6d3V9lChoBkdAQTNWyTpxFWgHS9ZoCEdAkzRwhGH58HV9lChoBkdAYuFOnl4keWgHTegDaAhHQJM1M4ku6Et1fZQoaAZHQGMK25Yoy9FoB03oA2gIR0CTN1IP9UCJdX2UKGgGR0BgkobdadMCaAdN6ANoCEdAkzsPgFX7tXV9lChoBkdAYnRLxqfvnmgHTegDaAhHQJNHrkNnXd11fZQoaAZHQGHVRMN+b3JoB03oA2gIR0CTUFgk1MufdX2UKGgGR0BmXCreZXuFaAdN6ANoCEdAk1rMz2vjfnV9lChoBkdAYpvy4nWrfmgHTegDaAhHQJNcUsOG0u11fZQoaAZHQGd13gk1MuhoB03oA2gIR0CTXFDFId2gdX2UKGgGR0BkAQKKHfuUaAdN6ANoCEdAk10NX1anrXV9lChoBkdAZYE7g88s+WgHTegDaAhHQJNoL+uNgjR1fZQoaAZHQGUJz7l7tzFoB03oA2gIR0CTfTF6AvtddX2UKGgGR0Bmd2fwqiGnaAdN6ANoCEdAk36ltO2y9nV9lChoBkdAYc2Z88cMmWgHTegDaAhHQJOBISh8IAx1fZQoaAZHQGHUvJaJQ+FoB03oA2gIR0CThJ7N0NjLdX2UKGgGR0Bj3RUBGQS0aAdN6ANoCEdAk4UU0SAYpHV9lChoBkdAZ8TlS0jTrmgHTegDaAhHQJOG/CdjG1h1fZQoaAZHQGGLXu/k/8loB03oA2gIR0CTiBybQTmGdX2UKGgGR0BtfBx5s0pFaAdNnAFoCEdAk4lKUzKs+3V9lChoBkdAZqQ+Sr5qM2gHTegDaAhHQJOLQUDdP+J1fZQoaAZHQGJHiBPKuCBoB03oA2gIR0CTj6b2USqVdX2UKGgGR0BhaM6ij+JhaAdN6ANoCEdAk5qWYfGMoHV9lChoBkdAYqXTgl4TsmgHTegDaAhHQJOguji4rjJ1fZQoaAZHQGOx3FtKqXFoB03oA2gIR0CTqs78ejmCdX2UKGgGR0Bk/+ki2UjcaAdN6ANoCEdAk6xTm0VrRHV9lChoBkdAZ5W2nbZezGgHTegDaAhHQJOsUWYWtU51fZQoaAZHQGPuKhlDneVoB03oA2gIR0CTukMyrPt2dWUu"}, "ep_success_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="}, "_n_updates": 248, "observation_space": {":type:": "<class 'gymnasium.spaces.box.Box'>", ":serialized:": "gAWVcAIAAAAAAACMFGd5bW5hc2l1bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lGgFk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMDWJvdW5kZWRfYmVsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWCAAAAAAAAAABAQEBAQEBAZRoB4wCYjGUiYiHlFKUKEsDjAF8lE5OTkr/////Sv////9LAHSUYksIhZSMAUOUdJRSlIwNYm91bmRlZF9hYm92ZZRoECiWCAAAAAAAAAABAQEBAQEBAZRoFEsIhZRoGHSUUpSMBl9zaGFwZZRLCIWUjANsb3eUaBAoliAAAAAAAAAAAAC0wgAAtMIAAKDAAACgwNsPScAAAKDAAAAAgAAAAICUaApLCIWUaBh0lFKUjARoaWdolGgQKJYgAAAAAAAAAAAAtEIAALRCAACgQAAAoEDbD0lAAACgQAAAgD8AAIA/lGgKSwiFlGgYdJRSlIwIbG93X3JlcHKUjFtbLTkwLiAgICAgICAgLTkwLiAgICAgICAgIC01LiAgICAgICAgIC01LiAgICAgICAgIC0zLjE0MTU5MjcgIC01LgogIC0wLiAgICAgICAgIC0wLiAgICAgICBdlIwJaGlnaF9yZXBylIxTWzkwLiAgICAgICAgOTAuICAgICAgICAgNS4gICAgICAgICA1LiAgICAgICAgIDMuMTQxNTkyNyAgNS4KICAxLiAgICAgICAgIDEuICAgICAgIF2UjApfbnBfcmFuZG9tlE51Yi4=", "dtype": "float32", "bounded_below": "[ True True True True True True True True]", "bounded_above": "[ True True True True True True True True]", "_shape": [8], "low": "[-90. -90. -5. -5. -3.1415927 -5.\n -0. -0. ]", "high": "[90. 90. 5. 5. 3.1415927 5.\n 1. 1. ]", "low_repr": "[-90. -90. -5. -5. -3.1415927 -5.\n -0. -0. ]", "high_repr": "[90. 90. 5. 5. 3.1415927 5.\n 1. 1. 
]", "_np_random": null}, "action_space": {":type:": "<class 'gymnasium.spaces.discrete.Discrete'>", ":serialized:": "gAWV1QAAAAAAAACMGWd5bW5hc2l1bS5zcGFjZXMuZGlzY3JldGWUjAhEaXNjcmV0ZZSTlCmBlH2UKIwBbpSMFW51bXB5LmNvcmUubXVsdGlhcnJheZSMBnNjYWxhcpSTlIwFbnVtcHmUjAVkdHlwZZSTlIwCaTiUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYkMIBAAAAAAAAACUhpRSlIwFc3RhcnSUaAhoDkMIAAAAAAAAAACUhpRSlIwGX3NoYXBllCloCmgOjApfbnBfcmFuZG9tlE51Yi4=", "n": "4", "start": "0", "_shape": [], "dtype": "int64", "_np_random": null}, "n_envs": 16, "n_steps": 1024, "gamma": 0.999, "gae_lambda": 0.98, "ent_coef": 0.01, "vf_coef": 0.5, "max_grad_norm": 0.5, "batch_size": 64, "n_epochs": 4, "clip_range": {":type:": "<class 'function'>", ":serialized:": "gAWVxQIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMSS91c3IvbG9jYWwvbGliL3B5dGhvbjMuMTAvZGlzdC1wYWNrYWdlcy9zdGFibGVfYmFzZWxpbmVzMy9jb21tb24vdXRpbHMucHmUjARmdW5jlEuEQwIEAZSMA3ZhbJSFlCl0lFKUfZQojAtfX3BhY2thZ2VfX5SMGHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbpSMCF9fbmFtZV9flIwec3RhYmxlX2Jhc2VsaW5lczMuY29tbW9uLnV0aWxzlIwIX19maWxlX1+UjEkvdXNyL2xvY2FsL2xpYi9weXRob24zLjEwL2Rpc3QtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lHVOTmgAjBBfbWFrZV9lbXB0eV9jZWxslJOUKVKUhZR0lFKUjBxjbG91ZHBpY2tsZS5jbG91ZHBpY2tsZV9mYXN0lIwSX2Z1bmN0aW9uX3NldHN0YXRllJOUaB99lH2UKGgWaA2MDF9fcXVhbG5hbWVfX5SMGWNvbnN0YW50X2ZuLjxsb2NhbHM+LmZ1bmOUjA9fX2Fubm90YXRpb25zX1+UfZSMDl9fa3dkZWZhdWx0c19flE6MDF9fZGVmYXVsdHNfX5ROjApfX21vZHVsZV9flGgXjAdfX2RvY19flE6MC19fY2xvc3VyZV9flGgAjApfbWFrZV9jZWxslJOURz/JmZmZmZmahZRSlIWUjBdfY2xvdWRwaWNrbGVfc3VibW9kdWxlc5RdlIwLX19nbG9iYWxzX1+UfZR1hpSGUjAu"}, "clip_range_vf": null, "normalize_advantage": true, "target_kl": null, "lr_schedule": {":type:": "<class 'function'>", ":serialized:": "gAWVxQIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMSS91c3IvbG9jYWwvbGliL3B5dGhvbjMuMTAvZGlzdC1wYWNrYWdlcy9zdGFibGVfYmFzZWxpbmVzMy9jb21tb24vdXRpbHMucHmUjARmdW5jlEuEQwIEAZSMA3ZhbJSFlCl0lFKUfZQojAtfX3BhY2thZ2VfX5SMGHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbpSMCF9fbmFtZV9flIwec3RhYmxlX2Jhc2VsaW5lczMuY29tbW9uLnV0aWxzlIwIX19maWxlX1+UjEkvdXNyL2xvY2FsL2xpYi9weXRob24zLjEwL2Rpc3QtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lHVOTmgAjBBfbWFrZV9lbXB0eV9jZWxslJOUKVKUhZR0lFKUjBxjbG91ZHBpY2tsZS5jbG91ZHBpY2tsZV9mYXN0lIwSX2Z1bmN0aW9uX3NldHN0YXRllJOUaB99lH2UKGgWaA2MDF9fcXVhbG5hbWVfX5SMGWNvbnN0YW50X2ZuLjxsb2NhbHM+LmZ1bmOUjA9fX2Fubm90YXRpb25zX1+UfZSMDl9fa3dkZWZhdWx0c19flE6MDF9fZGVmYXVsdHNfX5ROjApfX21vZHVsZV9flGgXjAdfX2RvY19flE6MC19fY2xvc3VyZV9flGgAjApfbWFrZV9jZWxslJOURz8zqSowVTJhhZRSlIWUjBdfY2xvdWRwaWNrbGVfc3VibW9kdWxlc5RdlIwLX19nbG9iYWxzX1+UfZR1hpSGUjAu"}, "system_info": {"OS": "Linux-5.15.107+-x86_64-with-glibc2.31 # 1 SMP Sat Apr 29 09:15:28 UTC 2023", "Python": "3.10.11", "Stable-Baselines3": "2.0.0a5", "PyTorch": "2.0.0+cu118", "GPU Enabled": "True", "Numpy": "1.22.4", "Cloudpickle": "2.2.1", "Gymnasium": "0.28.1", "OpenAI Gym": "0.25.2"}}
ppo-LunarLander-v2.zip
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:a79ac3a38020fa4b5ac548b67c3814530de349ca4d21717d4f267d09f327a404
+size 146755
ppo-LunarLander-v2/_stable_baselines3_version
CHANGED
@@ -1 +1 @@
-1.7.0
+2.0.0a5
ppo-LunarLander-v2/data
CHANGED
@@ -4,60 +4,34 @@
|
|
4 |
":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==",
|
5 |
"__module__": "stable_baselines3.common.policies",
|
6 |
"__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ",
|
7 |
-
"__init__": "<function ActorCriticPolicy.__init__ at
|
8 |
-
"_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at
|
9 |
-
"reset_noise": "<function ActorCriticPolicy.reset_noise at
|
10 |
-
"_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at
|
11 |
-
"_build": "<function ActorCriticPolicy._build at
|
12 |
-
"forward": "<function ActorCriticPolicy.forward at
|
13 |
-
"extract_features": "<function ActorCriticPolicy.extract_features at
|
14 |
-
"_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at
|
15 |
-
"_predict": "<function ActorCriticPolicy._predict at
|
16 |
-
"evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at
|
17 |
-
"get_distribution": "<function ActorCriticPolicy.get_distribution at
|
18 |
-
"predict_values": "<function ActorCriticPolicy.predict_values at
|
19 |
"__abstractmethods__": "frozenset()",
|
20 |
-
"_abc_impl": "<_abc_data object at
|
21 |
},
|
22 |
"verbose": 1,
|
23 |
"policy_kwargs": {},
|
24 |
-
"observation_space": {
|
25 |
-
":type:": "<class 'gym.spaces.box.Box'>",
|
26 |
-
":serialized:": "gAWVnwEAAAAAAACMDmd5bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lGgFk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMBl9zaGFwZZRLCIWUjANsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWIAAAAAAAAAAAAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/5RoCksIhZSMAUOUdJRSlIwEaGlnaJRoEiiWIAAAAAAAAAAAAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAf5RoCksIhZRoFXSUUpSMDWJvdW5kZWRfYmVsb3eUaBIolggAAAAAAAAAAAAAAAAAAACUaAeMAmIxlImIh5RSlChLA4wBfJROTk5K/////0r/////SwB0lGJLCIWUaBV0lFKUjA1ib3VuZGVkX2Fib3ZllGgSKJYIAAAAAAAAAAAAAAAAAAAAlGghSwiFlGgVdJRSlIwKX25wX3JhbmRvbZROdWIu",
|
27 |
-
"dtype": "float32",
|
28 |
-
"_shape": [
|
29 |
-
8
|
30 |
-
],
|
31 |
-
"low": "[-inf -inf -inf -inf -inf -inf -inf -inf]",
|
32 |
-
"high": "[inf inf inf inf inf inf inf inf]",
|
33 |
-
"bounded_below": "[False False False False False False False False]",
|
34 |
-
"bounded_above": "[False False False False False False False False]",
|
35 |
-
"_np_random": null
|
36 |
-
},
|
37 |
-
"action_space": {
|
38 |
-
":type:": "<class 'gym.spaces.discrete.Discrete'>",
|
39 |
-
":serialized:": "gAWVggAAAAAAAACME2d5bS5zcGFjZXMuZGlzY3JldGWUjAhEaXNjcmV0ZZSTlCmBlH2UKIwBbpRLBIwGX3NoYXBllCmMBWR0eXBllIwFbnVtcHmUaAeTlIwCaTiUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYowKX25wX3JhbmRvbZROdWIu",
|
40 |
-
"n": 4,
|
41 |
-
"_shape": [],
|
42 |
-
"dtype": "int64",
|
43 |
-
"_np_random": null
|
44 |
-
},
|
45 |
-
"n_envs": 16,
|
46 |
"num_timesteps": 1015808,
|
47 |
"_total_timesteps": 1000000,
|
48 |
"_num_timesteps_at_start": 0,
|
49 |
"seed": null,
|
50 |
"action_noise": null,
|
51 |
-
"start_time":
|
52 |
"learning_rate": 0.0003,
|
53 |
"tensorboard_log": null,
|
54 |
-
"lr_schedule": {
|
55 |
-
":type:": "<class 'function'>",
|
56 |
-
":serialized:": "gAWVwwIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMSC91c3IvbG9jYWwvbGliL3B5dGhvbjMuOC9kaXN0LXBhY2thZ2VzL3N0YWJsZV9iYXNlbGluZXMzL2NvbW1vbi91dGlscy5weZSMBGZ1bmOUS4JDAgABlIwDdmFslIWUKXSUUpR9lCiMC19fcGFja2FnZV9flIwYc3RhYmxlX2Jhc2VsaW5lczMuY29tbW9ulIwIX19uYW1lX1+UjB5zdGFibGVfYmFzZWxpbmVzMy5jb21tb24udXRpbHOUjAhfX2ZpbGVfX5SMSC91c3IvbG9jYWwvbGliL3B5dGhvbjMuOC9kaXN0LXBhY2thZ2VzL3N0YWJsZV9iYXNlbGluZXMzL2NvbW1vbi91dGlscy5weZR1Tk5oAIwQX21ha2VfZW1wdHlfY2VsbJSTlClSlIWUdJRSlIwcY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEl9mdW5jdGlvbl9zZXRzdGF0ZZSTlGgffZR9lChoFmgNjAxfX3F1YWxuYW1lX1+UjBljb25zdGFudF9mbi48bG9jYWxzPi5mdW5jlIwPX19hbm5vdGF0aW9uc19flH2UjA5fX2t3ZGVmYXVsdHNfX5ROjAxfX2RlZmF1bHRzX1+UTowKX19tb2R1bGVfX5RoF4wHX19kb2NfX5ROjAtfX2Nsb3N1cmVfX5RoAIwKX21ha2VfY2VsbJSTlEc/M6kqMFUyYYWUUpSFlIwXX2Nsb3VkcGlja2xlX3N1Ym1vZHVsZXOUXZSMC19fZ2xvYmFsc19flH2UdYaUhlIwLg=="
|
57 |
-
},
|
58 |
"_last_obs": {
|
59 |
":type:": "<class 'numpy.ndarray'>",
|
60 |
-
":serialized:": "
|
61 |
},
|
62 |
"_last_episode_starts": {
|
63 |
":type:": "<class 'numpy.ndarray'>",
|
@@ -68,15 +42,41 @@
|
|
68 |
"use_sde": false,
|
69 |
"sde_sample_freq": -1,
|
70 |
"_current_progress_remaining": -0.015808000000000044,
|
|
|
71 |
"ep_info_buffer": {
|
72 |
":type:": "<class 'collections.deque'>",
|
73 |
-
":serialized:": "
|
74 |
},
|
75 |
"ep_success_buffer": {
|
76 |
":type:": "<class 'collections.deque'>",
|
77 |
":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="
|
78 |
},
|
79 |
"_n_updates": 248,
|
80 |
"n_steps": 1024,
|
81 |
"gamma": 0.999,
|
82 |
"gae_lambda": 0.98,
|
@@ -87,9 +87,13 @@
|
|
87 |
"n_epochs": 4,
|
88 |
"clip_range": {
|
89 |
":type:": "<class 'function'>",
|
90 |
-
":serialized:": "
|
91 |
},
|
92 |
"clip_range_vf": null,
|
93 |
"normalize_advantage": true,
|
94 |
-
"target_kl": null
|
95 |
}
|
|
|
4 |
":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==",
|
5 |
"__module__": "stable_baselines3.common.policies",
|
6 |
"__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ",
|
7 |
+
"__init__": "<function ActorCriticPolicy.__init__ at 0x7ff2ee0dd5a0>",
|
8 |
+
"_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at 0x7ff2ee0dd630>",
|
9 |
+
"reset_noise": "<function ActorCriticPolicy.reset_noise at 0x7ff2ee0dd6c0>",
|
10 |
+
"_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at 0x7ff2ee0dd750>",
|
11 |
+
"_build": "<function ActorCriticPolicy._build at 0x7ff2ee0dd7e0>",
|
12 |
+
"forward": "<function ActorCriticPolicy.forward at 0x7ff2ee0dd870>",
|
13 |
+
"extract_features": "<function ActorCriticPolicy.extract_features at 0x7ff2ee0dd900>",
|
14 |
+
"_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at 0x7ff2ee0dd990>",
|
15 |
+
"_predict": "<function ActorCriticPolicy._predict at 0x7ff2ee0dda20>",
|
16 |
+
"evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at 0x7ff2ee0ddab0>",
|
17 |
+
"get_distribution": "<function ActorCriticPolicy.get_distribution at 0x7ff2ee0ddb40>",
|
18 |
+
"predict_values": "<function ActorCriticPolicy.predict_values at 0x7ff2ee0ddbd0>",
|
19 |
"__abstractmethods__": "frozenset()",
|
20 |
+
"_abc_impl": "<_abc._abc_data object at 0x7ff2ee0e7cc0>"
|
21 |
},
|
22 |
"verbose": 1,
|
23 |
"policy_kwargs": {},
|
24 |
"num_timesteps": 1015808,
|
25 |
"_total_timesteps": 1000000,
|
26 |
"_num_timesteps_at_start": 0,
|
27 |
"seed": null,
|
28 |
"action_noise": null,
|
29 |
+
"start_time": 1684361343558260766,
|
30 |
"learning_rate": 0.0003,
|
31 |
"tensorboard_log": null,
|
32 |
"_last_obs": {
|
33 |
":type:": "<class 'numpy.ndarray'>",
|
34 |
+
":serialized:": "gAWVdQIAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYAAgAAAAAAAE3BNb32/Hi6dRQwua9mOrRHAuQ6g5xNOAAAgD8AAIA/zUyFOeHsiLoiA3I8R51AtrsjozqJ0DW1AACAPwAAgD/Nmww9ezKYugXX6TnqsBA1lx5ROn9kBrkAAIA/AACAP5qWvrzDoXK6Wh7GOxZCVzguY8+62pqHuAAAgD8AAIA/ZtaXuvY0LLrrimu7RmBiNdxYhLhK58y0AACAPwAAgD8zi8284RCVugoJRDjgfzwzxgtbOhjlYrcAAIA/AACAP5qhUTsUaKK6Fe2fO+tHHzifFo86ZV09uAAAgD8AAIA/zcJHvFz3dLqz50k7kQwZNRpHHLuYS2a6AACAPwAAgD8A6K47KZgnus1/EDwlS/s10RSRutbf9TQAAIA/AACAP5o9fD0pREi6h0M/utrSnLVI4RM7Ri5fOQAAgD8AAIA/M4Meu0gPh7oVNeU5QszkNBU8SzvyXwW5AACAPwAAgD89bJA+WGthP6gfSz4QTNa+dsqNPj3GXr0AAAAAAAAAAJqRMLzhXJ267ihiOUZ5VTRLcGg23oSCuAAAgD8AAIA/jR6+Pfa8CrrnLDg4hwDNM34JOTvzeVe3AACAPwAAgD8AwkI9wwlrum9aGjnN59Az/XFYu0pdNLgAAIA/AACAP0A2rj24Ps65vnR/OYHAizRH1ik7EHSWuAAAgD8AAAAAlIwFbnVtcHmUjAVkdHlwZZSTlIwCZjSUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYksQSwiGlIwBQ5R0lFKULg=="
|
35 |
},
|
36 |
"_last_episode_starts": {
|
37 |
":type:": "<class 'numpy.ndarray'>",
|
|
|
42 |
"use_sde": false,
|
43 |
"sde_sample_freq": -1,
|
44 |
"_current_progress_remaining": -0.015808000000000044,
|
45 |
+
"_stats_window_size": 100,
|
46 |
"ep_info_buffer": {
|
47 |
":type:": "<class 'collections.deque'>",
|
48 |
+
":serialized:": "gAWVQwwAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpRHQGZYgW8AaNyMAWyUTegDjAF0lEdAkcOAMtsen3V9lChoBkdAZDAn1FpfyGgHTegDaAhHQJHELjaPCEZ1fZQoaAZHQGElnPmgam5oB03oA2gIR0CR3lpeNT99dX2UKGgGR0Bkb6KxcE/0aAdN6ANoCEdAkeBokRjBmHV9lChoBkdAZdCD3/Pw/mgHTegDaAhHQJHlhW7voeR1fZQoaAZHQGKP1FQVKwpoB03oA2gIR0CR5rnrY5DJdX2UKGgGR0BmxPuVopQUaAdN6ANoCEdAkekAZCOWB3V9lChoBkdAYltEFW4mTmgHTegDaAhHQJHsOCmMwUR1fZQoaAZHQGVtTQmeDnNoB03oA2gIR0CR7zDCgsbvdX2UKGgGR0BmeqKxcE/0aAdN6ANoCEdAkfEf7aZhKHV9lChoBkdAZzVcMVk+YGgHTegDaAhHQJHxrKnvUjN1fZQoaAZHQGWaCqQzUI9oB03oA2gIR0CR9fALiMo+dX2UKGgGR0BjSvkaMrEtaAdN6ANoCEdAkgEcDwH7g3V9lChoBkdAYZoY4yXUpmgHTegDaAhHQJIHeRcNYr91fZQoaAZHQGK+qJuVHFxoB03oA2gIR0CSEc8uSOindX2UKGgGR0Bh5DDl5nlGaAdN6ANoCEdAkhNaL0jC53V9lChoBkdAYuaRJVbRnmgHTegDaAhHQJITWCBf8dh1fZQoaAZHQGWjd6LOzIFoB03oA2gIR0CSFAlDneSCdX2UKGgGR0BegIkNWluWaAdN6ANoCEdAkjJk5p8F6nV9lChoBkdAZja/gR9PUWgHTegDaAhHQJIz+6asp5N1fZQoaAZHQGFJr3bmEGtoB03oA2gIR0CSN7iV0Lc9dX2UKGgGR0BknpllK9PDaAdN6ANoCEdAkjihxYJVsHV9lChoBkdAYzxtVJcxCmgHTegDaAhHQJI6Z1s+FDh1fZQoaAZHQGCDxfWtlqdoB03oA2gIR0CSPNuGKyfMdX2UKGgGR0BkIhr56+nJaAdN6ANoCEdAkj/P1QIldHV9lChoBkdAZjVD8cdYGWgHTegDaAhHQJJCGoS+QEJ1fZQoaAZHQGM+00m+j/NoB03oA2gIR0CSQsFcpsoEdX2UKGgGR0Bjkq9CeEqUaAdN6ANoCEdAkkegOSW7e3V9lChoBkdAX+sz3yqdYmgHTegDaAhHQJJVoALiMpB1fZQoaAZHQGVWkVFhG6RoB03oA2gIR0CSXsbzbvgFdX2UKGgGR0Bfbv8AJb+taAdN6ANoCEdAkmoMCT2WZHV9lChoBkdAY97kS26TXGgHTegDaAhHQJJrjkgfU4J1fZQoaAZHQGOGLfcer+5oB03oA2gIR0CSa4x8D0UXdX2UKGgGR0BlSljG1hLHaAdN6ANoCEdAkmxAZKnNxHV9lChoBkdAYvmSXdCVr2gHTegDaAhHQJKFw0vXbud1fZQoaAZHQGYnkIgNgBtoB03oA2gIR0CSh2H5JsfrdX2UKGgGR0BkOYDeTFERaAdN6ANoCEdAkouJcHGCI3V9lChoBkdAYRspqASWaGgHTegDaAhHQJKM7iHZbpx1fZQoaAZHQGdzeSSvC/JoB03oA2gIR0CSj0LhJiAldX2UKGgGR0BdJybx3FDOaAdN6ANoCEdAkpJjzAeq73V9lChoBkdAaCJKzRhMJ2gHTegDaAhHQJKWiOOsDGN1fZQoaAZHQGBoKr7wazhoB03oA2gIR0CSma4h2W6cdX2UKGgGR0BksjBVMmF8aAdN6ANoCEdAkpqpEDyOJnV9lChoBkdAYubjurp7kWgHTegDaAhHQJKh23LFGXp1fZQoaAZHQGZQyYPXkHVoB03oA2gIR0CSryc9GI9DdX2UKGgGR0BevQkxASnMaAdN6ANoCEdAkrSnr2QGOnV9lChoBkdAaCEcNH6MzmgHTegDaAhHQJK9/lHSWqt1fZQoaAZHQGS5scp9ZzRoB03oA2gIR0CSv2rtmcvvdX2UKGgGR0Bj4q1Vo6CEaAdN6ANoCEdAkr9oWLxZuHV9lChoBkdAaEQ4gA6uGWgHTegDaAhHQJLAE10knkV1fZQoaAZHQGTz6QFLWZtoB03oA2gIR0CSyLA8SwnqdX2UKGgGR0BhU89wFTvRaAdN6ANoCEdAkt98xKxs23V9lChoBkdAXZl44ZMtb2gHTegDaAhHQJLjHUb1h9d1fZQoaAZHQGWrWK/EfkpoB03oA2gIR0CS5APi1iOOdX2UKGgGR0BlbdlRP421aAdN6ANoCEdAkuW4ZdfLLnV9lChoBkdAY64XYUWVNmgHTegDaAhHQJLoMA0bcXZ1fZQoaAZHQHBfF6/qPfdoB02WA2gIR0CS6H3QD3dsdX2UKGgGR0BlrMm4RVZLaAdN6ANoCEdAkurMMy8BdXV9lChoBkdAYdmqebutwWgHTegDaAhHQJLtT6dlNDd1fZQoaAZHQGWQdXko4MpoB03oA2gIR0CS8Z8+A3DOdX2UKGgGR0BmGLmQr+YMaAdN6ANoCEdAkvxJU5uIh3V9lChoBkdAZPY2ETQE6mgHTegDaAhHQJMB6Wkadc11fZQoaAZHQGKzyuhbnoxoB03oA2gIR0CTDVguh9LIdX2UKGgGR0BmGXlKbrkbaAdN6ANoCEdAkw88h1Tzd3V9lChoBkdAYordrO7g9GgHTegDaAhHQJMPPNSqEOB1fZQoaAZHQGjH6QNkOI9oB03oA2gIR0CTEDV8kUsWdX2UKGgGR0Blh6o60Y0maAdN6ANoCEdAkxqob83uNXV9lChoBkdAZni9B8hLXmgHTegDaAhHQJMb6/oJRfp1fZQoaAZHQGDpZLqUu+RoB03oA2gIR0CTLvfNzKcNdX2UKGgGR0BknycoYvWZaAdN6ANoCEdAky+1sDW9UXV9lChoBkdAZN0eCCjDbmgHTegDaAhHQJMxEFY+0PZ1fZQoaAZHQGOXlwT/Q0JoB03oA2gIR0CTMwM495hSdX2UKGgGR0Bkit/H5rP/aAdN6ANoCEdAkzNDxPO6d3V9lChoBkdAQTNWyTpxFWgHS9ZoCEdAkzRwhGH58HV9lChoBkdAYuFOnl4keWgHTegDaAhHQJM1M4ku6Et1fZQoaAZHQGMK25Yoy9FoB03oA2gIR0CTN1IP9UCJdX2UKGgGR0BgkobdadMCaAdN6ANoCEdAkzsPgFX7tXV9lChoBkdAYnRLxqfvnmgHTegDaAhHQJNHrkNnXd11fZQoaAZHQGHVRMN+b3JoB03oA2gIR0CTUFgk1MufdX2UKGgGR0BmXCreZXuFaAdN6ANoCEdAk1rMz2vjfnV9lChoBkdAYpvy4nWrfmgHTegDaAhHQJNcUsOG0u11fZQoaAZHQGd13gk1MuhoB03oA2gIR0CTXFDFId2gdX2UKGgGR0BkAQKKHfuUaAdN6ANoCEdAk10NX1anrXV9lChoBkdAZYE7g88s+WgHTegDaAhHQJNoL+uNgjR1fZ
QoaAZHQGUJz7l7tzFoB03oA2gIR0CTfTF6AvtddX2UKGgGR0Bmd2fwqiGnaAdN6ANoCEdAk36ltO2y9nV9lChoBkdAYc2Z88cMmWgHTegDaAhHQJOBISh8IAx1fZQoaAZHQGHUvJaJQ+FoB03oA2gIR0CThJ7N0NjLdX2UKGgGR0Bj3RUBGQS0aAdN6ANoCEdAk4UU0SAYpHV9lChoBkdAZ8TlS0jTrmgHTegDaAhHQJOG/CdjG1h1fZQoaAZHQGGLXu/k/8loB03oA2gIR0CTiBybQTmGdX2UKGgGR0BtfBx5s0pFaAdNnAFoCEdAk4lKUzKs+3V9lChoBkdAZqQ+Sr5qM2gHTegDaAhHQJOLQUDdP+J1fZQoaAZHQGJHiBPKuCBoB03oA2gIR0CTj6b2USqVdX2UKGgGR0BhaM6ij+JhaAdN6ANoCEdAk5qWYfGMoHV9lChoBkdAYqXTgl4TsmgHTegDaAhHQJOguji4rjJ1fZQoaAZHQGOx3FtKqXFoB03oA2gIR0CTqs78ejmCdX2UKGgGR0Bk/+ki2UjcaAdN6ANoCEdAk6xTm0VrRHV9lChoBkdAZ5W2nbZezGgHTegDaAhHQJOsUWYWtU51fZQoaAZHQGPuKhlDneVoB03oA2gIR0CTukMyrPt2dWUu"
|
49 |
},
|
50 |
"ep_success_buffer": {
|
51 |
":type:": "<class 'collections.deque'>",
|
52 |
":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="
|
53 |
},
|
54 |
"_n_updates": 248,
|
55 |
+
"observation_space": {
|
56 |
+
":type:": "<class 'gymnasium.spaces.box.Box'>",
|
57 |
+
":serialized:": "gAWVcAIAAAAAAACMFGd5bW5hc2l1bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lGgFk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMDWJvdW5kZWRfYmVsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWCAAAAAAAAAABAQEBAQEBAZRoB4wCYjGUiYiHlFKUKEsDjAF8lE5OTkr/////Sv////9LAHSUYksIhZSMAUOUdJRSlIwNYm91bmRlZF9hYm92ZZRoECiWCAAAAAAAAAABAQEBAQEBAZRoFEsIhZRoGHSUUpSMBl9zaGFwZZRLCIWUjANsb3eUaBAoliAAAAAAAAAAAAC0wgAAtMIAAKDAAACgwNsPScAAAKDAAAAAgAAAAICUaApLCIWUaBh0lFKUjARoaWdolGgQKJYgAAAAAAAAAAAAtEIAALRCAACgQAAAoEDbD0lAAACgQAAAgD8AAIA/lGgKSwiFlGgYdJRSlIwIbG93X3JlcHKUjFtbLTkwLiAgICAgICAgLTkwLiAgICAgICAgIC01LiAgICAgICAgIC01LiAgICAgICAgIC0zLjE0MTU5MjcgIC01LgogIC0wLiAgICAgICAgIC0wLiAgICAgICBdlIwJaGlnaF9yZXBylIxTWzkwLiAgICAgICAgOTAuICAgICAgICAgNS4gICAgICAgICA1LiAgICAgICAgIDMuMTQxNTkyNyAgNS4KICAxLiAgICAgICAgIDEuICAgICAgIF2UjApfbnBfcmFuZG9tlE51Yi4=",
|
58 |
+
"dtype": "float32",
|
59 |
+
"bounded_below": "[ True True True True True True True True]",
|
60 |
+
"bounded_above": "[ True True True True True True True True]",
|
61 |
+
"_shape": [
|
62 |
+
8
|
63 |
+
],
|
64 |
+
"low": "[-90. -90. -5. -5. -3.1415927 -5.\n -0. -0. ]",
|
65 |
+
"high": "[90. 90. 5. 5. 3.1415927 5.\n 1. 1. ]",
|
66 |
+
"low_repr": "[-90. -90. -5. -5. -3.1415927 -5.\n -0. -0. ]",
|
67 |
+
"high_repr": "[90. 90. 5. 5. 3.1415927 5.\n 1. 1. ]",
|
68 |
+
"_np_random": null
|
69 |
+
},
|
70 |
+
"action_space": {
|
71 |
+
":type:": "<class 'gymnasium.spaces.discrete.Discrete'>",
|
72 |
+
":serialized:": "gAWV1QAAAAAAAACMGWd5bW5hc2l1bS5zcGFjZXMuZGlzY3JldGWUjAhEaXNjcmV0ZZSTlCmBlH2UKIwBbpSMFW51bXB5LmNvcmUubXVsdGlhcnJheZSMBnNjYWxhcpSTlIwFbnVtcHmUjAVkdHlwZZSTlIwCaTiUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYkMIBAAAAAAAAACUhpRSlIwFc3RhcnSUaAhoDkMIAAAAAAAAAACUhpRSlIwGX3NoYXBllCloCmgOjApfbnBfcmFuZG9tlE51Yi4=",
|
73 |
+
"n": "4",
|
74 |
+
"start": "0",
|
75 |
+
"_shape": [],
|
76 |
+
"dtype": "int64",
|
77 |
+
"_np_random": null
|
78 |
+
},
|
79 |
+
"n_envs": 16,
|
80 |
"n_steps": 1024,
|
81 |
"gamma": 0.999,
|
82 |
"gae_lambda": 0.98,
|
|
|
87 |
"n_epochs": 4,
|
88 |
"clip_range": {
|
89 |
":type:": "<class 'function'>",
|
90 |
+
":serialized:": "gAWVxQIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMSS91c3IvbG9jYWwvbGliL3B5dGhvbjMuMTAvZGlzdC1wYWNrYWdlcy9zdGFibGVfYmFzZWxpbmVzMy9jb21tb24vdXRpbHMucHmUjARmdW5jlEuEQwIEAZSMA3ZhbJSFlCl0lFKUfZQojAtfX3BhY2thZ2VfX5SMGHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbpSMCF9fbmFtZV9flIwec3RhYmxlX2Jhc2VsaW5lczMuY29tbW9uLnV0aWxzlIwIX19maWxlX1+UjEkvdXNyL2xvY2FsL2xpYi9weXRob24zLjEwL2Rpc3QtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lHVOTmgAjBBfbWFrZV9lbXB0eV9jZWxslJOUKVKUhZR0lFKUjBxjbG91ZHBpY2tsZS5jbG91ZHBpY2tsZV9mYXN0lIwSX2Z1bmN0aW9uX3NldHN0YXRllJOUaB99lH2UKGgWaA2MDF9fcXVhbG5hbWVfX5SMGWNvbnN0YW50X2ZuLjxsb2NhbHM+LmZ1bmOUjA9fX2Fubm90YXRpb25zX1+UfZSMDl9fa3dkZWZhdWx0c19flE6MDF9fZGVmYXVsdHNfX5ROjApfX21vZHVsZV9flGgXjAdfX2RvY19flE6MC19fY2xvc3VyZV9flGgAjApfbWFrZV9jZWxslJOURz/JmZmZmZmahZRSlIWUjBdfY2xvdWRwaWNrbGVfc3VibW9kdWxlc5RdlIwLX19nbG9iYWxzX1+UfZR1hpSGUjAu"
|
91 |
},
|
92 |
"clip_range_vf": null,
|
93 |
"normalize_advantage": true,
|
94 |
+
"target_kl": null,
|
95 |
+
"lr_schedule": {
|
96 |
+
":type:": "<class 'function'>",
|
97 |
+
":serialized:": "gAWVxQIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMSS91c3IvbG9jYWwvbGliL3B5dGhvbjMuMTAvZGlzdC1wYWNrYWdlcy9zdGFibGVfYmFzZWxpbmVzMy9jb21tb24vdXRpbHMucHmUjARmdW5jlEuEQwIEAZSMA3ZhbJSFlCl0lFKUfZQojAtfX3BhY2thZ2VfX5SMGHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbpSMCF9fbmFtZV9flIwec3RhYmxlX2Jhc2VsaW5lczMuY29tbW9uLnV0aWxzlIwIX19maWxlX1+UjEkvdXNyL2xvY2FsL2xpYi9weXRob24zLjEwL2Rpc3QtcGFja2FnZXMvc3RhYmxlX2Jhc2VsaW5lczMvY29tbW9uL3V0aWxzLnB5lHVOTmgAjBBfbWFrZV9lbXB0eV9jZWxslJOUKVKUhZR0lFKUjBxjbG91ZHBpY2tsZS5jbG91ZHBpY2tsZV9mYXN0lIwSX2Z1bmN0aW9uX3NldHN0YXRllJOUaB99lH2UKGgWaA2MDF9fcXVhbG5hbWVfX5SMGWNvbnN0YW50X2ZuLjxsb2NhbHM+LmZ1bmOUjA9fX2Fubm90YXRpb25zX1+UfZSMDl9fa3dkZWZhdWx0c19flE6MDF9fZGVmYXVsdHNfX5ROjApfX21vZHVsZV9flGgXjAdfX2RvY19flE6MC19fY2xvc3VyZV9flGgAjApfbWFrZV9jZWxslJOURz8zqSowVTJhhZRSlIWUjBdfY2xvdWRwaWNrbGVfc3VibW9kdWxlc5RdlIwLX19nbG9iYWxzX1+UfZR1hpSGUjAu"
|
98 |
+
}
|
99 |
}
|
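The `data` diff above shows the saved spaces moving from `gym.spaces` to `gymnasium.spaces`, with the observation bounds changing from all-infinite to finite values. A quick sketch for checking what actually ended up in the archive (it assumes `ppo-LunarLander-v2.zip` from this commit is in the working directory):

```python
from stable_baselines3 import PPO

# Loads ppo-LunarLander-v2.zip; no environment is needed just to inspect the spaces.
model = PPO.load("ppo-LunarLander-v2")
print(type(model.observation_space))  # gymnasium.spaces.Box per the new data blob
print(model.observation_space.low)    # finite bounds now (e.g. -90 for the first two coordinates)
print(model.action_space)             # Discrete(4)
```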
ppo-LunarLander-v2/policy.optimizer.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:34c587c8c4a126fff964721372f800e7a08abf808c89b9e68bdcf85846c052c9
 size 87929
ppo-LunarLander-v2/policy.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:acbad40dadeb74d25fdc21505f7c1bdb6e40c4ad4a038e1f02f734e654432583
+size 43329
ppo-LunarLander-v2/system_info.txt
CHANGED
@@ -1,7 +1,9 @@
-- OS: Linux-5.10.147+-x86_64-with-glibc2.29 # 1 SMP Sat Dec 10 16:00:40 UTC 2022
-- Python: 3.8.10
-- Stable-Baselines3: 1.7.0
-- PyTorch: 1.13.1+cu116
+- OS: Linux-5.15.107+-x86_64-with-glibc2.31 # 1 SMP Sat Apr 29 09:15:28 UTC 2023
+- Python: 3.10.11
+- Stable-Baselines3: 2.0.0a5
+- PyTorch: 2.0.0+cu118
 - GPU Enabled: True
-- Numpy: 1.21.6
-- Gym: 0.21.0
+- Numpy: 1.22.4
+- Cloudpickle: 2.2.1
+- Gymnasium: 0.28.1
+- OpenAI Gym: 0.25.2
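`system_info.txt` now reports Stable-Baselines3 2.0.0a5 with Gymnasium 0.28.1, so the environment side comes from Gymnasium rather than OpenAI Gym. A sketch of the retraining setup this implies; the hyperparameters mirror the values stored in the `data` blob above, but the actual training script is not part of this commit:

```python
from stable_baselines3 import PPO
from stable_baselines3.common.env_util import make_vec_env

# SB3 >= 2.0 builds Gymnasium envs; LunarLander-v2 needs `pip install "gymnasium[box2d]"`.
vec_env = make_vec_env("LunarLander-v2", n_envs=16)

model = PPO(
    "MlpPolicy",
    vec_env,
    n_steps=1024,
    batch_size=64,
    n_epochs=4,
    gamma=0.999,
    gae_lambda=0.98,
    ent_coef=0.01,
    verbose=1,
)
model.learn(total_timesteps=1_000_000)
model.save("ppo-LunarLander-v2")
```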
replay.mp4
CHANGED
Binary files a/replay.mp4 and b/replay.mp4 differ
results.json
CHANGED
@@ -1 +1 @@
-{"mean_reward":
+{"mean_reward": 253.80201564603243, "std_reward": 22.654923727736282, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2023-05-17T22:35:32.708055"}
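`results.json` records the evaluation behind the reported score: a mean reward of 253.80 over 10 deterministic episodes. A sketch of how these fields are typically produced with SB3's `evaluate_policy`; the `package_to_hub` call shown in the removed README writes a file of this shape automatically, so this is only needed when regenerating it by hand:

```python
import json
from datetime import datetime

import gymnasium as gym
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

model = PPO.load("ppo-LunarLander-v2")
eval_env = gym.make("LunarLander-v2")

# Same settings as recorded in results.json: 10 deterministic evaluation episodes.
mean_reward, std_reward = evaluate_policy(
    model, eval_env, n_eval_episodes=10, deterministic=True
)

with open("results.json", "w") as f:
    json.dump(
        {
            "mean_reward": float(mean_reward),
            "std_reward": float(std_reward),
            "is_deterministic": True,
            "n_eval_episodes": 10,
            "eval_datetime": datetime.now().isoformat(),
        },
        f,
    )
```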