When I try to combine the SB3 vec_env
with AtariWrapper
, I get an error -
import gymnasium as gym
from stable_baselines3 import PPO
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.atari_wrappers import AtariWrapper
# Parallel environments
# vec_env = gym.make("PongNoFrameskip-v4")
vec_env = make_vec_env("PongNoFrameskip-v4", n_envs=2, seed=3)
vec_env = AtariWrapper(vec_env)
model = PPO("CnnPolicy", vec_env, verbose=1, n_steps=128, n_epochs=4,
batch_size=256, learning_rate=2.5e-4, clip_range=0.1,
vf_coef=0.5, ent_coef=0.01)
model.learn(total_timesteps=1e7)
model.save("ppo_cartpole")
I get this error -
A.L.E: Arcade Learning Environment (version 0.8.1+53f58b7)
[Powered by Stella]
Traceback (most recent call last):
File "D:\q_learning\sb3_ppo.py", line 10, in <module>
vec_env = AtariWrapper(vec_env)
File "C:\Users\thoma\anaconda3\envs\torch_2\lib\site-packages\stable_baselines3\common\atari_wrappers.py", line 294, in __init__
env = NoopResetEnv(env, noop_max=noop_max)
File "C:\Users\thoma\anaconda3\envs\torch_2\lib\site-packages\stable_baselines3\common\atari_wrappers.py", line 57, in __init__
assert env.unwrapped.get_action_meanings()[0] == "NOOP" # type: ignore[attr-defined]
AttributeError: 'DummyVecEnv' object has no attribute 'get_action_meanings'
Process finished with exit code 1
However, I don't get an error if I use the AtariWrapper
with a gymnasium
environment -
vec_env = gym.make("PongNoFrameskip-v4")
# vec_env = make_vec_env("PongNoFrameskip-v4", n_envs=2, seed=3)
vec_env = AtariWrapper(vec_env)
model = PPO("CnnPolicy", vec_env, verbose=1, n_steps=128, n_epochs=4,
batch_size=256, learning_rate=2.5e-4, clip_range=0.1,
vf_coef=0.5, ent_coef=0.01)
model.learn(total_timesteps=1e7)
model.save("ppo_cartpole")
I managed to solve using
vec_env.envs[0].unwrapped.get_action_meanings()
envs is a list of environments