TypeError with Boltzmann Exploration for Discrete Custom Gym-Environment
Coach version: 1.0.1
Ubuntu version: 19.10
Environment: Discrete Custom Gym-Environment
Algorithm: DQN with Boltzmann Exploration
Hi, I created a custom gym environment that uses a discrete action space with 5 actions. It works fine with E-Greedy exploration, but with Boltzmann exploration it fails with this error:
rl_coach/exploration_policies/boltzmann.py", line 61, in get_action
action = np.random.choice(range(self.action_space.shape), p=probabilities)
TypeError: only integer scalar arrays can be converted to a scalar index
It seems to me that discrete action spaces converted from gym environments always end up with shape 1 (from rl_coach/spaces.py):
class DiscreteActionSpace(ActionSpace):
    """
    A discrete action space with action indices as actions
    """
    def __init__(self, num_actions: int, descriptions: Union[None, List, Dict]=None, default_action: np.ndarray=None,
                 filtered_action_space=None):
        super().__init__(1, low=0, high=num_actions-1, descriptions=descriptions)
        # the number of actions is mapped to high

        # default action
        if default_action is None:
            self.default_action = 0
        else:
            self.default_action = default_action

        if filtered_action_space is not None:
            self.filtered_action_space = filtered_action_space
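A quick check appears to confirm this (hypothetical snippet; the attribute names are taken straight from the constructor above):

from rl_coach.spaces import DiscreteActionSpace

space = DiscreteActionSpace(num_actions=5)
# Per the constructor above, the shape is hard-coded to 1 (the base class
# may wrap it in an array), while the action count only survives in `high`
print(space.shape)  # 1, not 5
print(space.high)   # 4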
I don't know why the shape is hard-coded to 1 instead of the number of actions, but it seems wrong to me: get_action builds its sampling range from self.action_space.shape, which can never match the 5-entry probability vector coming out of the softmax.
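As a temporary workaround I sketched a subclass that derives the candidate indices from the probability vector itself. This is only a sketch under my assumptions (that action_values is the flat per-action Q-value vector, that the policy stores its schedule as self.temperature_schedule, and that get_action returns just the chosen action; the stock signature may differ across Coach versions):

import numpy as np
from rl_coach.exploration_policies.boltzmann import Boltzmann

class PatchedBoltzmann(Boltzmann):
    # Workaround sketch, not a verified fix: sample over the indices of
    # the softmax vector instead of range(self.action_space.shape), so
    # the two sizes always match.
    def get_action(self, action_values):
        # Assumption: action_values is the (possibly nested) Q-value
        # vector for a single state
        action_values = np.squeeze(np.asarray(action_values))
        # Same Boltzmann softmax as the stock implementation
        exp_values = np.exp(action_values / self.temperature_schedule.current_value)
        probabilities = exp_values / np.sum(exp_values)
        # len(probabilities) == num_actions (5 in my environment)
        return np.random.choice(len(probabilities), p=probabilities)

Hooking this in would also mean pointing the exploration parameters at the subclass (Coach resolves the policy class from the parameters' path), which I haven't shown here.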
I use this Preset:
from rl_coach.agents.dqn_agent import DQNAgentParameters
from rl_coach.base_parameters import VisualizationParameters, PresetValidationParameters
from rl_coach.core_types import TrainingSteps, EnvironmentEpisodes, EnvironmentSteps
from rl_coach.environments.gym_environment import GymVectorEnvironment
from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager
from rl_coach.graph_managers.graph_manager import ScheduleParameters
from rl_coach.memories.memory import MemoryGranularity
from rl_coach.architectures.layers import Dense
from rl_coach.schedules import ConstantSchedule
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.filters.filter import InputFilter
from rl_coach.filters.observation.observation_stacking_filter import ObservationStackingFilter
from rl_coach.base_parameters import EmbedderScheme
from rl_coach.exploration_policies.boltzmann import BoltzmannParameters
import tm_rl
####################
# Graph Scheduling #
####################
schedule_params = ScheduleParameters()
schedule_params.improve_steps = TrainingSteps(10000000000)
schedule_params.steps_between_evaluation_periods = EnvironmentEpisodes(10)
schedule_params.evaluation_steps = EnvironmentEpisodes(1)
schedule_params.heatup_steps = EnvironmentSteps(10000)
#########
# Agent #
#########
agent_params = DQNAgentParameters()
# DQN params
agent_params.algorithm.num_steps_between_copying_online_weights_to_target = EnvironmentSteps(1)
agent_params.algorithm.discount = 0.99
agent_params.algorithm.num_consecutive_playing_steps = EnvironmentSteps(1)
agent_params.algorithm.rate_for_copying_weights_to_target = 0.001
# NN configuration
agent_params.network_wrappers['main'].learning_rate = 0.001
agent_params.network_wrappers['main'].replace_mse_with_huber_loss = True
agent_params.network_wrappers['main'].input_embedders_parameters = {
    'ball_pos': InputEmbedderParameters(scheme=EmbedderScheme.Empty),
    'goal_pos': InputEmbedderParameters(scheme=EmbedderScheme.Empty)
}
agent_params.network_wrappers['main'].middleware_parameters = FCMiddlewareParameters(scheme=[Dense(24), Dense(24), Dense(24)])
agent_params.network_wrappers['main'].batch_size = 512
# ER size
agent_params.memory.max_size = (MemoryGranularity.Transitions, 50000)
# Boltzmann exploration schedule
agent_params.exploration = BoltzmannParameters()
agent_params.exploration.temperature_schedule = ConstantSchedule(1)
################
# Environment #
################
env_params = GymVectorEnvironment(level="tm_rl.envs.envs.discrete:Discrete")
tilt_maze_input_filter = InputFilter()
tilt_maze_input_filter.add_observation_filter('ball_pos', 'stacking', ObservationStackingFilter(4))
env_params.default_input_filter = tilt_maze_input_filter
env_params.additional_simulator_parameters = {"maze_list": [10],
                                              "step_limit": 1000}
########
# Test #
########
preset_validation_params = PresetValidationParameters()
preset_validation_params.test = True
preset_validation_params.min_reward_threshold = 150
preset_validation_params.max_episodes_to_achieve_reward = 250
vis_params = VisualizationParameters(native_rendering=False)
graph_manager = BasicRLGraphManager(agent_params=agent_params, env_params=env_params,
                                    schedule_params=schedule_params, vis_params=vis_params,
                                    preset_validation_params=preset_validation_params)
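For completeness, I launch the preset by pointing the coach runner at the preset file, roughly like this (placeholder path):

coach -p /path/to/this_preset.py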