Neuroevolution strategies for episodic reinforcement learning
@Article{HeidrichMeisner2009152,
  author =   "Verena Heidrich-Meisner and Christian Igel",
  title =    "Neuroevolution strategies for episodic reinforcement
              learning",
  journal =  "Journal of Algorithms",
  year =     "2009",
  volume =   "64",
  number =   "4",
  pages =    "152--168",
  month =    oct,
  note =     "Special Issue: Reinforcement Learning",
  keywords = "genetic algorithms, genetic programming, reinforcement
              learning, evolution strategy, covariance matrix adaptation,
              partially observable Markov decision process, direct policy
              search",
  ISSN =     "0196-6774",
  broken =   "http://www.sciencedirect.com/science/article/B6WH3-4W7RY8J-3/2/22f7075bc25dab10a8ff3714e2fee303",
  DOI =      "doi:10.1016/j.jalgor.2009.04.002",
  abstract = "Because of their convincing performance, there is a growing
              interest in using evolutionary algorithms for reinforcement
              learning. We propose learning of neural network policies by
              the covariance matrix adaptation evolution strategy (CMA-ES),
              a randomised variable-metric search algorithm for continuous
              optimisation. We argue that this approach, which we refer to
              as CMA Neuroevolution Strategy (CMA-NeuroES), is ideally
              suited for reinforcement learning, in particular because it
              is based on ranking policies (and therefore robust against
              noise), efficiently detects correlations between parameters,
              and infers a search direction from scalar reinforcement
              signals. We evaluate the CMA-NeuroES on five different
              (Markovian and non-Markovian) variants of the common pole
              balancing problem. The results are compared to those
              described in a recent study covering several RL algorithms,
              and the CMA-NeuroES shows the overall best performance.",
  notes =    "compared against CE \cite{gruau:1996:ceVdeGNN}",
}
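
The approach the abstract describes can be sketched as follows. This is a
minimal illustration, not the authors' implementation: it assumes Nikolaus
Hansen's cma package (pycma) for the CMA-ES and a hand-rolled, fully
observable single-pole cart-pole; the network size, force bound, and
failure thresholds are illustrative choices. As in the abstract, only the
ranking of episodic returns enters the strategy update, which is what
makes the search robust to noisy fitness evaluations.

import numpy as np
import cma

def cartpole_step(state, force, dt=0.02):
    # One Euler step of the standard cart-pole dynamics
    # (gravity, cart mass, pole mass, pole half-length as in the classic benchmark).
    g, m_c, m_p, l = 9.8, 1.0, 0.1, 0.5
    x, x_dot, theta, theta_dot = state
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    temp = (force + m_p * l * theta_dot ** 2 * sin_t) / (m_c + m_p)
    theta_acc = (g * sin_t - cos_t * temp) / (
        l * (4.0 / 3.0 - m_p * cos_t ** 2 / (m_c + m_p)))
    x_acc = temp - m_p * l * theta_acc * cos_t / (m_c + m_p)
    return np.array([x + dt * x_dot, x_dot + dt * x_acc,
                     theta + dt * theta_dot, theta_dot + dt * theta_acc])

N_IN, N_HID = 4, 5  # observation size; hidden-layer width is illustrative
N_PARAMS = N_IN * N_HID + N_HID + N_HID + 1

def policy(w, obs):
    # Small feed-forward network mapping the state to a bounded scalar force.
    w1 = w[:N_IN * N_HID].reshape(N_HID, N_IN)
    b1 = w[N_IN * N_HID:N_IN * N_HID + N_HID]
    w2 = w[N_IN * N_HID + N_HID:-1]
    b2 = w[-1]
    return 10.0 * np.tanh(w2 @ np.tanh(w1 @ obs + b1) + b2)

def neg_return(w, max_steps=1000):
    # Episodic fitness: steps the pole stays up, negated because CMA-ES minimises.
    state = np.random.uniform(-0.05, 0.05, size=4)
    for t in range(max_steps):
        state = cartpole_step(state, policy(np.asarray(w), state))
        if abs(state[0]) > 2.4 or abs(state[2]) > 0.21:  # cart or pole out of bounds
            return -float(t)
    return -float(max_steps)

es = cma.CMAEvolutionStrategy(np.zeros(N_PARAMS), 0.5, {'maxiter': 50})
while not es.stop():
    offspring = es.ask()                                    # sample candidate weight vectors
    es.tell(offspring, [neg_return(w) for w in offspring])  # rank-based strategy update
    es.disp()
print('best episodic return found:', -es.result.fbest)

The variable-metric character the abstract refers to is the adapted
covariance matrix inside CMA-ES, which captures correlations between the
network weights; the sketch gets that for free from pycma.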