• Default Ordering • Sorted by Date • Classified by Publication Type • Classified by Research Category •
Jan Hendrik Metzen, Mark Edgington, Yohannes Kassahun, and Frank Kirchner. Analysis of an evolutionary reinforcement learning method in a multiagent domain. In Proceedings of the 7th International Conference on Autonomous Agents and Multiagent Systems, pp. 291–298, AAMAS '08, International Foundation for Autonomous Agents and Multiagent Systems, Richland, SC, May 2008.
Many multiagent problems comprise subtasks which can be considered as reinforcement learning (RL) problems. In addition to classical temporal difference methods, evolutionary algorithms are among the most promising approaches for such RL problems. The relative performance of these approaches in certain subdomains (e.g. multiagent learning) of the general RL problem remains an open question at this time. In addition to theoretical analysis, benchmarks are one of the most important tools for comparing different RL methods in certain problem domains. A recently proposed multiagent RL benchmark problem is the RoboCup Keepaway benchmark. This benchmark is one of the most challenging multiagent learning problems because its state-space is continuous and high dimensional, and both the sensors and the actuators are noisy. In this paper we analyze the performance of the neuroevolutionary approach called Evolutionary Acquisition of Neural Topologies (EANT) in the Keepaway benchmark, and compare the results obtained using EANT with the results of other algorithms tested on the same benchmark.
@comment{Conference venue moved from `location` to `venue`: in biblatex,
`location` is an alias of `address` (publisher city), so the original entry
carried two conflicting values for the same logical field. `venue` is the
biblatex field for the event site; classic BibTeX ignores unknown fields,
so this is safe under both processors. All other values are unchanged.}
@inproceedings{Metzen:AAMAS:2008,
  author           = {Jan Hendrik Metzen and Mark Edgington and Yohannes Kassahun and Frank Kirchner},
  title            = {Analysis of an evolutionary reinforcement learning method in a multiagent domain},
  booktitle        = {Proceedings of the 7th International Conference on Autonomous Agents and Multiagent Systems},
  series           = {{AAMAS} '08},
  pages            = {291--298},
  publisher        = {International Foundation for Autonomous Agents and Multiagent Systems},
  address          = {Richland, {SC}},
  venue            = {Estoril, Portugal},
  month            = may,
  year             = {2008},
  isbn             = {978-0-9817381-0-9},
  url              = {http://portal.acm.org/citation.cfm?id=1402428},
  abstract         = {Many multiagent problems comprise subtasks which can be considered as reinforcement learning (RL) problems. In addition to classical temporal difference methods, evolutionary algorithms are among the most promising approaches for such RL problems. The relative performance of these approaches in certain subdomains (e.\,g. multiagent learning) of the general RL problem remains an open question at this time. In addition to theoretical analysis, benchmarks are one of the most important tools for comparing different RL methods in certain problem domains. A recently proposed multiagent RL benchmark problem is the RoboCup Keepaway benchmark. This benchmark is one of the most challenging multiagent learning problems because its state-space is continuous and high dimensional, and both the sensors and the actuators are noisy. In this paper we analyze the performance of the neuroevolutionary approach called Evolutionary Acquisition of Neural Topologies (EANT) in the Keepaway benchmark, and compare the results obtained using EANT with the results of other algorithms tested on the same benchmark.},
  bib2html_pubtype = {Refereed Conference},
  bib2html_rescat  = {Neuroevolution, Reinforcement Learning}
}
Generated by bib2html.pl (written by Patrick Riley) on Thu May 23, 2013 11:36:00