@inproceedings{84318221d71b4521a989f9fbe0d60c7a,
title = "Confidence Backup Updates for Aggregating MDP State Values in Monte-Carlo Tree Search",
abstract = "Monte-Carlo Tree Search (MCTS) algorithms estimate the value of MDP states based on rewards received by performing multiple random simulations. MCTS algorithms can use different strategies to aggregate these rewards and provide an estimation for the states{\textquoteright} values. The most common aggregation method is to store the mean reward of all simulations. Another common approach stores the best observed reward from each state. Both of these methods have complementary benefits and drawbacks. In this paper, we show that both of these methods are biased estimators for the real expected value of MDP states. We propose a hybrid approach that uses the best reward for states with low noise, and otherwise uses the mean. Experimental results on the Sailing MDP domain show that our method has a considerable advantage when the rewards are drawn from a noisy distribution.",
author = "Zahy Bnaya and Alon Palombo and Rami Puzis and Ariel Felner",
note = "Publisher Copyright: Copyright {\textcopyright} 2015, Association for the Advancement of Artificial Intelligence (www.aaai.org). All rights reserved.; 8th Annual Symposium on Combinatorial Search, SoCS 2015; Conference date: 11-06-2015 Through 13-06-2015",
year = "2015",
month = jan,
day = "1",
language = "English",
series = "Proceedings of the 8th Annual Symposium on Combinatorial Search, SoCS 2015",
publisher = "AAAI Press",
pages = "156--160",
editor = "Levi Lelis and Roni Stern",
booktitle = "Proceedings of the 8th Annual Symposium on Combinatorial Search, SoCS 2015",
}