@inproceedings{carpentier2015simple,
  abstract = {We consider a stochastic bandit problem with infinitely many arms. In this setting, the learner has no chance of trying all the arms even once and has to dedicate its limited number of samples only to a certain number of arms. All previous algorithms for this setting were designed for minimizing the cumulative regret of the learner. In this paper, we propose an algorithm aiming at minimizing the simple regret. As in the cumulative regret setting of infinitely many armed bandits, the rate of the simple regret will depend on a parameter $\beta$ characterizing the distribution of the near-optimal arms. We prove that depending on $\beta$, our algorithm is minimax optimal either up to a multiplicative constant or up to a $\log(n)$ factor. We also provide extensions to several important cases: when $\beta$ is unknown, in a natural setting where the near-optimal arms have a small variance, and in the case of unknown time horizon.},
  author = {Carpentier, Alexandra and Valko, Michal},
  booktitle = {International Conference on Machine Learning},
  title = {{Simple regret for infinitely many armed bandits}},
  year = {2015}
}