@inproceedings{kocak2016onlinea,
  author    = {Koc{\'{a}}k, Tom{\'{a}}{\v{s}} and Neu, Gergely and Valko, Michal},
  title     = {Online Learning with {Erd{\H{o}}s}-{R{\'{e}}nyi} Side-Observation Graphs},
  booktitle = {Uncertainty in Artificial Intelligence},
  year      = {2016},
  abstract  = {We consider adversarial multi-armed bandit problems where the learner is allowed to observe losses of a number of arms beside the arm that it actually chose. We study the case where all non-chosen arms reveal their loss with an unknown probability rt, independently of each other and the action of the learner. Moreover, we allow rt to change in every round t, which rules out the possibility of estimating rt by a well-concentrated sample average. We propose an algorithm which operates under the assumption that rt is large enough to warrant at least one side observation with high probability. We show that after T rounds in a bandit problem with N arms, the expected regret of our algorithm is of order O(sqrt(sum(t=1)T (1/rt) log N )), given that rt less than log T / (2N-2) for all t. All our bounds are within logarithmic factors of the best achievable performance of any algorithm that is even allowed to know exact values of rt.},
}