@inproceedings{calandriello2017distributed,
  abstract  = {Most kernel-based methods, such as kernel regression, kernel PCA, ICA, or $k$-means clustering, do not scale to large datasets, because constructing and storing the kernel matrix $\mathbf{K}_n$ requires at least $O(n^2)$ time and space for $n$ samples. Recent works (Alaoui 2014, Musco 2016) show that sampling points with replacement according to their ridge leverage scores (RLS) generates small dictionaries of relevant points with strong spectral approximation guarantees for $\mathbf{K}_n$. The drawback of RLS-based methods is that computing exact RLS requires constructing and storing the whole kernel matrix. In this paper, we introduce SQUEAK, a new algorithm for kernel approximation based on RLS sampling that sequentially processes the dataset, storing a dictionary which creates accurate kernel matrix approximations with a number of points that only depends on the effective dimension $d_{\mathrm{eff}}(\gamma)$ of the dataset. Moreover, since all the RLS estimations are efficiently performed using only the small dictionary, SQUEAK never constructs the whole matrix $\mathbf{K}_n$, runs in linear time $\widetilde{O}(n \, d_{\mathrm{eff}}(\gamma)^3)$ w.r.t.\ $n$, and requires only a single pass over the dataset. We also propose a parallel and distributed version of SQUEAK achieving similar accuracy in as little as $\widetilde{O}(\log(n) \, d_{\mathrm{eff}}(\gamma)^3)$ time.},
  author    = {Calandriello, Daniele and Lazaric, Alessandro and Valko, Michal},
  booktitle = {International Conference on Artificial Intelligence and Statistics},
  file      = {:Users/miki/Dropbox/research/daniele{\_}research/2016/SUB{\_}2016{\_}aistat{\_}parallel{\_}kernel{\_}rls/camera{\_}ready/parallel{\_}kernel{\_}rls.pdf:pdf},
  title     = {Distributed Adaptive Sampling for Kernel Matrix Approximation},
  year      = {2017},
}