@inproceedings{41fab4dcd3544955ad2d7e3b21a44f80,
title = "Kernel matrix regularization via shrinkage estimation",
abstract = "The “kernel trick” is a fundamental technique that allows many machine learning algorithms to be extended to nonlinear settings. The kernel matrix obtained from the data consists of inner products in the feature space, whereas the sample covariance matrix of the same data is built from the corresponding outer products; consequently, the two matrices share their nonzero eigenvalues up to a constant factor. The use of kernels often implies a number of features that is large compared to the number of observations, a regime in which the sample covariance matrix is ill-conditioned or non-invertible. To remedy this, we propose to regularize the kernel matrix in a way that reflects a better alternative to the sample covariance matrix, i.e., by shrinking the latter toward a well-conditioned matrix with the aim of minimizing the mean-squared error. We demonstrate through numerical simulations that the proposed regularization is useful in classification tasks.",
keywords = "Covariance estimation, Kernel trick, Minimum mean-squared error, Shrinkage estimator",
author = "Tomer Lancewicki",
note = "Publisher Copyright: {\textcopyright} Springer Nature Switzerland AG 2019; Computing Conference 2018; Conference date: 10-07-2018 through 12-07-2018",
year = "2019",
month = jan,
day = "1",
doi = "10.1007/978-3-030-01177-2_94",
language = "English",
isbn = "978-3-030-01176-5",
series = "Advances in Intelligent Systems and Computing",
publisher = "Springer",
pages = "1292--1305",
editor = "Kohei Arai and Supriya Kapoor and Rahul Bhatia",
booktitle = "Intelligent Computing - Proceedings of the 2018 Computing Conference",
address = "Cham, Switzerland",
}
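
As a companion to the entry above, the following is a minimal Python/NumPy sketch of the general idea the abstract describes: shrinking the kernel (Gram) matrix toward a scaled-identity target so that it becomes well-conditioned. The RBF kernel choice, the identity target, the helper names (rbf_kernel, shrink_kernel), and the plug-in weight rho are illustrative assumptions; this is not the minimum mean-squared-error shrinkage estimator derived in the paper.

    # Sketch of kernel-matrix shrinkage toward a scaled identity target.
    # NOT the paper's estimator: the target and the plug-in weight below
    # are assumptions, loosely in the spirit of Ledoit-Wolf-style
    # covariance shrinkage.
    import numpy as np

    def rbf_kernel(X, gamma=1.0):
        # Gram matrix of inner products in an (implicit) RBF feature space.
        sq = np.sum(X**2, axis=1)
        d2 = sq[:, None] + sq[None, :] - 2.0 * X @ X.T
        return np.exp(-gamma * d2)

    def shrink_kernel(K, rho=None):
        # Shrink K toward nu*I, where nu = trace(K)/n is the mean eigenvalue.
        # If rho is not given, use a simple heuristic plug-in weight (an
        # assumption, not the minimum-MSE weight derived in the paper).
        n = K.shape[0]
        nu = np.trace(K) / n
        target = nu * np.eye(n)
        if rho is None:
            # Heuristic: less shrinkage the more K is dispersed around nu*I.
            num = np.linalg.norm(K - target, "fro") ** 2
            rho = min(1.0, nu**2 * n / max(num, 1e-12))
        return (1.0 - rho) * K + rho * target

    if __name__ == "__main__":
        rng = np.random.default_rng(0)
        X = rng.standard_normal((20, 500))  # few observations, many features
        K = rbf_kernel(X, gamma=1.0 / X.shape[1])
        K_reg = shrink_kernel(K)
        print("condition number before:", np.linalg.cond(K))
        print("condition number after: ", np.linalg.cond(K_reg))

Running the script on a few-observations/many-features sample prints the condition number of the kernel matrix before and after shrinkage; any rho > 0 pulls the eigenvalues toward their mean, so the shrunk matrix is strictly better conditioned.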