@inproceedings{04252a7a58c244c295e8f58cd958adc9,
  title     = {Learning to Personalize Equalization for High-Fidelity Spatial Audio Reproduction},
  abstract  = {Reproducing accurate and perceptually realistic spatial audio for augmented and virtual reality (AR/VR) requires the headphones to have a flat frequency response. This can be achieved by equalizing the headphone transducers' output given the transfer function between the transducer and the human ear, referred to as Ear Acoustic Response (EAR). EAR is unique to every individual and is a function of the transducer characteristics, the user's anthropometric features (e.g. ear and head shape) and the interactions between the two. This paper proposes a novel method to infer the EAR given the ear features of any listener using a probabilistic framework and a sub-sample of the population as prior. We introduce an approach to assess the level of personalization achieved and benchmark the improvements delivered by the proposed algorithm relative to a generic solution.},
  keywords  = {AR/VR, EAR, Gaussian Processes, HRTF, HpTF, Personalized Recommendation, Spatial Audio},
  author    = {Gupta, Arjun and Hoffmann, Pablo F. and Prepelita, Sebastian and Robinson, Philip and Ithapu, Vamsi K. and Alon, David L.},
  note      = {Publisher Copyright: {\textcopyright} 2023 IEEE.; 48th IEEE International Conference on Acoustics, Speech and Signal Processing, ICASSP 2023 ; Conference date: 04-06-2023 Through 10-06-2023},
  year      = {2023},
  month     = jan,
  day       = {1},
  doi       = {10.1109/ICASSP49357.2023.10096846},
  language  = {English},
  series    = {{ICASSP}, {IEEE} International Conference on Acoustics, Speech and Signal Processing - Proceedings},
  publisher = {Institute of Electrical and Electronics Engineers},
  booktitle = {{ICASSP} 2023 - 2023 {IEEE} International Conference on Acoustics, Speech and Signal Processing, Proceedings},
  address   = {United States},
}