@inproceedings{3bf264b4ca164646bf1bef3eaaed2b81,
title = "Lexical Generalization Improves with Larger Models and Longer Training",
abstract = "While fine-tuned language models perform well on many tasks, they were also shown to rely on superficial surface features such as lexical overlap. Excessive utilization of such heuristics can lead to failure on challenging inputs. We analyze the use of lexical overlap heuristics in natural language inference, paraphrase detection, and reading comprehension (using a novel contrastive dataset), and find that larger models are much less susceptible to adopting lexical overlap heuristics. We also find that longer training leads models to abandon lexical overlap heuristics. Finally, we provide evidence that the disparity between models size has its source in the pre-trained model.",
author = "Elron Bandel and Yoav Goldberg and Yanai Elazar",
note = "Publisher Copyright: {\textcopyright} 2022 Association for Computational Linguistics.; 2022 Findings of the Association for Computational Linguistics: EMNLP 2022 ; Conference date: 07-12-2022 Through 11-12-2022",
year = "2022",
month = dec,
doi = "10.18653/v1/2022.findings-emnlp.426",
language = "English",
series = "Findings of the Association for Computational Linguistics: EMNLP 2022",
publisher = "Association for Computational Linguistics (ACL)",
pages = "4427--4439",
editor = "Yoav Goldberg and Zornitsa Kozareva and Yue Zhang",
booktitle = "Findings of the Association for Computational Linguistics",
address = "United States",
}