@inproceedings{3e6d30a141e5424fafd55e32370d86c5,
title = "{S}em{E}val-2021 Task 12: Learning with Disagreements",
abstract = "Disagreement between coders is ubiquitous in virtually all datasets annotated with human judgements, in both natural language processing and computer vision. However, most supervised machine learning methods assume that a single preferred interpretation exists for each item, which is at best an idealization. The aim of the SemEval-2021 shared task on Learning with Disagreements (Le-wi-Di) was to provide a unified testing framework for methods that learn from data containing multiple, possibly contradictory annotations, covering the best-known datasets with disagreement information for interpreting language and classifying images. In this paper we describe the shared task and its results.",
author = "Alexandra Uma and Tommaso Fornaciari and Anca Dumitrache and Tristan Miller and Jon Chamberlain and Barbara Plank and Edwin Simpson and Massimo Poesio",
note = "Publisher Copyright: {\textcopyright} 2021 Association for Computational Linguistics; 15th International Workshop on Semantic Evaluation, SemEval 2021; Conference date: 05-08-2021 through 06-08-2021",
year = "2021",
language = "English",
publisher = "Association for Computational Linguistics",
pages = "338--347",
editor = "Alexis Palmer and Nathan Schneider and Natalie Schluter and Guy Emerson and Aurelie Herbelot and Xiaodan Zhu",
booktitle = "Proceedings of the 15th International Workshop on Semantic Evaluation (SemEval-2021)",
address = "United States",
}