@inbook{45979de292fc4dc3b88bf509bf4c5486,
title = "Metrics for Evaluating Explainable Recommender Systems",
abstract = "Recommender systems aim to support their users by reducing information overload so that they can make better decisions. Recommender systems must be transparent, so users can form mental models about the system{\textquoteright}s goals, internal state, and capabilities that are in line with its actual design. Explanations and transparent behaviour of the system should inspire trust and, ultimately, lead to more persuasive recommendations. Here, explanations convey reasons why a recommendation is given or how the system forms its recommendations. This paper focuses on the question of how such claims about the effectiveness of explanations can be evaluated. Accordingly, we investigate various models that are used to assess the effects of explanations and recommendations. We discuss objective and subjective measurement and argue that both are needed. We define a set of metrics for measuring the effectiveness of explanations and recommendations. The feasibility of using these metrics is discussed in the context of a specific explainable recommender system in the food and health domain.",
author = "Joris Hulstijn and Igor Tchappi and Amro Najjar and Reyhan Aydoğan",
year = "2023",
month = sep,
day = "5",
doi = "10.1007/978-3-031-40878-6_12",
language = "English",
isbn = "978-3-031-40877-9",
series = "Lecture Notes in Computer Science",
publisher = "Springer",
pages = "212–230",
booktitle = "Explainable and Transparent AI and Multi-Agent Systems",
address = "Germany",
edition = "1",
}