About us
Publications
Home
Publications
2020
Ingrid Nunes; Dietmar Jannach
A Systematic Review and Taxonomy of Explanations in Decision Support and Recommender Systems Journal Article
In: User Modeling and User-Adapted Interaction, vol. 27, no. 3-5, pp. 393-444, 2020, (Pre SFI).
Abstract | BibTeX | Tags: Artificial Intelligence, Decision Support System, Expert System, Explanation, Knowledge-based system, Machine Learning, Recommender systems, Systematic review, Trust, WP2: User Modeling Personalization and Engagement | Links:
@article{Nunes2020,
  title         = {A Systematic Review and Taxonomy of Explanations in Decision Support and Recommender Systems},
  author        = {Nunes, Ingrid and Jannach, Dietmar},
  url           = {https://arxiv.org/pdf/2006.08672.pdf},
  doi           = {10.1007/s11257-017-9195-0},
  year          = {2020},
  date          = {2020-06-15},
  journal       = {User Modeling and User-Adapted Interaction},
  volume        = {27},
  number        = {3--5},
  pages         = {393--444},
  abstract      = {With the recent advances in the field of artificial intelligence, an increasing number of decision-making tasks are delegated to software systems. A key requirement for the success and adoption of such systems is that users must trust system choices or even fully automated decisions. To achieve this, explanation facilities have been widely investigated as a means of establishing trust in these systems since the early years of expert systems. With today's increasingly sophisticated machine learning algorithms, new challenges in the context of explanations, accountability, and trust towards such systems constantly arise. In this work, we systematically review the literature on explanations in advice-giving systems. This is a family of systems that includes recommender systems, which is one of the most successful classes of advice-giving software in practice. We investigate the purposes of explanations as well as how they are generated, presented to users, and evaluated. As a result, we derive a novel comprehensive taxonomy of aspects to be considered when designing explanation facilities for current and future decision support systems. The taxonomy includes a variety of different facets, such as explanation objective, responsiveness, content and presentation. Moreover, we identified several challenges that remain unaddressed so far, for example related to fine-grained issues associated with the presentation of explanations and how explanation facilities are evaluated.},
  note          = {Pre SFI},
  internal-note = {NOTE(review): DOI suffix (s11257-017-9195-0) and volume 27 suggest the journal issue appeared in 2017; year/date say 2020 -- verify against the publisher record.},
  keywords      = {Artificial Intelligence, Decision Support System, Expert System, Explanation, Knowledge-based system, Machine Learning, Recommender systems, Systematic review, Trust, WP2: User Modeling Personalization and Engagement},
  pubstate      = {published},
  tppubtype     = {article}
}
Alain D. Starke; Martijn C. Willemsen; Chris C.P. Snijders
With a little help from my peers: depicting social norms in a recommender interface to promote energy conservation Conference
no. March 2020, 2020.
Abstract | BibTeX | Tags: Decision Support System, Human computer interaction, Human-centered computing, Information Systems, User studies | Links:
@inproceedings{Starke2020b,
  title         = {With a little help from my peers: depicting social norms in a recommender interface to promote energy conservation},
  author        = {Starke, Alain D. and Willemsen, Martijn C. and Snijders, Chris C. P.},
  url           = {https://dl.acm.org/doi/10.1145/3377325.3377518},
  doi           = {10.1145/3377325.3377518},
  year          = {2020},
  date          = {2020-03-17},
  number        = {March 2020},
  pages         = {1--11},
  abstract      = {How can recommender interfaces help users to adopt new behaviors? In the behavioral change literature, nudges and norms are studied to understand how to convince people to take action (e.g. towel re-use is boosted when stating that `75% of hotel guests' do so), but what is advised is typically not personalized. Most recommender systems know what to recommend in a personalized way, but not much research has considered how to present such advice to help users to change their current habits. We examine the value of presenting normative messages (e.g. `75% of users do X') based on actual user data in a personalized energy recommender interface called `Saving Aid'. In a study among 207 smart thermostat owners, we compared three different normative explanations (`Global', `Similar', and `Experienced' norm rates) to a non-social baseline (`kWh savings'). Although none of the norms increased the total number of chosen measures directly, we show evidence that the effect of norms seems to be mediated by the perceived feasibility of the measures. Also, how norms were presented (i.e. specific source, adoption rate) affected which measures were chosen within our Saving Aid interface.},
  internal-note = {NOTE(review): required booktitle is missing; the ACM DOI prefix 10.1145/3377325 looks like the IUI 2020 proceedings -- confirm and add booktitle.},
  keywords      = {Decision Support System, Human computer interaction, Human-centered computing, Information Systems, User studies},
  pubstate      = {published},
  tppubtype     = {conference}
}