About us
Publications
Home
Publications
2020
Ingrid Nunes; Dietmar Jannach
A Systematic Review and Taxonomy of Explanations in Decision Support and Recommender Systems Journal Article
In: User Modeling and User-Adapted Interaction, vol. 27, no. 3-5, pp. 393-444, 2020, (Pre SFI).
Abstract | BibTeX | Tags: Artificial Intelligence, Decision Support System, Expert System, Explanation, Knowledge-based system, Machine Learning, Recommender systems, Systematic review, Trust, WP2: User Modeling Personalization and Engagement | Links:
@article{Nunes2020,
  title         = {A Systematic Review and Taxonomy of Explanations in Decision Support and Recommender Systems},
  author        = {Nunes, Ingrid and Jannach, Dietmar},
  url           = {https://arxiv.org/pdf/2006.08672.pdf},
  doi           = {10.1007/s11257-017-9195-0},
  year          = {2020},
  date          = {2020-06-15},
  journal       = {User Modeling and User-Adapted Interaction},
  volume        = {27},
  number        = {3--5},
  pages         = {393--444},
  abstract      = {With the recent advances in the field of artificial intelligence, an increasing number of decision-making tasks are delegated to software systems. A key requirement for the success and adoption of such systems is that users must trust system choices or even fully automated decisions. To achieve this, explanation facilities have been widely investigated as a means of establishing trust in these systems since the early years of expert systems. With today's increasingly sophisticated machine learning algorithms, new challenges in the context of explanations, accountability, and trust towards such systems constantly arise. In this work, we systematically review the literature on explanations in advice-giving systems. This is a family of systems that includes recommender systems, which is one of the most successful classes of advice-giving software in practice. We investigate the purposes of explanations as well as how they are generated, presented to users, and evaluated. As a result, we derive a novel comprehensive taxonomy of aspects to be considered when designing explanation facilities for current and future decision support systems. The taxonomy includes a variety of different facets, such as explanation objective, responsiveness, content and presentation. Moreover, we identified several challenges that remain unaddressed so far, for example related to fine-grained issues associated with the presentation of explanations and how explanation facilities are evaluated.},
  note          = {Pre SFI},
  keywords      = {Artificial Intelligence, Decision Support System, Expert System, Explanation, Knowledge-based system, Machine Learning, Recommender systems, Systematic review, Trust, WP2: User Modeling Personalization and Engagement},
  pubstate      = {published},
  tppubtype     = {article},
  internal-note = {NOTE(review): DOI 10.1007/s11257-017-9195-0 and vol. 27 no. 3--5 correspond to a 2017 UMUAI issue, but year/date say 2020 -- confirm the intended publication year},
}
2019
Beishui Liao; Marija Slavkovik; Leendert van der Torre
Building Jiminy Cricket: An architecture for moral agreements among stakeholders Conference
Proceedings of the 2019 AAAI/ACM Conference on AI, Ethics, and Society, 2019, (Pre SFI).
Abstract | BibTeX | Tags: Artificial Intelligence, WP2: User Modeling Personalization and Engagement | Links:
@inproceedings{Liao2019,
  title     = {Building {Jiminy Cricket}: An architecture for moral agreements among stakeholders},
  author    = {Liao, Beishui and Slavkovik, Marija and van der Torre, Leendert},
  url       = {https://arxiv.org/pdf/1812.04741.pdf},
  year      = {2019},
  date      = {2019-03-07},
  booktitle = {Proceedings of the 2019 {AAAI/ACM} Conference on {AI}, Ethics, and Society},
  pages     = {147--153},
  abstract  = {An autonomous system is constructed by a manufacturer, operates in a society subject to norms and laws, and is interacting with end-users. We address the challenge of how the moral values and views of all stakeholders can be integrated and reflected in the moral behaviour of the autonomous system. We propose an artificial moral agent architecture that uses techniques from normative systems and formal argumentation to reach moral agreements among stakeholders. We show how our architecture can be used not only for ethical practical reasoning and collaborative decision-making, but also for the explanation of such moral behavior.},
  note      = {Pre SFI},
  keywords  = {Artificial Intelligence, WP2: User Modeling Personalization and Engagement},
  pubstate  = {published},
  tppubtype = {conference},
}
challenge of how the moral values and views of all stakeholders can be integrated
and reflected in the moral behaviour of the autonomous system. We propose an artificial moral agent architecture that uses techniques from normative systems and
formal argumentation to reach moral agreements among stakeholders. We show
how our architecture can be used not only for ethical practical reasoning and collaborative decision-making, but also for the explanation of such moral behavior.
2018
Sjur Dyrkolbotn; Truls Pedersen; Marija Slavkovik
On the distinction between implicit and explicit ethical agency Conference
Proceedings of the 2018 AAAI/ACM Conference on AI, Ethics, and Society (AIES '18), 2018, (Pre SFI).
Abstract | BibTeX | Tags: Agency, Artificial Intelligence, Autonomy, Epistemology, Ethics, WP2: User Modeling Personalization and Engagement | Links:
@inproceedings{Dyrkolbotn2018,
  title     = {On the distinction between implicit and explicit ethical agency},
  author    = {Dyrkolbotn, Sjur and Pedersen, Truls and Slavkovik, Marija},
  url       = {https://dl.acm.org/doi/pdf/10.1145/3278721.3278769},
  doi       = {10.1145/3278721.3278769},
  year      = {2018},
  date      = {2018-12-01},
  booktitle = {Proceedings of the 2018 {AAAI/ACM} Conference on {AI}, Ethics, and Society ({AIES} '18)},
  pages     = {74--80},
  abstract  = {With recent advances in artificial intelligence and the rapidly increasing importance of autonomous intelligent systems in society, it is becoming clear that artificial agents will have to be designed to comply with complex ethical standards. As we work to develop moral machines, we also push the boundaries of existing legal categories. The most pressing question is what kind of ethical decision-making our machines are actually able to engage in. Both in law and in ethics, the concept of agency forms a basis for further legal and ethical categorisations, pertaining to decision-making ability. Hence, without a cross-disciplinary understanding of what we mean by ethical agency in machines, the question of responsibility and liability cannot be clearly addressed. Here we make first steps towards a comprehensive definition, by suggesting ways to distinguish between implicit and explicit forms of ethical agency.},
  note      = {Pre SFI},
  keywords  = {Agency, Artificial Intelligence, Autonomy, Epistemology, Ethics, WP2: User Modeling Personalization and Engagement},
  pubstate  = {published},
  tppubtype = {conference},
}