2021
|
“Serving Each User”: Supporting Different Eating Goals Through a Multi-List Recommender Interface Inproceedings Alain Dominique Starke; Edis Asotic; Christoph Trattner In: Association for Computing Machinery (ACM), 2021. @inproceedings{cristin1956504,
title = {“Serving Each User”: Supporting Different Eating Goals Through a Multi-List Recommender Interface},
author = {Alain Dominique Starke and Edis Asotic and Christoph Trattner},
url = {https://app.cristin.no/results/show.jsf?id=1956504, Cristin},
doi = {10.1145/3460231.3474232},
year = {2021},
date = {2021-01-01},
booktitle = {Association for Computing Machinery (ACM)},
keywords = {Cristin},
pubstate = {published},
tppubtype = {inproceedings}
}
|
Exploring the effects of natural language justifications on food recommender systems Inproceedings Cataldo Musto; Alain Dominique Starke; Christoph Trattner; Amon Rapp; Giovanni Semeraro In: Association for Computing Machinery (ACM), 2021. @inproceedings{cristin1956541,
  title     = {Exploring the effects of natural language justifications on food recommender systems},
  author    = {Cataldo Musto and Alain Dominique Starke and Christoph Trattner and Amon Rapp and Giovanni Semeraro},
  url       = {https://app.cristin.no/results/show.jsf?id=1956541, Cristin},
  year      = {2021},
  date      = {2021-01-01},
  booktitle = {Association for Computing Machinery (ACM)},
  keywords  = {Cristin},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
|
Changing Salty Food Preferences with Visual and Textual Explanations in a Search Interface Inproceedings Arngeir Berge; Vegard Velle Sjøen; Alain Dominique Starke; Christoph Trattner In: Association for Computing Machinery (ACM), 2021. @inproceedings{cristin1956563,
  title     = {Changing Salty Food Preferences with Visual and Textual Explanations in a Search Interface},
  author    = {Arngeir Berge and Vegard Velle Sjøen and Alain Dominique Starke and Christoph Trattner},
  url       = {https://app.cristin.no/results/show.jsf?id=1956563, Cristin},
  year      = {2021},
  date      = {2021-01-01},
  booktitle = {Association for Computing Machinery (ACM)},
  keywords  = {Cristin},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
|
Beyond Algorithmic Fairness in Recommender Systems Inproceedings Mehdi Elahi; Himan Abdollahpouri; Masoud Mansoury; Helma Torkamaan In: Association for Computing Machinery (ACM), 2021. @inproceedings{cristin1956964,
title = {Beyond Algorithmic Fairness in Recommender Systems},
author = {Mehdi Elahi and Himan Abdollahpouri and Masoud Mansoury and Helma Torkamaan},
url = {https://app.cristin.no/results/show.jsf?id=1956964, Cristin
https://dl.acm.org/doi/abs/10.1145/3450614.3461685},
doi = {10.1145/3450614.3461685},
year = {2021},
date = {2021-01-01},
booktitle = {Association for Computing Machinery (ACM)},
keywords = {Cristin},
pubstate = {published},
tppubtype = {inproceedings}
}
|
MORS 2021: 1st Workshop on Multi Objective Recommender Systems Inproceedings Himan Abdollahpouri; Mehdi Elahi; Masoud Mansoury; Shaghayegh Sahebi; Zahra Nazari; Allison Chaney; Babak Loni In: Association for Computing Machinery (ACM), 2021. @inproceedings{cristin1956978,
  title     = {MORS 2021: 1st Workshop on Multi Objective Recommender Systems},
  author    = {Himan Abdollahpouri and Mehdi Elahi and Masoud Mansoury and Shaghayegh Sahebi and Zahra Nazari and Allison Chaney and Babak Loni},
  url       = {https://app.cristin.no/results/show.jsf?id=1956978, Cristin
https://dl.acm.org/doi/10.1145/3460231.3470936},
  year      = {2021},
  date      = {2021-01-01},
  booktitle = {Association for Computing Machinery (ACM)},
  abstract  = {Historically, the main criterion for a successful recommender system was the relevance of the recommended items to the user. In other words, the only objective for the recommendation algorithm was to learn user’s preferences for different items and generate recommendations accordingly. However, real-world recommender systems are well beyond a simple objective and often need to take into account multiple objectives simultaneously. These objectives can be either from the users’ perspective or they could come from other stakeholders such as item providers or any party that could be impacted by the recommendations. Such multi-objective and multi-stakeholder recommenders present unique challenges and these challenges were the focus of the MORS workshop.},
  keywords  = {Cristin},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Historically, the main criterion for a successful recommender system was the relevance of the recommended items to the user. In other words, the only objective for the recommendation algorithm was to learn user’s preferences for different items and generate recommendations accordingly. However, real-world recommender systems are well beyond a simple objective and often need to take into account multiple objectives simultaneously. These objectives can be either from the users’ perspective or they could come from other stakeholders such as item providers or any party that could be impacted by the recommendations. Such multi-objective and multi-stakeholder recommenders present unique challenges and these challenges were the focus of the MORS workshop. |
2020
|
Folk theories of algorithms: Understanding digital irritation Journal Article Brita Ytre-Arne; Hallvard Moe In: Media, Culture & Society, 2020, (Pre SFI). @article{Arne2020,
title = {Folk theories of algorithms: Understanding digital irritation},
author = {Brita Ytre-Arne and Hallvard Moe},
year = {2020},
date = {2020-12-31},
journal = {Media, Culture & Society},
note = {Pre SFI},
keywords = {WP1: Understanding Media Experiences},
pubstate = {published},
tppubtype = {article}
}
|
A knowledge-graph platform for newsrooms Journal Article Arne Berven; Ole A. Christensen; Sindre Moldeklev; Andreas Lothe Opdahl; Kjetil A. Villanger In: Computers in Industry, vol. 123, no. 103321, 2020, (Pre SFI). @article{Berven2020,
title = {A knowledge-graph platform for newsrooms},
author = {Arne Berven and Ole A. Christensen and Sindre Moldeklev and Andreas Lothe Opdahl and Kjetil A. Villanger},
url = {https://www.sciencedirect.com/science/article/pii/S0166361520305558},
doi = {10.1016/j.compind.2020.103321},
year = {2020},
date = {2020-12-01},
journal = {Computers in Industry},
volume = {123},
number = {103321},
abstract = {Journalism is challenged by digitalisation and social media, resulting in lower subscription numbers and reduced advertising income. Information and communication techniques (ICT) offer new opportunities. Our research group is collaborating with a software developer of news production tools for the international market to explore how social, open, and other data sources can be leveraged for journalistic purposes. We have developed an architecture and prototype called News Hunter that uses knowledge graphs, natural-language processing (NLP), and machine learning (ML) together to support journalists. Our focus is on combining existing data sources and computation and storage techniques into a flexible architecture for news journalism. The paper presents News Hunter along with plans and possibilities for future work.},
note = {Pre SFI},
keywords = {Computational journalism, Journalistic knowledge platforms, Knowledge graphs, Machine learning (ML), Natural-language processing (NLP), Newsroom systems, Ontology, OWL, RDF, Semantic technologies, WP3: Media Content Production and Analysis},
pubstate = {published},
tppubtype = {article}
}
Journalism is challenged by digitalisation and social media, resulting in lower subscription numbers and reduced advertising income. Information and communication techniques (ICT) offer new opportunities. Our research group is collaborating with a software developer of news production tools for the international market to explore how social, open, and other data sources can be leveraged for journalistic purposes. We have developed an architecture and prototype called News Hunter that uses knowledge graphs, natural-language processing (NLP), and machine learning (ML) together to support journalists. Our focus is on combining existing data sources and computation and storage techniques into a flexible architecture for news journalism. The paper presents News Hunter along with plans and possibilities for future work. |
Experiments in Lifelog Organisation and Retrieval at NTCIR Book Chapter Cathal Gurrin; Hideo Joho; Frank Hopfgartner; Liting Zhou; Rami Albatal; Graham Healy; Duc-Tien Dang Nguyen In: Evaluating Information Retrieval and Access Tasks, Chapter 13, pp. 187-203, Springer, Singapore, 2020, (Pre SFI). @inbook{Gurrin2020,
title = {Experiments in Lifelog Organisation and Retrieval at NTCIR},
author = {Cathal Gurrin and Hideo Joho and Frank Hopfgartner and Liting Zhou and Rami Albatal and Graham Healy and Duc-Tien Dang Nguyen},
url = {https://www.researchgate.net/publication/344047066_Experiments_in_Lifelog_Organisation_and_Retrieval_at_NTCIR},
doi = {10.1007/978-981-15-5554-1_13},
year = {2020},
date = {2020-12-01},
booktitle = {Evaluating Information Retrieval and Access Tasks},
pages = {187--203},
publisher = {Springer},
address = {Singapore},
chapter = {13},
abstract = {Lifelogging can be described as the process by which individuals use various software and hardware devices to gather large archives of multimodal personal data from multiple sources and store them in a personal data archive, called a lifelog. The Lifelog task at NTCIR was a comparative benchmarking exercise with the aim of encouraging research into the organisation and retrieval of data from multimodal lifelogs. The Lifelog task ran for over 4 years from NTCIR-12 until NTCIR-14 (2015.02–2019.06); it supported participants to submit to five subtasks, each tackling a different challenge related to lifelog retrieval. In this chapter, a motivation is given for the Lifelog task and a review of progress since NTCIR-12 is presented. Finally, the lessons learned and challenges within the domain of lifelog retrieval are presented.},
note = {Pre SFI},
keywords = {WP3: Media Content Production and Analysis},
pubstate = {published},
tppubtype = {inbook}
}
Lifelogging can be described as the process by which individuals use various software and hardware devices to gather large archives of multimodal personal data from multiple sources and store them in a personal data archive, called a lifelog. The Lifelog task at NTCIR was a comparative benchmarking exercise with the aim of encouraging research into the organisation and retrieval of data from multimodal lifelogs. The Lifelog task ran for over 4 years from NTCIR-12 until NTCIR-14 (2015.02–2019.06); it supported participants to submit to five subtasks, each tackling a different challenge related to lifelog retrieval. In this chapter, a motivation is given for the Lifelog task and a review of progress since NTCIR-12 is presented. Finally, the lessons learned and challenges within the domain of lifelog retrieval are presented. |
Gender and sentiment, critics and authors: a dataset of Norwegian book reviews Journal Article Samia Touileb; Lilja Øvrelid; Erik Velldal In: Gender Bias in Natural Language Processing. Association for Computational Linguistics, 2020, (Pre SFI). @article{Touileb2020,
  title     = {Gender and sentiment, critics and authors: a dataset of Norwegian book reviews},
  author    = {Samia Touileb and Lilja Øvrelid and Erik Velldal},
  url       = {https://www.aclweb.org/anthology/2020.gebnlp-1.11.pdf},
  year      = {2020},
  date      = {2020-12-01},
  journal   = {Gender Bias in Natural Language Processing. Association for Computational Linguistics},
  abstract  = {Gender bias in models and datasets is widely studied in NLP. The focus has usually been on analysing how females and males express themselves, or how females and males are described. However, a less studied aspect is the combination of these two perspectives, how female and male describe the same or opposite gender. In this paper, we present a new gender annotated sentiment dataset of critics reviewing the works of female and male authors. We investigate if this newly annotated dataset contains differences in how the works of male and female authors are critiqued, in particular in terms of positive and negative sentiment. We also explore the differences in how this is done by male and female critics. We show that there are differences in how critics assess the works of authors of the same or opposite gender. For example, male critics rate crime novels written by females, and romantic and sentimental works written by males, more negatively.},
  note      = {Pre SFI},
  keywords  = {WP5: Norwegian Language Technologies},
  pubstate  = {published},
  tppubtype = {article}
}
Gender bias in models and datasets is widely studied in NLP. The focus has usually been on analysing how females and males express themselves, or how females and males are described. However, a less studied aspect is the combination of these two perspectives, how female and male describe the same or opposite gender. In this paper, we present a new gender annotated sentiment dataset of critics reviewing the works of female and male authors. We investigate if this newly annotated dataset contains differences in how the works of male and female authors are critiqued, in particular in terms of positive and negative sentiment. We also explore the differences in how this is done by male and female critics. We show that there are differences in how critics assess the works of authors of the same or opposite gender. For example, male critics rate crime novels written by females, and romantic and sentimental works written by males, more negatively. |
Improving sentiment analysis with multi-task learning of negation Journal Article J Barnes; Erik Velldal; Lilja Øvrelid In: 2020, (Pre SFI). @article{Barnes2020,
title = {Improving sentiment analysis with multi-task learning of negation},
author = {J Barnes and Erik Velldal and Lilja Øvrelid},
url = {https://www.cambridge.org/core/journals/natural-language-engineering/article/abs/improving-sentiment-analysis-with-multitask-learning-of-negation/14EF2B829EC4B8EC29E7C0C5C77B95B0},
year = {2020},
date = {2020-11-11},
journal = {Natural Language Engineering},
note = {Pre SFI},
keywords = {WP5: Norwegian Language Technologies},
pubstate = {published},
tppubtype = {article}
}
|
Changing news use. Unchanged news experiences? Book Irene Costera Meijer; Tim Groot Kormelink Routledge, 2020, ISBN: 9780367485788, (Pre SFI). @book{Meijer2020c,
title = {Changing news use. Unchanged news experiences?},
author = {Irene Costera Meijer and Tim Groot Kormelink},
url = {https://www.routledge.com/Changing-News-Use-Unchanged-News-Experiences/Meijer-Kormelink/p/book/9780367485788},
isbn = {9780367485788},
year = {2020},
date = {2020-11-09},
publisher = {Routledge},
abstract = {Changing News Use pulls from empirical research to introduce and describe how changing news user patterns and journalism practices have been mutually disruptive, exploring what journalists and the news media can learn from these changes. Based on 15 years of audience research, the authors provide an in-depth description of what people do with news and how this has diversified over time, from reading, watching, and listening to a broader spectrum of user practices including checking, scrolling, tagging, and avoiding. By emphasizing people’s own experience of journalism, this book also investigates what two prominent audience measurements – clicking and spending time – mean from a user perspective. The book outlines ways to overcome the dilemma of providing what people apparently want (attention-grabbing news features) and delivering what people apparently need (what journalists see as important information), suggesting alternative ways to investigate and become sensitive to the practices, preferences, and pleasures of audiences and discussing what these research findings might mean for everyday journalism practice. The book is a valuable and timely resource for academics and researchers interested in the fields of journalism studies, sociology, digital media, and communication.},
note = {Pre SFI},
keywords = {WP1: Understanding Media Experiences},
pubstate = {published},
tppubtype = {book}
}
Changing News Use pulls from empirical research to introduce and describe
how changing news user patterns and journalism practices have been
mutually disruptive, exploring what journalists and the news media can
learn from these changes.
Based on 15 years of audience research, the authors provide an in-depth
description of what people do with news and how this has diversified
over time, from reading, watching, and listening to a broader spectrum
of user practices including checking, scrolling, tagging, and avoiding.
By emphasizing people’s own experience of journalism, this book also
investigates what two prominent audience measurements – clicking and
spending time – mean from a user perspective. The book outlines ways to
overcome the dilemma of providing what people apparently want (attention-grabbing
news features) and delivering what people apparently need (what
journalists see as important information), suggesting alternative ways to
investigate and become sensitive to the practices, preferences, and pleasures
of audiences and discussing what these research findings might mean for
everyday journalism practice.
The book is a valuable and timely resource for academics and researchers
interested in the fields of journalism studies, sociology, digital media, and
communication. |
The complexity landscape of outcome determination in judgment aggregation Journal Article Ulle Endriss; Ronald de Haan; Jerôme Lang; Marija Slavkovik In: Journal of Artificial Intelligence Research, vol. 69, pp. 687–731, 2020, (Pre SFI). @article{Endriss2020,
title = {The complexity landscape of outcome determination in judgment aggregation},
author = {Ulle Endriss and Ronald de Haan and Jérôme Lang and Marija Slavkovik},
url = {https://www.jair.org/index.php/jair/article/view/11970/26619},
doi = {10.1613/jair.1.11970},
year = {2020},
date = {2020-11-04},
journal = {Journal of Artificial Intelligence Research},
volume = {69},
pages = {687--731},
abstract = {We provide a comprehensive analysis of the computational complexity of the outcome determination problem for the most important aggregation rules proposed in the literature on logic-based judgment aggregation. Judgment aggregation is a powerful and flexible framework for studying problems of collective decision making that has attracted interest in a range of disciplines, including Legal Theory, Philosophy, Economics, Political Science, and Artificial Intelligence. The problem of computing the outcome for a given list of individual judgments to be aggregated into a single collective judgment is the most fundamental algorithmic challenge arising in this context. Our analysis applies to several different variants of the basic framework of judgment aggregation that have been discussed in the literature, as well as to a new framework that encompasses all existing such frameworks in terms of expressive power and representational succinctness.},
note = {Pre SFI},
keywords = {WP2: User Modeling Personalization and Engagement},
pubstate = {published},
tppubtype = {article}
}
We provide a comprehensive analysis of the computational complexity of the outcome determination problem for the most important aggregation rules proposed in the literature on logic-based judgment aggregation. Judgment aggregation is a powerful and flexible framework for studying problems of collective decision making that has attracted interest in a range of disciplines, including Legal Theory, Philosophy, Economics, Political Science, and Artificial Intelligence. The problem of computing the outcome for a given list of individual judgments to be aggregated into a single collective judgment is the most fundamental algorithmic challenge arising in this context. Our analysis applies to several different variants of the basic framework of judgment aggregation that have been discussed in the literature, as well as to a new framework that encompasses all existing such frameworks in terms of expressive power and representational succinctness. |
AI-KG: an automatically generated knowledge graph of artificial intelligence Conference Danilo Dessì; Francesco Osborne; Diego Reforgiato Recupero; Davide Buscaldi; Enrico Motta; Harald Sack International Semantic Web Conference, Springer, 2020, (Pre SFI). @conference{Dessì2020,
title = {AI-KG: an automatically generated knowledge graph of artificial intelligence},
author = {Danilo Dessì and Francesco Osborne and Diego Reforgiato Recupero and Davide Buscaldi and Enrico Motta and Harald Sack},
url = {https://www.researchgate.net/publication/344991487_AI-KG_an_Automatically_Generated_Knowledge_Graph_of_Artificial_Intelligence},
year = {2020},
date = {2020-11-01},
booktitle = {International Semantic Web Conference},
pages = {127--143},
publisher = {Springer},
abstract = {Scientific knowledge has been traditionally disseminated and preserved through research articles published in journals, conference proceedings , and online archives. However, this article-centric paradigm has been often criticized for not allowing to automatically process, categorize , and reason on this knowledge. An alternative vision is to generate a semantically rich and interlinked description of the content of research publications. In this paper, we present the Artificial Intelligence Knowledge Graph (AI-KG), a large-scale automatically generated knowledge graph that describes 820K research entities. AI-KG includes about 14M RDF triples and 1.2M reified statements extracted from 333K research publications in the field of AI, and describes 5 types of entities (tasks, methods, metrics, materials, others) linked by 27 relations. AI-KG has been designed to support a variety of intelligent services for analyzing and making sense of research dynamics, supporting researchers in their daily job, and helping to inform decision-making in funding bodies and research policymakers. AI-KG has been generated by applying an automatic pipeline that extracts entities and relationships using three tools: DyGIE++, Stanford CoreNLP, and the CSO Classifier. It then integrates and filters the resulting triples using a combination of deep learning and semantic technologies in order to produce a high-quality knowledge graph. This pipeline was evaluated on a manually crafted gold standard, yielding competitive results. AI-KG is available under CC BY 4.0 and can be downloaded as a dump or queried via a SPARQL endpoint.},
note = {Pre SFI},
keywords = {WP3: Media Content Production and Analysis},
pubstate = {published},
tppubtype = {conference}
}
Scientific knowledge has been traditionally disseminated and preserved through research articles published in journals, conference proceedings , and online archives. However, this article-centric paradigm has been often criticized for not allowing to automatically process, categorize , and reason on this knowledge. An alternative vision is to generate a semantically rich and interlinked description of the content of research publications. In this paper, we present the Artificial Intelligence Knowledge Graph (AI-KG), a large-scale automatically generated knowledge graph that describes 820K research entities. AI-KG includes about 14M RDF triples and 1.2M reified statements extracted from 333K research publications in the field of AI, and describes 5 types of entities (tasks, methods, metrics, materials, others) linked by 27 relations. AI-KG has been designed to support a variety of intelligent services for analyzing and making sense of research dynamics, supporting researchers in their daily job, and helping to inform decision-making in funding bodies and research policymakers. AI-KG has been generated by applying an automatic pipeline that extracts entities and relationships using three tools: DyGIE++, Stanford CoreNLP, and the CSO Classifier. It then integrates and filters the resulting triples using a combination of deep learning and semantic technologies in order to produce a high-quality knowledge graph. This pipeline was evaluated on a manually crafted gold standard, yielding competitive results. AI-KG is available under CC BY 4.0 and can be downloaded as a dump or queried via a SPARQL endpoint. |
Operationalizing exposure diversity. Journal Article Hallvard Moe; Jan Fredrik Hovden; Kari Karppinen In: European Journal of Communication, pp. 1-2, 2020, (Pre SFI). @article{Moe2020,
title = {Operationalizing exposure diversity.},
author = {Hallvard Moe and Jan Fredrik Hovden and Kari Karppinen},
url = {https://journals.sagepub.com/doi/pdf/10.1177/0267323120966849},
doi = {10.1177/0267323120966849},
year = {2020},
date = {2020-10-29},
journal = {European Journal of Communication},
pages = {1--2},
abstract = {The concept of exposure diversity, the diversity of information that people actually access and use, has recently gained prominence in media policy debates. This aspect of media diversity, however, remains difficult to define, measure or implement in actual policy. In this article, we propose an empirical approach that operationalizes exposure diversity in terms of news and current affairs providers in the media repertoire of different social groups. This can be studied through cluster analysis of survey data on respondents’ combinations of use of different media providers and outlets. The article first discusses exposure diversity as a media policy aim. We then outline our proposal on how to take the debate a step further through empirical analysis of media repertoires, with an illustration of how such an analysis may be conducted using survey data from Norway.},
note = {Pre SFI},
keywords = {Exposure Diversity, Media Diversity, Media Policy, Media Repertoires, Survey, WP1: Understanding Media Experiences},
pubstate = {published},
tppubtype = {article}
}
Addressing the New Item problem in video recommender systems by incorporation of visual features with restricted Boltzmann machines. Journal Article Naieme Hazrati; Mehdi Elahi In: Expert Systems, vol. e12645, pp. 1-20, 2020, (Pre SFI). @article{Hazrati2020,
title = {Addressing the New Item problem in video recommender systems by incorporation of visual features with restricted Boltzmann machines.},
author = {Naieme Hazrati and Mehdi Elahi},
url = {https://onlinelibrary.wiley.com/doi/epdf/10.1111/exsy.12645},
doi = {10.1111/exsy.12645},
year = {2020},
date = {2020-10-19},
journal = {Expert Systems},
volume = {e12645},
pages = {1--20},
abstract = {Over the past years, the research of video recommender systems (RSs) has been mainly focussed on the development of novel algorithms. Although beneficial, still any algorithm may fail to recommend video items that the system has no form of data associated to them (New Item Cold Start). This problem occurs when a new item is added to the catalogue of the system and no data are available for that item. In content‐based RSs, the video items are typically represented by semantic attributes, when generating recommendations. These attributes require a group of experts or users for annotation, and still, the generated recommendations might not capture a complete picture of the users' preferences, for example, the visual tastes of users on video style. This article addresses this problem by proposing recommendation based on novel visual features that do not require human annotation and can represent visual aspects of video items. We have designed a novel evaluation methodology considering three realistic scenarios, that is, (a) extreme cold start, (b) moderate cold start and (c) warm‐start scenario. We have conducted a set of comprehensive experiments, and our results have shown the superior performance of recommendations based on visual features, in all of the evaluation scenarios.},
note = {Pre SFI},
keywords = {Cold Start, Multimedia, New Item, Recommender systems, Visually Aware, WP2: User Modeling Personalization and Engagement},
pubstate = {published},
tppubtype = {article}
}
Truth be told: Fake news detection using user reactions on reddit Journal Article Vinay Setty; Erlend Rekve In: Proceedings of the 29th acm international conference on information knowledge management, pp. 3325–3328, 2020, (Pre SFI). @article{Setty2020,
title = {Truth be told: Fake news detection using user reactions on reddit},
author = {Vinay Setty and Erlend Rekve},
url = {https://dl.acm.org/doi/pdf/10.1145/3340531.3417463},
doi = {10.1145/3340531.3417463},
year = {2020},
date = {2020-10-01},
journal = {Proceedings of the 29th acm international conference on information knowledge management},
pages = {3325--3328},
abstract = {In this paper, we provide a large dataset for fake news detection using social media comments. The dataset consists of 12,597 claims (of which 63% are labelled as fake) from four different sources (Snopes, Poltifact, Emergent and Twitter). The novel part of the dataset is that it also includes over 662K social media discussion comments related to these claims from Reddit. We make this dataset public for the research community. In addition, for the task of fake news detection using social media comments, we provide a simple but strong baseline solution deep neural network model which beats several solutions in the literature.},
note = {Pre SFI},
keywords = {Deep neural networks, Fake news detection, Reddit comments, WP3: Media Content Production and Analysis},
pubstate = {published},
tppubtype = {article}
}
Temporal ambivalences in smartphone use: Conflicting flows, conflicting responsibilities. Journal Article Brita Ytre-Arne; Trine Syvertsen; Hallvard Moe; Faltin Karlsen In: New Media and Society, vol. 22, no. 9, pp. 1715–1732, 2020, (Pre SFI). @article{Arne2020b,
title = {Temporal ambivalences in smartphone use: Conflicting flows, conflicting responsibilities.},
author = {Brita Ytre-Arne and Trine Syvertsen and Hallvard Moe and Faltin Karlsen},
url = {https://journals.sagepub.com/doi/pdf/10.1177/1461444820913561},
doi = {10.1177/1461444820913561},
year = {2020},
date = {2020-09-03},
journal = {New Media and Society},
volume = {22},
number = {9},
pages = {1715--1732},
abstract = {This article explores implications of the central position of the smartphone in an age of constant connectivity. Based on a qualitative study of 50 informants, we ask how users experience and handle temporal ambivalences in everyday smartphone use, drawing on the concepts flow and responsibilization to conceptualize central dimensions of such ambivalences. The notion of conflicting flows illuminates how brief checking cycles expand at the expense of other activities, resulting in a temporal conflict experienced by users. Responsibilization points to how users take individual responsibility for managing such conflicting flows, and to how this practice is difficult and conflict-ridden. We conclude that while individual time management is often framed as the solution to temporal conflicts, such attempts at regulating smartphone use appear inadequate. Our conceptualization of temporal ambivalence offers a more nuanced understanding of why this is the case.},
note = {Pre SFI},
keywords = {Ambivalence, Flow, Media Use, Responsibilization, Smartphone, Time Management, WP1: Understanding Media Experiences},
pubstate = {published},
tppubtype = {article}
}
This article explores implications of the central position of the smartphone in an age of constant connectivity. Based on a qualitative study of 50 informants, we ask how users experience and handle temporal ambivalences in everyday smartphone use, drawing on the concepts flow and responsibilization to conceptualize central dimensions of such ambivalences. The notion of conflicting flows illuminates how brief checking cycles expand at the expense of other activities, resulting in a temporal conflict experienced by users. Responsibilization points to how users take individual responsibility for managing such conflicting flows, and to how this practice is difficult and conflict-ridden. We conclude that while individual time management is often framed as the solution to temporal conflicts, such attempts at regulating smartphone use appear inadequate. Our conceptualization of temporal ambivalence offers a more nuanced understanding of why this is the case. |
Changing News Use. Unchanged news experiences? Book Irene Costera Meijer; Tim Groot Kormelink 1st, Routledge, London & New York, 2020, ISBN: 9781003041719, (Pre SFI). @book{Meijer2020,
title = {Changing News Use. Unchanged news experiences?},
author = {Irene Costera Meijer and Tim Groot Kormelink},
url = {https://www.researchgate.net/publication/345018999_Changing_News_Use_Unchanged_News_Experiences},
doi = {10.4324/9781003041719},
isbn = {9781003041719},
year = {2020},
date = {2020-09-01},
publisher = {Routledge},
address = {London \& New York},
edition = {First},
note = {Pre SFI},
keywords = {Changing News Use, Journalism, News, WP1: Understanding Media Experiences},
pubstate = {published},
tppubtype = {book}
}
|
Audiences’ Communicative Agency in a Datafied Age: Interpretative, Relational and Increasingly Prospective. Journal Article Brita Ytre-Arne; Ranjana Das In: Communication Theory, vol. 0, no. C, pp. 1-19, 2020, ISSN: 1050–3293, (Pre SFI). @article{Arne2020c,
title = {Audiences’ Communicative Agency in a Datafied Age: Interpretative, Relational and Increasingly Prospective.},
author = {Brita Ytre-Arne and Ranjana Das},
url = {https://doi.org/10.1093/ct/qtaa018},
doi = {10.1093/ct/qtaa018},
issn = {1050-3293},
year = {2020},
date = {2020-08-24},
journal = {Communication Theory},
volume = {0},
number = {C},
pages = {1--19},
abstract = {This article develops a conceptualization of audience agency in the face of datafication. We consider how people, as audiences and users of media and technologies, face transforming communicative conditions, and how these conditions challenge the power potentials of audiences in processes of communication—that is, their communicative agency. To develop our conceptualization, we unpack the concept of audiences’ communicative agency by examining its foundations in communication scholarship, in reception theory and sociology, arguing that agency is understood as interpretative and relational, and applied to make important normative assessments. We further draw on emerging scholarship on encounters with data in the everyday to discuss how audience agency is now challenged by datafication, arguing that communicative agency is increasingly prospective in a datafied age. Thereby, we provide a theoretical conceptualization for further analysis of audiences in transforming communicative conditions.},
note = {Pre SFI},
keywords = {Agency, Audiences, Datafication, Everyday, Interpretation, Prospection, Reception Theory, Sociological Theory, Structure, Technology, WP1: Understanding Media Experiences},
pubstate = {published},
tppubtype = {article}
}
This article develops a conceptualization of audience agency in the face of datafication. We consider how people, as audiences and users of media and technologies, face transforming communicative conditions, and how these conditions challenge the power potentials of audiences in processes of communication—that is, their communicative agency. To develop our conceptualization, we unpack the concept of audiences’ communicative agency by examining its foundations in communication scholarship, in reception theory and sociology, arguing that agency is understood as interpretative and relational, and applied to make important normative assessments. We further draw on emerging scholarship on encounters with data in the everyday to discuss how audience agency is now challenged by datafication, arguing that communicative agency is increasingly prospective in a datafied age. Thereby, we provide a theoretical conceptualization for further analysis of audiences in transforming communicative conditions. |
Sentiment analysis is not solved! Assessing and probing sentiment classification Proceeding J Barnes; Lilja Øvrelid; Erik Velldal 2020, (Pre SFI). @proceedings{Barnes2020b,
title = {Sentiment analysis is not solved! Assessing and probing sentiment classification},
author = {J Barnes and Lilja Øvrelid and Erik Velldal},
url = {https://www.aclweb.org/anthology/W19-4802/},
year = {2020},
date = {2020-08-01},
note = {Pre SFI},
internal-note = {Review: entry is typed @proceedings (a whole volume) but carries paper authors and a paper title -- presumably an @inproceedings paper missing its booktitle. The URL resolves to ACL Anthology W19-4802, published 2019 -- verify year before retyping.},
keywords = {WP5: Norwegian Language Technologies},
pubstate = {published},
tppubtype = {proceedings}
}
|
Measuring Recommendation Explanation Quality: The Conflicting Goals of Explanations Conference Krisztian Balog; Filip Radlinski Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '20), New York, 2020, (Pre SFI). @conference{Balog2020,
title = {Measuring Recommendation Explanation Quality: The Conflicting Goals of Explanations},
author = {Krisztian Balog and Filip Radlinski},
url = {https://dl.acm.org/doi/pdf/10.1145/3397271.3401032},
doi = {10.1145/3397271.3401032},
year = {2020},
date = {2020-07-01},
booktitle = {Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '20)},
pages = {329--338},
address = {New York},
abstract = {Explanations have a large effect on how people respond to recommendations. However, there are many possible intentions a system may have in generating explanations for a given recommendation -from increasing transparency, to enabling a faster decision, to persuading the recipient. As a good explanation for one goal may not be good for others, we address the questions of (1) how to robustly measure if an explanation meets a given goal and (2) how the different goals interact with each other. Specifically, this paper presents a first proposal of how to measure the quality of explanations along seven common goal dimensions catalogued in the literature. We find that the seven goals are not independent, but rather exhibit strong structure. Proposing two novel explanation evaluation designs, we identify challenges in evaluation, and provide more efficient measurement approaches of explanation quality.},
note = {Pre SFI},
keywords = {HCI design and evaluation methods, Human-centered computing, Information Systems, Recommender systems, WP2: User Modeling Personalization and Engagement},
pubstate = {published},
tppubtype = {conference}
}
Explanations have a large effect on how people respond to recommendations. However, there are many possible intentions a system may have in generating explanations for a given recommendation -from increasing transparency, to enabling a faster decision, to persuading the recipient. As a good explanation for one goal may not be good for others, we address the questions of (1) how to robustly measure if an explanation meets a given goal and (2) how the different goals interact with each other. Specifically, this paper presents a first proposal of how to measure the quality of explanations along seven common goal dimensions catalogued in the literature. We find that the seven goals are not independent, but rather exhibit strong structure. Proposing two novel explanation evaluation designs, we identify challenges in evaluation, and provide more efficient measurement approaches of explanation quality. |
Analysis and design of computational news angles Journal Article Enrico Motta; Enrico Daga; Andreas Lothe Opdahl; Bjørnar Tessem In: IEEE Access, vol. 8, pp. 120613-120626, 2020, (Pre SFI). @article{Motta2020,
title = {Analysis and design of computational news angles},
author = {Enrico Motta and Enrico Daga and Andreas Lothe Opdahl and Bjørnar Tessem},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=9127417},
doi = {10.1109/ACCESS.2020.3005513},
year = {2020},
date = {2020-06-29},
journal = {IEEE Access},
volume = {8},
pages = {120613--120626},
abstract = {A key skill for a journalist is the ability to assess the newsworthiness of an event or situation. To this purpose journalists often rely on news angles, conceptual criteria that are used both i) to assess whether something is newsworthy and also ii) to shape the structure of the resulting news item. As journalism becomes increasingly computer-supported, and more and more sources of potentially newsworthy data become available in real time, it makes sense to try and equip journalistic software tools with operational versions of news angles, so that, when searching this vast data space, these tools can both identify effectively the events most relevant to the target audience, and also link them to appropriate news angles. In this paper we analyse the notion of news angle and, in particular, we i) introduce a formal framework and data schema for representing news angles and related concepts and ii) carry out a preliminary analysis and characterization of a number of commonly used news angles, both in terms of our formal model and also in terms of the computational reasoning capabilities that are needed to apply them effectively to real-world scenarios. This study provides a stepping stone towards our ultimate goal of realizing a solution capable of exploiting a library of news angles to identify potentially newsworthy events in a large journalistic data space.},
note = {Pre SFI},
keywords = {Computational journalism, Data schema, Knowledge representation, News angles, Ontology, Reasoning components., WP3: Media Content Production and Analysis},
pubstate = {published},
tppubtype = {article}
}
A key skill for a journalist is the ability to assess the newsworthiness of an event or situation. To this purpose journalists often rely on news angles, conceptual criteria that are used both i) to assess whether something is newsworthy and also ii) to shape the structure of the resulting news item. As journalism becomes increasingly computer-supported, and more and more sources of potentially newsworthy data become available in real time, it makes sense to try and equip journalistic software tools with operational versions of news angles, so that, when searching this vast data space, these tools can both identify effectively the events most relevant to the target audience, and also link them to appropriate news angles. In this paper we analyse the notion of news angle and, in particular, we i) introduce a formal framework and data schema for representing news angles and related concepts and ii) carry out a preliminary analysis and characterization of a number of commonly used news angles, both in terms of our formal model and also in terms of the computational reasoning capabilities that are needed to apply them effectively to real-world scenarios. This study provides a stepping stone towards our ultimate goal of realizing a solution capable of exploiting a library of news angles to identify potentially newsworthy events in a large journalistic data space. |
Circumvention by design - dark patterns in cookie consents for online news outlets Conference Than Htut Soe; Oda Elise Nordberg; Frode Guribye; Marija Slavkovik Proceedings of the 11th Nordic Conference on Human-Computer Interaction, 2020, (Pre SFI). @conference{Soe2020,
  title     = {Circumvention by design - dark patterns in cookie consents for online news outlets},
  author    = {Than Htut Soe and Oda Elise Nordberg and Frode Guribye and Marija Slavkovik},
  url       = {https://dl.acm.org/doi/pdf/10.1145/3419249.3420132},
  doi       = {10.1145/3419249.3420132},
  year      = {2020},
  date      = {2020-06-24},
  booktitle = {Proceedings of the 11th Nordic Conference on Human-Computer Interaction},
  abstract  = {To ensure that users of online services understand what data are collected and how they are used in algorithmic decision-making, the European Union's General Data Protection Regulation (GDPR) specifies informed consent as a minimal requirement. For online news outlets consent is commonly elicited through interface design elements in the form of a pop-up. We have manually analyzed 300 data collection consent notices from news outlets that are built to ensure compliance with GDPR. The analysis uncovered a variety of strategies or dark patterns that circumvent the intent of GDPR by design. We further study the presence and variety of these dark patterns in these "cookie consents" and use our observations to specify the concept of dark pattern in the context of consent elicitation.},
  note      = {Pre SFI},
  keywords  = {WP2: User Modeling Personalization and Engagement},
  pubstate  = {published},
  tppubtype = {conference}
}
To ensure that users of online services understand what data are collected and how they are used in algorithmic decision-making, the European Union's General Data Protection Regulation (GDPR) specifies informed consent as a minimal requirement. For online news outlets consent is commonly elicited through interface design elements in the form of a pop-up. We have manually analyzed 300 data collection consent notices from news outlets that are built to ensure compliance with GDPR. The analysis uncovered a variety of strategies or dark patterns that circumvent the intent of GDPR by design. We further study the presence and variety of these dark patterns in these "cookie consents" and use our observations to specify the concept of dark pattern in the context of consent elicitation. |
A Systematic Review and Taxonomy of Explanations in Decision Support and Recommender Systems Journal Article Ingrid Nunes; Dietmar Jannach In: User-Modeling and User-Adapted Interaction, vol. 27, no. 3-5, pp. 393-444, 2020, (Pre SFI). @article{Nunes2020,
title = {A Systematic Review and Taxonomy of Explanations in Decision Support and Recommender Systems},
author = {Ingrid Nunes and Dietmar Jannach},
url = {https://arxiv.org/pdf/2006.08672.pdf},
doi = {10.1007/s11257-017-9195-0},
year = {2020},
date = {2020-06-15},
journal = {User Modeling and User-Adapted Interaction},
volume = {27},
number = {3-5},
pages = {393--444},
abstract = {With the recent advances in the field of artificial intelligence, an increasing number of decision-making tasks are delegated to software systems. A key requirement for the success and adoption of such systems is that users must trust system choices or even fully automated decisions. To achieve this, explanation facilities have been widely investigated as a means of establishing trust in these systems since the early years of expert systems. With today's increasingly sophisticated machine learning algorithms, new challenges in the context of explanations, accountability, and trust towards such systems constantly arise. In this work, we systematically review the literature on explanations in advice-giving systems. This is a family of systems that includes recommender systems, which is one of the most successful classes of advice-giving software in practice. We investigate the purposes of explanations as well as how they are generated, presented to users, and evaluated. As a result, we derive a novel comprehensive taxonomy of aspects to be considered when designing explanation facilities for current and future decision support systems. The taxonomy includes a variety of different facets, such as explanation objective, responsiveness, content and presentation. Moreover, we identified several challenges that remain unaddressed so far, for example related to fine-grained issues associated with the presentation of explanations and how explanation facilities are evaluated.},
note = {Pre SFI},
internal-note = {Review: DOI 10.1007/s11257-017-9195-0 resolves to the 2017 UMUAI printing (vol. 27), yet year is 2020 -- verify which appearance this entry should cite.},
keywords = {Artificial Intelligence, Decision Support System, Expert System, Explanation, Knowledge-based system, Machine Learning, Recommender systems, Systematic review, Trust, WP2: User Modeling Personalization and Engagement},
pubstate = {published},
tppubtype = {article}
}
With the recent advances in the field of artificial intelligence, an increasing number of decision-making tasks are delegated to software systems. A key requirement for the success and adoption of such systems is that users must trust system choices or even fully automated decisions. To achieve this, explanation facilities have been widely investigated as a means of establishing trust in these systems since the early years of expert systems. With today's increasingly sophisticated machine learning algorithms, new challenges in the context of explanations, accountability, and trust towards such systems constantly arise. In this work, we systematically review the literature on explanations in advice-giving systems. This is a family of systems that includes recommender systems, which is one of the most successful classes of advice-giving software in practice. We investigate the purposes of explanations as well as how they are generated, presented to users, and evaluated. As a result, we derive a novel comprehensive taxonomy of aspects to be considered when designing explanation facilities for current and future decision support systems. The taxonomy includes a variety of different facets, such as explanation objective, responsiveness, content and presentation. Moreover, we identified several challenges that remain unaddressed so far, for example related to fine-grained issues associated with the presentation of explanations and how explanation facilities are evaluated. |
Ontologies for finding journalistic angles Journal Article Andreas Lothe Opdahl; Bjørnar Tessem In: Software and Systems Modeling, pp. 1-17, 2020, (Pre SFI). @article{Ophdal2020,
title = {Ontologies for finding journalistic angles},
author = {Andreas Lothe Opdahl and Bjørnar Tessem},
url = {https://www.researchgate.net/publication/342132642_Ontologies_for_finding_journalistic_angles},
doi = {10.1007/s10270-020-00801-w},
year = {2020},
date = {2020-06-01},
journal = {Software and Systems Modeling},
pages = {1--17},
abstract = {Journalism relies more and more on information and communication technology (ICT). ICT-based journalistic knowledge platforms continuously harvest potentially news-relevant information from the Internet and make it useful for journalists. Because information about the same event is available from different sources and formats vary widely, knowledge graphs are emerging as a preferred technology for integrating, enriching, and preparing information for journalistic use. The paper explores how journalistic knowledge graphs can be augmented with support for news angles, which can help journalists to detect newsworthy events and make them interesting for the intended audience. We argue that finding newsworthy angles on news-related information is an important example of a topical problem in information science: that of detecting interesting events and situations in big data sets and presenting those events and situations in interesting ways},
note = {Pre SFI},
keywords = {WP3: Media Content Production and Analysis},
pubstate = {published},
tppubtype = {article}
}
Journalism relies more and more on information and communication technology (ICT). ICT-based journalistic knowledge platforms continuously harvest potentially news-relevant information from the Internet and make it useful for journalists. Because information about the same event is available from different sources and formats vary widely, knowledge graphs are emerging as a preferred technology for integrating, enriching, and preparing information for journalistic use. The paper explores how journalistic knowledge graphs can be augmented with support for news angles, which can help journalists to detect newsworthy events and make them interesting for the intended audience. We argue that finding newsworthy angles on news-related information is an important example of a topical problem in information science: that of detecting interesting events and situations in big data sets and presenting those events and situations in interesting ways |
Unpacking Editorial Agreements in Collaborative Video Production Conference Pavel Okopnyi; Oskar Juhlin; Frode Guribye IMX '20: ACM International Conference on Interactive Media Experiences, New York, 2020, (Pre SFI). @conference{Okopnyi2020,
title = {Unpacking Editorial Agreements in Collaborative Video Production},
author = {Pavel Okopnyi and Oskar Juhlin and Frode Guribye},
url = {https://www.researchgate.net/publication/342251635_Unpacking_Editorial_Agreements_in_Collaborative_Video_Production},
doi = {10.1145/3391614.3393652},
year = {2020},
date = {2020-06-01},
booktitle = {IMX '20: ACM International Conference on Interactive Media Experiences},
pages = {117--126},
address = {New York},
abstract = {Video production is a collaborative process involving creative, artistic and technical elements that require a multitude of specialised skill sets. This open-ended work is often marked by uncertainty and interpretive flexibility in terms of what the product is and should be. At the same time, most current video production tools are designed for single users. There is a growing interest, both in industry and academia, to design features that support key collaborative processes in editing, such as commenting on videos. We add to current research by unpacking specific forms of collaboration, in particular the social mechanisms and strategies employed to reduce interpretive flexibility and uncertainty in achieving agreements between editors and other collaborators. The findings contribute to the emerging design interest by identifying general design paths for how to support collaboration in video editing through scaffolding, iconic referencing, and suggestive editing.},
note = {Pre SFI},
keywords = {WP4: Media Content Interaction and Accessibility},
pubstate = {published},
tppubtype = {conference}
}
Video production is a collaborative process involving creative, artistic and technical elements that require a multitude of specialised skill sets. This open-ended work is often marked by uncertainty and interpretive flexibility in terms of what the product is and should be. At the same time, most current video production tools are designed for single users. There is a growing interest, both in industry and academia, to design features that support key collaborative processes in editing, such as commenting on videos. We add to current research by unpacking specific forms of collaboration, in particular the social mechanisms and strategies employed to reduce interpretive flexibility and uncertainty in achieving agreements between editors and other collaborators. The findings contribute to the emerging design interest by identifying general design paths for how to support collaboration in video editing through scaffolding, iconic referencing, and suggestive editing. |
Brenda: Browser extension for fake news detection Journal Article Bjarte Botnevik; Eirik Sakariassen; Vinay Setty In: Proceedings of the 43rd international acm sigir conference on research and development in information retrieval, pp. 2117–2120, 2020, (Pre SFI). @article{Botnevik2020,
title = {{BRENDA}: Browser extension for fake news detection},
author = {Bjarte Botnevik and Eirik Sakariassen and Vinay Setty},
url = {https://arxiv.org/pdf/2005.13270.pdf},
doi = {10.1145/3397271.3401396},
year = {2020},
date = {2020-05-27},
journal = {Proceedings of the 43rd International {ACM} {SIGIR} Conference on Research and Development in Information Retrieval},
pages = {2117--2120},
publisher = {Association for Computing Machinery},
abstract = {Misinformation such as fake news has drawn a lot of attention in recent years. It has serious consequences on society, politics and economy. This has lead to a rise of manually fact-checking websites such as Snopes and Politifact. However, the scale of misinformation limits their ability for verification. In this demonstration, we propose BRENDA a browser extension which can be used to automate the entire process of credibility assessments of false claims. Behind the scenes BRENDA uses a tested deep neural network architecture to automatically identify fact check worthy claims and classifies as well as presents the result along with evidence to the user. Since BRENDA is a browser extension, it facilitates fast automated fact checking for the end user without having to leave the Webpage.},
note = {Pre SFI},
keywords = {Fake news detection, Hierarchical attention, Neural networks, WP3: Media Content Production and Analysis},
pubstate = {published},
tppubtype = {article}
}
Misinformation such as fake news has drawn a lot of attention in recent years. It has serious consequences on society, politics and economy. This has lead to a rise of manually fact-checking websites such as Snopes and Politifact. However, the scale of misinformation limits their ability for verification. In this demonstration, we propose BRENDA a browser extension which can be used to automate the entire process of credibility assessments of false claims. Behind the scenes BRENDA uses a tested deep neural network architecture to automatically identify fact check worthy claims and classifies as well as presents the result along with evidence to the user. Since BRENDA is a browser extension, it facilitates fast automated fact checking for the end user without having to leave the Webpage. |
Strategic and Organisational fit in Corporate News Markets: A Principal-agent Approach to Studying Newspaper Mergers Journal Article Helle Sjøvaag; Thomas Owren; Turid Borgen In: Journalism Practice, pp. 1-18, 2020, (Pre SFI). @article{Sjøvaag2020,
title = {Strategic and Organisational fit in Corporate News Markets: A Principal-agent Approach to Studying Newspaper Mergers},
author = {Helle Sjøvaag and Thomas Owren and Turid Borgen},
url = {https://www.tandfonline.com/doi/epub/10.1080/17512786.2020.1772097?needAccess=true},
doi = {10.1080/17512786.2020.1772097},
year = {2020},
date = {2020-05-20},
journal = {Journalism Practice},
pages = {1--18},
abstract = {This article analyses strategic and organisational fit in corporate newspaper mergers in the context of the digitalisation of local newspaper markets. Using the 2019 acquisition of Nordsjø Media by Amedia in Norway as case, we analyse how eight editors-in-chief perceive the process of incorporating small, low-frequency, print-oriented monopolistic newspapers into one of Scandinavia’s largest newspaper chains. The semi-structured interviews were analysed in light of perceived strategic and organisational fit in a principal-agent theoretical framework, the aim of which is to shed light on corporate ownership effects in consolidated newspaper markets. The analysis reveals the precarity of independent ownership in digitising news markets, to which corporatisation emerges as a necessary and welcomed solution. We find the strategic fit as perceived by editors to be tied to technological resources and scale economics, while organisational fit is hampered by the speed and pressure of corporatisation processes. While these results largely support findings from previous acquisition studies in the news industries, the contribution of this analysis lies primarily with the necessity of scale required by the technological transformation that forces independent newspapers to submit to larger chain operations and how it influences considerations of fit in disruptive digital news markets.},
note = {Pre SFI},
keywords = {Agency Theory, Editor in chief, Local journalism, Media ownership, Mergers and acquisitions, News markets, News organisations, Newspaper chains, WP2: User Modeling Personalization and Engagement},
pubstate = {published},
tppubtype = {article}
}
This article analyses strategic and organisational fit in corporate newspaper mergers in the context of the digitalisation of local newspaper markets. Using the 2019 acquisition of Nordsjø Media by Amedia in Norway as case, we analyse how eight editors-in-chief perceive the process of incorporating small, low-frequency, print-oriented monopolistic newspapers into one of Scandinavia’s largest newspaper chains. The semi-structured interviews were analysed in light of perceived strategic and organisational fit in a principal-agent theoretical framework, the aim of which is to shed light on corporate ownership effects in consolidated newspaper markets. The analysis reveals the precarity of independent ownership in digitising news markets, to which corporatisation emerges as a necessary and welcomed solution. We find the strategic fit as perceived by editors to be tied to technological resources and scale economics, while organisational fit is hampered by the speed and pressure of corporatisation processes. While these results largely support findings from previous acquisition studies in the news industries, the contribution of this analysis lies primarily with the necessity of scale required by the technological transformation that forces independent newspapers to submit to larger chain operations and how it influences considerations of fit in disruptive digital news markets. |
Identifying Sentiments in Algerian Code-switched User-generated Comments Conference Wafia Adouane; Samia Touileb; Jean-Philippe Bernardy 2020, (Pre SFI). @conference{Adouane2020,
title = {Identifying Sentiments in Algerian Code-switched User-generated Comments},
author = {Wafia Adouane and Samia Touileb and Jean-Philippe Bernardy},
url = {https://www.aclweb.org/anthology/2020.lrec-1.328.pdf},
year = {2020},
date = {2020-05-06},
journal = {Proceedings of the 12th Conference on Language Resources and Evaluation (LREC 2020)},
pages = {2698--2705},
abstract = {We present in this paper our work on Algerian language, an under-resourced North African colloquial Arabic variety, for which we
built a comparably large corpus of more than 36,000 code-switched user-generated comments annotated for sentiments. We opted
for this data domain because Algerian is a colloquial language with no existing freely available corpora. Moreover, we compiled
sentiment lexicons of positive and negative unigrams and bigrams reflecting the code-switches present in the language. We compare
the performance of four models on the task of identifying sentiments, and the results indicate that a CNN model trained end-to-end fits
better our unedited code-switched and unbalanced data across the predefined sentiment classes. Additionally, injecting the lexicons as
background knowledge to the model boosts its performance on the minority class with a gain of 10.54 points on the F-score. The results
of our experiments can be used as a baseline for future research for Algerian sentiment analysis.
},
note = {Pre SFI},
keywords = {Algerian Arabic, code-switching, sentiment analysis, under-resourced colloquial languages, user-generated data, WP5: Norwegian Language Technologies},
pubstate = {published},
tppubtype = {conference}
}
We present in this paper our work on Algerian language, an under-resourced North African colloquial Arabic variety, for which we
built a comparably large corpus of more than 36,000 code-switched user-generated comments annotated for sentiments. We opted
for this data domain because Algerian is a colloquial language with no existing freely available corpora. Moreover, we compiled
sentiment lexicons of positive and negative unigrams and bigrams reflecting the code-switches present in the language. We compare
the performance of four models on the task of identifying sentiments, and the results indicate that a CNN model trained end-to-end fits
better our unedited code-switched and unbalanced data across the predefined sentiment classes. Additionally, injecting the lexicons as
background knowledge to the model boosts its performance on the minority class with a gain of 10.54 points on the F-score. The results
of our experiments can be used as a baseline for future research for Algerian sentiment analysis.
|
Methods for datafication, datafication of methods: Introduction to the Special Issue. Journal Article Stine Lomborg; Lina Dencik; Hallvard Moe In: European Journal of Communication, vol. 35, no. 3, pp. 203-212, 2020, (Pre SFI). @article{Lomborg2020,
title = {Methods for datafication, datafication of methods: Introduction to the Special Issue.},
author = {Stine Lomborg and Lina Dencik and Hallvard Moe},
url = {https://journals.sagepub.com/doi/pdf/10.1177/0267323120922045},
doi = {10.1177/0267323120922045},
year = {2020},
date = {2020-05-05},
journal = {European Journal of Communication},
volume = {35},
number = {3},
pages = {203--212},
abstract = {Digital media enable processes of datafication: users' online activities leave digital traces that are transformed into data points in databases, kept by service providers and other private and public organisations, and repurposed for commercial exploitation, business innovation, surveillance -- and research. Increasingly, this also extends to sensors and recognition technologies that turn homes and cities, as well as our own bodies, into data points to be collected and analysed. So-called ‘traditional’ media industries, too, including public service broadcasting, have been datafied, tracking and profiling audiences, algorithmically processing data for greater personalisation as a way to compete with new players and streaming services. Datafication both raises new research questions and brings about new avenues, and an array of tools, for empirical research. This special issue is dedicated to exploring these, linking them to broader historical trajectories of social science methodologies as well as to central concerns and perspectives in media and communication research. As such, this special issue grapples with approaches to empirical research that interlink questions of methods and tools with epistemology and practice. It discusses the datafication of methods, as well as methods for studying datafication. With this we hope to enable reflection of what research questions media and communication scholars should ask of datafication, and how new and existing methods enable us to answer them.},
note = {Pre SFI},
keywords = {Datafication, Media and Communication Research, Methods, WP1: Understanding Media Experiences},
pubstate = {published},
tppubtype = {article}
}
Digital media enable processes of datafication: users' online activities leave digital traces that are transformed into data points in databases, kept by service providers and other private and public organisations, and repurposed for commercial exploitation, business innovation, surveillance -- and research. Increasingly, this also extends to sensors and recognition technologies that turn homes and cities, as well as our own bodies, into data points to be collected and analysed. So-called ‘traditional’ media industries, too, including public service broadcasting, have been datafied, tracking and profiling audiences, algorithmically processing data for greater personalisation as a way to compete with new players and streaming services. Datafication both raises new research questions and brings about new avenues, and an array of tools, for empirical research. This special issue is dedicated to exploring these, linking them to broader historical trajectories of social science methodologies as well as to central concerns and perspectives in media and communication research. As such, this special issue grapples with approaches to empirical research that interlink questions of methods and tools with epistemology and practice. It discusses the datafication of methods, as well as methods for studying datafication. With this we hope to enable reflection of what research questions media and communication scholars should ask of datafication, and how new and existing methods enable us to answer them. |
Interactive Visualizations in INESS Book Chapter P Meurer; V Rosén; Koenraad De Smedt In: Butt, M.; Hautli-Janisz, A.; (Eds.), V. Lyding (Ed.): 2020, (Pre SFI). @inbook{Meurer2020,
title = {Interactive Visualizations in INESS},
author = {P. Meurer and V. Rosén and Koenraad De Smedt},
editor = {M. Butt and A. Hautli-Janisz and V. Lyding},
url = {https://web.stanford.edu/group/cslipublications/cslipublications/site/9781684000333.shtml},
year = {2020},
date = {2020-05-01},
note = {Pre SFI},
keywords = {WP5: Norwegian Language Technologies},
pubstate = {published},
tppubtype = {inbook}
}
|
NorNE: Annotating Named Entities for Norwegian Proceeding F Jørgensen; T Aasmoe; ASR Husevåg; Lilja Øvrelid; Erik Velldal (Ed.) 2020, (Pre SFI). @proceedings{Jørgensen2020,
title = {NorNE: Annotating Named Entities for Norwegian},
editor = {F. Jørgensen and T. Aasmoe and A. S. R. Husevåg and Lilja Øvrelid and Erik Velldal},
url = {https://oda.oslomet.no/handle/10642/8830},
year = {2020},
date = {2020-05-01},
note = {Pre SFI},
keywords = {WP5: Norwegian Language Technologies},
pubstate = {published},
tppubtype = {proceedings}
}
|
A Fine-Grained Sentiment Dataset for Norwegian Proceeding Lilja Øvrelid; P Mæhlum; J Barnes; Erik Velldal 2020, (Pre SFI). @proceedings{Øvrelid2020,
title = {A Fine-Grained Sentiment Dataset for Norwegian},
author = {Lilja Øvrelid and P. Mæhlum and J. Barnes and Erik Velldal},
url = {https://www.aclweb.org/anthology/2020.lrec-1.618/},
year = {2020},
date = {2020-05-01},
note = {Pre SFI},
keywords = {WP5: Norwegian Language Technologies},
pubstate = {published},
tppubtype = {proceedings}
}
|
Named Entity Recognition without Labelled Data: A Weak Supervision Approach Journal Article Pierre Lison; Aliaksandr Hubin; Jeremy Barnes; Samia Touileb In: Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 1518–1533, 2020, (Pre SFI). @article{Lison2020,
title = {Named Entity Recognition without Labelled Data: A Weak Supervision Approach},
author = {Pierre Lison and Aliaksandr Hubin and Jeremy Barnes and Samia Touileb},
url = {https://arxiv.org/pdf/2004.14723.pdf},
year = {2020},
date = {2020-04-30},
journal = {Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
pages = {1518--1533},
abstract = {Named Entity Recognition (NER) performance often degrades rapidly when applied to target domains that differ from the texts observed during training. When in-domain labelled data is available, transfer learning techniques can be used to adapt existing NER models to the target domain. But what should one do when there is no hand-labelled data for the target domain? This paper presents a simple but powerful approach to learn NER models in the absence of labelled data through weak supervision. The approach relies on a broad spectrum of labelling functions to automatically annotate texts from the target domain. These annotations are then merged together using a hidden Markov model which captures the varying accuracies and confusions of the labelling functions. A sequence labelling model can finally be trained on the basis of this unified annotation. We evaluate the approach on two English datasets (CoNLL 2003 and news articles from Reuters and Bloomberg) and demonstrate an improvement of about 7 percentage points in entity-level F1 scores compared to an out-of-domain neural NER model.},
note = {Pre SFI},
keywords = {WP5: Norwegian Language Technologies},
pubstate = {published},
tppubtype = {article}
}
Named Entity Recognition (NER) performance often degrades rapidly when applied to target domains that differ from the texts observed during training. When in-domain labelled data is available, transfer learning techniques can be used to adapt existing NER models to the target domain. But what should one do when there is no hand-labelled data for the target domain? This paper presents a simple but powerful approach to learn NER models in the absence of labelled data through weak supervision. The approach relies on a broad spectrum of labelling functions to automatically annotate texts from the target domain. These annotations are then merged together using a hidden Markov model which captures the varying accuracies and confusions of the labelling functions. A sequence labelling model can finally be trained on the basis of this unified annotation. We evaluate the approach on two English datasets (CoNLL 2003 and news articles from Reuters and Bloomberg) and demonstrate an improvement of about 7 percentage points in entity-level F1 scores compared to an out-of-domain neural NER model. |
Deliberative systems theory and citizens’ use of online media: testing a critical theory of democracy on a high achiever. Journal Article Cathrine Holst; Hallvard Moe In: Political Studies, pp. 1-18, 2020, (Pre SFI). @article{Holst2020,
title = {Deliberative systems theory and citizens’ use of online media: testing a critical theory of democracy on a high achiever.},
author = {Cathrine Holst and Hallvard Moe},
url = {https://journals.sagepub.com/doi/pdf/10.1177/0032321719890809},
doi = {10.1177/0032321719890809},
year = {2020},
date = {2020-04-06},
journal = {Political Studies},
pages = {1--18},
abstract = {Deliberative systems theory is a promising candidate for a normative theory of democracy that combines ideal requirements with feasibility. Yet, recent theoretical elaborations and studies of citizens’ online media use inspired by the theory suffer from an incomplete account of the public sphere’s epistemic function, too rough interpretations of participatory levels, shortcomings in the understanding of online media, and a context-insensitive notion of policy reform. Addressing these weaknesses, the article argues for a refined version of deliberative systems theory. Particular attention is given to feasibility considerations. Reviewing studies of online democracy in Norway, the article shows that the theoretical critique has practical significance. It is also argued that the amended version of the deliberative systems approach produces a diagnosis of Norwegian online democracy more in line with reasonable expectations to a high achiever. This is taken as a prima facie indicator of feasibility.},
note = {Pre SFI},
keywords = {Deliberative Democracy, Experts, Non-ideal theory, Online Media, Public Sphere, WP1: Understanding Media Experiences},
pubstate = {published},
tppubtype = {article}
}
Deliberative systems theory is a promising candidate for a normative theory of democracy that combines ideal requirements with feasibility. Yet, recent theoretical elaborations and studies of citizens’ online media use inspired by the theory suffer from an incomplete account of the public sphere’s epistemic function, too rough interpretations of participatory levels, shortcomings in the understanding of online media, and a context-insensitive notion of policy reform. Addressing these weaknesses, the article argues for a refined version of deliberative systems theory. Particular attention is given to feasibility considerations. Reviewing studies of online democracy in Norway, the article shows that the theoretical critique has practical significance. It is also argued that the amended version of the deliberative systems approach produces a diagnosis of Norwegian online democracy more in line with reasonable expectations to a high achiever. This is taken as a prima facie indicator of feasibility. |
“We in the Mojo Community” – Exploring a Global Network of Mobile Journalists Journal Article Anja Salzmann; Frode Guribye; Astrid Gynnild In: Journalism Practice, pp. 1-18, 2020, (Pre SFI). @article{Salzmann2020,
title = {“We in the Mojo Community” – Exploring a Global Network of Mobile Journalists},
author = {Anja Salzmann and Frode Guribye and Astrid Gynnild},
url = {https://www.tandfonline.com/doi/epub/10.1080/17512786.2020.1742772?needAccess=true},
doi = {10.1080/17512786.2020.1742772},
year = {2020},
date = {2020-04-03},
journal = {Journalism Practice},
pages = {1--18},
abstract = {Mobile journalism is a fast-growing area of journalistic innovation that requires new skills and work practices. Thus, a major challenge for journalists is learning not only how to keep up with new gadgets but how to advance and develop a mojo mindset to pursue their interests and solidify future work options. This paper investigates a globally pioneering network of mojo journalism, the Mojo Community, that consists of journalists and practitioners dedicated to creating multimedia content using mobile technologies. The study is based on empirical data from interviews with and the observation of the participants of the community over a two-year period. The analysis draws on Wenger’s concept of “communities of practice” to explore the domain, structure, and role of this communal formation for innovation and change in journalistic practices. The community’s core group is comprised of journalists mainly affiliated with legacy broadcast organizations and with a particular interest in and extensive knowledge of mobile technologies. The participants perceive their engagement with the community as a way of meeting the challenges of organizational reluctance to change, fast-evolving technological advancements, and uncertain job prospects.},
note = {Pre SFI},
keywords = {community of practice, digital culture, mobile content creation, Mobile journalism, mobile technologies, mojo, mojo community, smartphone reporting, WP4: Media Content Interaction and Accessibility},
pubstate = {published},
tppubtype = {article}
}
Mobile journalism is a fast-growing area of journalistic innovation that requires new skills and work practices. Thus, a major challenge for journalists is learning not only how to keep up with new gadgets but how to advance and develop a mojo mindset to pursue their interests and solidify future work options. This paper investigates a globally pioneering network of mojo journalism, the Mojo Community, that consists of journalists and practitioners dedicated to creating multimedia content using mobile technologies. The study is based on empirical data from interviews with and the observation of the participants of the community over a two-year period. The analysis draws on Wenger’s concept of “communities of practice” to explore the domain, structure, and role of this communal formation for innovation and change in journalistic practices. The community’s core group is comprised of journalists mainly affiliated with legacy broadcast organizations and with a particular interest in and extensive knowledge of mobile technologies. The participants perceive their engagement with the community as a way of meeting the challenges of organizational reluctance to change, fast-evolving technological advancements, and uncertain job prospects. |
Learn with Haptics: Improving Vocabulary Recall with Free-form Digital Annotation on Touchscreen Mobiles Journal Article Morten Fjeld; Smitha Sheshadri; Shendong Zhao; Yang Cheng In: CHI 2020 Paper, pp. 1-13, 2020, (Pre SFI). @article{Fjeld2020,
title = {Learn with Haptics: Improving Vocabulary Recall with Free-form Digital Annotation on Touchscreen Mobiles},
author = {Morten Fjeld and Smitha Sheshadri and Shengdong Zhao and Yang Cheng},
url = {https://dl.acm.org/doi/pdf/10.1145/3313831.3376272
https://www.youtube.com/watch?v=WY_T0fK5gCQ&ab_channel=ACMSIGCHI},
doi = {10.1145/3313831.3376272},
year = {2020},
date = {2020-04-01},
journal = {CHI 2020 Paper},
pages = {1--13},
abstract = {Mobile vocabulary learning interfaces typically present material only in auditory and visual channels, underutilizing the haptic modality. We explored haptic-integrated learning by adding free-form digital annotation to mobile vocabulary learning interfaces. Through a series of pilot studies, we identified three design factors: annotation mode, presentation sequence, and vibrotactile feedback, that influence recall in haptic-integrated vocabulary interfaces. These factors were then evaluated in a within-subject comparative study using a digital flashcard interface as baseline. Results using a 84-item vocabulary showed that the 'whole word' annotation mode is highly effective, yielding a 24.21% increase in immediate recall scores and a 30.36% increase in the 7-day delayed scores. Effects of presentation sequence and vibrotactile feedback were more transient; they affected the results of immediate tests, but not the delayed tests. We discuss the implications of these factors for designing future mobile learning applications.},
note = {Pre SFI},
keywords = {Haptics for Learning, Intersensory reinforced learning, Mobile Vocabulary Learning, Motoric Engagement, Multimodal Learning, WP4: Media Content Interaction and Accessibility},
pubstate = {published},
tppubtype = {article}
}
Mobile vocabulary learning interfaces typically present material only in auditory and visual channels, underutilizing the haptic modality. We explored haptic-integrated learning by adding free-form digital annotation to mobile vocabulary learning interfaces. Through a series of pilot studies, we identified three design factors: annotation mode, presentation sequence, and vibrotactile feedback, that influence recall in haptic-integrated vocabulary interfaces. These factors were then evaluated in a within-subject comparative study using a digital flashcard interface as baseline. Results using a 84-item vocabulary showed that the 'whole word' annotation mode is highly effective, yielding a 24.21% increase in immediate recall scores and a 30.36% increase in the 7-day delayed scores. Effects of presentation sequence and vibrotactile feedback were more transient; they affected the results of immediate tests, but not the delayed tests. We discuss the implications of these factors for designing future mobile learning applications. |
FAIR Digital Objects for Science: From Data Pieces to Actionable Knowledge Units Journal Article Koenraad de Smedt; D Koureas; P Wittenberg In: 2020, (Pre SFI). @article{deSmedt2020,
title = {FAIR Digital Objects for Science: From Data Pieces to Actionable Knowledge Units},
author = {Koenraad de Smedt and D. Koureas and P. Wittenberg},
url = {https://ideas.repec.org/a/gam/jpubli/v8y2020i2p21-d344422.html},
year = {2020},
date = {2020-04-01},
note = {Pre SFI},
keywords = {WP5: Norwegian Language Technologies},
pubstate = {published},
tppubtype = {article}
}
|
With a little help from my peers: depicting social norms in a recommender interface to promote energy conservation Conference Alain D. Starke; Martijn C. Willemsen; Chris C.P. Snijders no. March 2020, 2020. @conference{Starke2020b,
title = {With a little help from my peers: depicting social norms in a recommender interface to promote energy conservation},
author = {Alain D. Starke and Martijn C. Willemsen and Chris C.P. Snijders},
url = {https://dl.acm.org/doi/10.1145/3377325.3377518},
doi = {10.1145/3377325.3377518},
year = {2020},
date = {2020-03-17},
booktitle = {Proceedings of the 25th International Conference on Intelligent User Interfaces (IUI '20)},
number = {March 2020},
pages = {1--11},
abstract = {How can recommender interfaces help users to adopt new behaviors? In the behavioral change literature, nudges and norms are studied to understand how to convince people to take action (e.g. towel re-use is boosted when stating that `75% of hotel guests' do so), but what is advised is typically not personalized. Most recommender systems know what to recommend in a personalized way, but not much research has considered how to present such advice to help users to change their current habits. We examine the value of presenting normative messages (e.g. `75% of users do X') based on actual user data in a personalized energy recommender interface called `Saving Aid'. In a study among 207 smart thermostat owners, we compared three different normative explanations (`Global', `Similar', and `Experienced' norm rates) to a non-social baseline (`kWh savings'). Although none of the norms increased the total number of chosen measures directly, we show evidence that the effect of norms seems to be mediated by the perceived feasibility of the measures. Also, how norms were presented (i.e. specific source, adoption rate) affected which measures were chosen within our Saving Aid interface.},
keywords = {Decision Support System, Human computer interaction, Human-centered computing, Information Systems, User studies},
pubstate = {published},
tppubtype = {conference}
}
How can recommender interfaces help users to adopt new behaviors? In the behavioral change literature, nudges and norms are studied to understand how to convince people to take action (e.g. towel re-use is boosted when stating that `75% of hotel guests' do so), but what is advised is typically not personalized. Most recommender systems know what to recommend in a personalized way, but not much research has considered how to present such advice to help users to change their current habits. We examine the value of presenting normative messages (e.g. `75% of users do X') based on actual user data in a personalized energy recommender interface called `Saving Aid'. In a study among 207 smart thermostat owners, we compared three different normative explanations (`Global', `Similar', and `Experienced' norm rates) to a non-social baseline (`kWh savings'). Although none of the norms increased the total number of chosen measures directly, we show evidence that the effect of norms seems to be mediated by the perceived feasibility of the measures. Also, how norms were presented (i.e. specific source, adoption rate) affected which measures were chosen within our Saving Aid interface. |
Towards a Framework for Visual Intelligence in Service Robotics: Epistemic Requirements and Gap Analysis Journal Article Agnese Chiatti; Enrico Motta; Enrico Daga In: Proceedings of the 17th International Conference on Principles of Knowledge Representation and Reasoning (KR 2020), pp. 905–916, 2020, (Pre SFI). @article{Chiatti2020,
title = {Towards a Framework for Visual Intelligence in Service Robotics: Epistemic Requirements and Gap Analysis},
author = {Agnese Chiatti and Enrico Motta and Enrico Daga},
url = {https://arxiv.org/ftp/arxiv/papers/2003/2003.06171.pdf},
year = {2020},
date = {2020-03-13},
journal = {Proceedings of the 17th International Conference on Principles of Knowledge Representation and Reasoning (KR 2020)},
pages = {905--916},
abstract = {A key capability required by service robots operating in real-world, dynamic environments is that of Visual Intelligence, i.e., the ability to use their vision system, reasoning components and background knowledge to make sense of their environment. In this paper, we analyze the epistemic requirements for Visual Intelligence, both in a top-down fashion, using existing frameworks for human-like Visual Intelligence in the literature, and from the bottom up, based on the errors emerging from object recognition trials in a real-world robotic scenario. Finally, we use these requirements to evaluate current knowledge bases for Service Robotics and to identify gaps in the support they provide for Visual Intelligence. These gaps provide the basis of a research agenda for developing more effective knowledge representations for Visual Intelligence.},
note = {Pre SFI},
keywords = {WP3: Media Content Production and Analysis},
pubstate = {published},
tppubtype = {article}
}
A key capability required by service robots operating in real-world, dynamic environments is that of Visual Intelligence, i.e., the ability to use their vision system, reasoning components and background knowledge to make sense of their environment. In this paper, we analyze the epistemic requirements for Visual Intelligence, both in a top-down fashion, using existing frameworks for human-like Visual Intelligence in the literature, and from the bottom up, based on the errors emerging from object recognition trials in a real-world robotic scenario. Finally, we use these requirements to evaluate current knowledge bases for Service Robotics and to identify gaps in the support they provide for Visual Intelligence. These gaps provide the basis of a research agenda for developing more effective knowledge representations for Visual Intelligence. |
Named entity extraction for knowledge graphs: A literature overview Journal Article Tareq Al-Moslmi; Marc Gallofré Ocaña; Andreas Lothe Opdahl; Csaba Veres
In: IEEE Access, vol. 8, pp. 32862-32881, 2020, (Pre SFI). @article{Al-Moslmi2020,
title = {Named entity extraction for knowledge graphs: A literature overview},
author = {Tareq Al-Moslmi and Marc Gallofré Ocaña and Andreas Lothe Opdahl and Csaba Veres},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8999622},
doi = {10.1109/ACCESS.2020.2973928},
year = {2020},
date = {2020-02-14},
journal = {IEEE Access},
volume = {8},
pages = {32862--32881},
abstract = {An enormous amount of digital information is expressed as natural-language (NL) text that is not easily processable by computers. Knowledge Graphs (KG) offer a widely used format for representing information in computer-processable form. Natural Language Processing (NLP) is therefore needed for mining (or lifting) knowledge graphs from NL texts. A central part of the problem is to extract the named entities in the text. The paper presents an overview of recent advances in this area, covering: Named Entity Recognition (NER), Named Entity Disambiguation (NED), and Named Entity Linking (NEL). We comment that many approaches to NED and NEL are based on older approaches to NER and need to leverage the outputs of state-of-the-art NER systems. There is also a need for standard methods to evaluate and compare named-entity extraction approaches. We observe that NEL has recently moved from being stepwise and isolated into an integrated process along two dimensions: the first is that previously sequential steps are now being integrated into end-to-end processes, and the second is that entities that were previously analysed in isolation are now being lifted in each other's context. The current culmination of these trends are the deep-learning approaches that have recently reported promising results.},
note = {Pre SFI},
keywords = {Knowledge graphs, Named-entity disambiguation, Named-entity extraction, Named-entity linking, Named-entity recognition, Natural-language processing, WP3: Media Content Production and Analysis},
pubstate = {published},
tppubtype = {article}
}
An enormous amount of digital information is expressed as natural-language (NL) text that is not easily processable by computers. Knowledge Graphs (KG) offer a widely used format for representing information in computer-processable form. Natural Language Processing (NLP) is therefore needed for mining (or lifting) knowledge graphs from NL texts. A central part of the problem is to extract the named entities in the text. The paper presents an overview of recent advances in this area, covering: Named Entity Recognition (NER), Named Entity Disambiguation (NED), and Named Entity Linking (NEL). We comment that many approaches to NED and NEL are based on older approaches to NER and need to leverage the outputs of state-of-the-art NER systems. There is also a need for standard methods to evaluate and compare named-entity extraction approaches. We observe that NEL has recently moved from being stepwise and isolated into an integrated process along two dimensions: the first is that previously sequential steps are now being integrated into end-to-end processes, and the second is that entities that were previously analysed in isolation are now being lifted in each other's context. The current culmination of these trends are the deep-learning approaches that have recently reported promising results. |
Web Table Extraction, Retrieval, and Augmentation: A Survey Journal Article Shuo Zhang; Krisztian Balog In: ACM Transactions on Intelligent Systems and Technology (TIST), vol. 11, no. 2, pp. 1-35, 2020, (Pre SFI). @article{Zhang2020,
title = {Web Table Extraction, Retrieval, and Augmentation: A Survey},
author = {Shuo Zhang and Krisztian Balog},
url = {https://arxiv.org/pdf/2002.00207.pdf},
year = {2020},
date = {2020-02-01},
journal = {ACM Transactions on Intelligent Systems and Technology (TIST)},
volume = {11},
number = {2},
pages = {1--35},
abstract = {Tables are a powerful and popular tool for organizing and manipulating data. A vast number of tables can be found on the Web, which represents a valuable knowledge resource. The objective of this survey is to synthesize and present two decades of research on web tables. In particular, we organize existing literature into six main categories of information access tasks: table extraction, table interpretation, table search, question answering, knowledge base augmentation, and table augmentation. For each of these tasks, we identify and describe seminal approaches, present relevant resources, and point out interdependencies among the different tasks.},
note = {Pre SFI},
keywords = {Table augmentation, Table extraction, Table interpretation, Table mining, Table retrieval, Table search, WP2: User Modeling Personalization and Engagement},
pubstate = {published},
tppubtype = {article}
}
Tables are a powerful and popular tool for organizing and manipulating data. A vast number of tables can be found on the Web, which represents a valuable knowledge resource. The objective of this survey is to synthesize and present two decades of research on web tables. In particular, we organize existing literature into six main categories of information access tasks: table extraction, table interpretation, table search, question answering, knowledge base augmentation, and table augmentation. For each of these tasks, we identify and describe seminal approaches, present relevant resources, and point out interdependencies among the different tasks. |
Morphological filter detector for image forensics applications Journal Article G. Boato; Duc-Tien Dang Nguyen; F.G.B. De Natale In: IEEE Access, vol. 8, pp. 13549-13560, 2020, (Pre SFI). @article{Boato2020,
title = {Morphological filter detector for image forensics applications},
author = {G. Boato and Duc-Tien {Dang Nguyen} and F. G. B. {De Natale}},
url = {https://www.researchgate.net/publication/338524511_Morphological_Filter_Detector_for_Image_Forensics_Applications},
doi = {10.1109/ACCESS.2020.2965745},
year = {2020},
date = {2020-01-01},
journal = {IEEE Access},
volume = {8},
pages = {13549--13560},
abstract = {Mathematical morphology provides a large set of powerful non-linear image operators, widely used for feature extraction, noise removal or image enhancement. Although morphological filters might be used to remove artifacts produced by image manipulations, both on binary and graylevel documents, little effort has been spent towards their forensic identification. In this paper we propose a non-trivial extension of a deterministic approach originally detecting erosion and dilation of binary images. The proposed approach operates on grayscale images and is robust to image compression and other typical attacks. When the image is attacked the method looses its deterministic nature and uses a properly trained SVM classifier, using the original detector as a feature extractor. Extensive tests demonstrate that the proposed method guarantees very high accuracy in filtering detection, providing 100% accuracy in discriminating the presence and the type of morphological filter in raw images of three different datasets. The achieved accuracy is also good after JPEG compression, equal or above 76.8% on all datasets for quality factors above 80. The proposed approach is also able to determine the adopted structuring element for moderate compression factors. Finally, it is robust against noise addition and it can distinguish morphological filter from other filters.},
note = {Pre SFI},
keywords = {WP3: Media Content Production and Analysis},
pubstate = {published},
tppubtype = {article}
}
Mathematical morphology provides a large set of powerful non-linear image operators, widely used for feature extraction, noise removal or image enhancement. Although morphological filters might be used to remove artifacts produced by image manipulations, both on binary and graylevel documents, little effort has been spent towards their forensic identification. In this paper we propose a non-trivial extension of a deterministic approach originally detecting erosion and dilation of binary images. The proposed approach operates on grayscale images and is robust to image compression and other typical attacks. When the image is attacked the method looses its deterministic nature and uses a properly trained SVM classifier, using the original detector as a feature extractor. Extensive tests demonstrate that the proposed method guarantees very high accuracy in filtering detection, providing 100% accuracy in discriminating the presence and the type of morphological filter in raw images of three different datasets. The achieved accuracy is also good after JPEG compression, equal or above 76.8% on all datasets for quality factors above 80. The proposed approach is also able to determine the adopted structuring element for moderate compression factors. Finally, it is robust against noise addition and it can distinguish morphological filter from other filters. |
Beyond “one-size-fits-all” platforms: Applying Campbell's paradigm to test personalized energy advice in the Netherlands Journal Article Alain D. Starke; Martijn C. Willemsen; Chris C.P. Snijders In: vol. 59, no. January 2020, pp. 1-12, 2020. @article{Starke2020,
title = {Beyond “one-size-fits-all” platforms: Applying Campbell's paradigm to test personalized energy advice in the Netherlands},
author = {Alain D. Starke and Martijn C. Willemsen and Chris C.P. Snijders},
url = {https://www.sciencedirect.com/science/article/pii/S2214629618302615?via%3Dihub},
doi = {10.1016/j.erss.2019.101311},
year = {2020},
date = {2020-01-01},
journal = {Energy Research \& Social Science},
volume = {59},
number = {January 2020},
pages = {1--12},
abstract = {When analyzing ways in which people save energy, most researchers and policy makers conceptually differentiate between curtailment (e.g. unplugging chargers) and efficiency measures (e.g. installing PV cells). However, such a two-dimensional approach is suboptimal from both a conceptual and policy perspective, as it does not consider individual differences that determine energy-saving behavior. We propose a different, one-dimensional approach, applying Campbell's Paradigm through the Rasch model, in which both curtailment and efficiency measures are intermixed on a single scale and ordered according to their behavioral costs. By matching these behavioral costs to individual energy-saving attitudes, we investigate to what extent attitude-tailored energy-saving advice can help consumers to save energy.
We present the results of two studies. The first study (N = 263) reliably calibrated a one-dimensional Rasch scale that consists of 79 energy-saving measures, suitable for advice. The second study employed this scale to investigate how users (N = 196) evaluate attitude-tailored energy-saving advice in a web-based energy recommender system. Results indicate that Rasch-based recommendations can be used to effectively tailor energy-saving advice and that such attitude-tailored advice is more adequate than a number of non-personalized approaches.},
keywords = {Conservation advice, Energy efficiency, Rasch model, Recommender systems},
pubstate = {published},
tppubtype = {article}
}
When analyzing ways in which people save energy, most researchers and policy makers conceptually differentiate between curtailment (e.g. unplugging chargers) and efficiency measures (e.g. installing PV cells). However, such a two-dimensional approach is suboptimal from both a conceptual and policy perspective, as it does not consider individual differences that determine energy-saving behavior. We propose a different, one-dimensional approach, applying Campbell's Paradigm through the Rasch model, in which both curtailment and efficiency measures are intermixed on a single scale and ordered according to their behavioral costs. By matching these behavioral costs to individual energy-saving attitudes, we investigate to what extent attitude-tailored energy-saving advice can help consumers to save energy.
We present the results of two studies. The first study (N = 263) reliably calibrated a one-dimensional Rasch scale that consists of 79 energy-saving measures, suitable for advice. The second study employed this scale to investigate how users (N = 196) evaluate attitude-tailored energy-saving advice in a web-based energy recommender system. Results indicate that Rasch-based recommendations can be used to effectively tailor energy-saving advice and that such attitude-tailored advice is more adequate than a number of non-personalized approaches. |
2019
|
Distributed Readiness Citizenship: A Realistic, Normative Concept for Citizens’ Public Connection. Journal Article Hallvard Moe In: Communication Theory, 2019, ISSN: 1050-3293, (Pre SFI). @article{Moe2019,
title = {Distributed Readiness Citizenship: A Realistic, Normative Concept for Citizens’ Public Connection.},
author = {Hallvard Moe},
url = {https://bora.uib.no/bora-xmlui/bitstream/handle/1956/23098/qtz016.pdf?sequence=3&isAllowed=y},
doi = {10.1093/ct/qtz016},
issn = {1050-3293},
year = {2019},
date = {2019-12-09},
journal = {Communication Theory},
abstract = {This article argues that our view of citizens as miserably failing to maintain their role in democracy is problematic, and that the problems stem from the “informed citizen” ideal: it is too demanding, but also misses the target. The article proposes an alternative normative concept for citizens’ public connection: distributed readiness citizenship. The concept highlights how the state of being prepared to act is more important than levels of measurable political knowledge. Readiness is crucial to finding enough information and relevant cues, and it cannot be assessed based on individual citizens in isolation, but should be considered as distributed, and embodied in citizens’ social networks, with a division of labor. With such a conceptualization, we are better equipped to evaluate existing conditions, judge the impact of populism and propaganda, and figure out how to improve the chances for those less well-off to participate in democracy.},
note = {Pre SFI},
keywords = {Citizenship, Deliberative, Democracy, Democratic Realism, Normative Theory, Public Sphere, WP1: Understanding Media Experiences},
pubstate = {published},
tppubtype = {article}
}
This article argues that our view of citizens as miserably failing to maintain their role in democracy is problematic, and that the problems stem from the “informed citizen” ideal: it is too demanding, but also misses the target. The article proposes an alternative normative concept for citizens’ public connection: distributed readiness citizenship. The concept highlights how the state of being prepared to act is more important than levels of measurable political knowledge. Readiness is crucial to finding enough information and relevant cues, and it cannot be assessed based on individual citizens in isolation, but should be considered as distributed, and embodied in citizens’ social networks, with a division of labor. With such a conceptualization, we are better equipped to evaluate existing conditions, judge the impact of populism and propaganda, and figure out how to improve the chances for those less well-off to participate in democracy. |
Analogical News Angles from Text Similarity Conference Bjørnar Tessem Artificial Intelligence XXXVI, no. 11927, Springer International Publishing, 2019, (Pre SFI). @conference{Tessem2019b,
title = {Analogical News Angles from Text Similarity},
author = {Bjørnar Tessem},
editor = {Max Bramer and Miltos Petridis},
url = {https://bora.uib.no/bora-xmlui/bitstream/handle/1956/22473/SGAI_2019.pdf?sequence=4&isAllowed=y},
doi = {10.1007/978-3-030-34885-4_35},
year = {2019},
date = {2019-11-19},
booktitle = {Artificial Intelligence XXXVI},
number = {11927},
pages = {449--455},
publisher = {Springer International Publishing},
abstract = {The paper presents an algorithm providing creativity support to journalists. It suggests analogical transfer of news angles from reports written about different events than the one the journalist is working on. The problem is formulated as a matching problem, where news reports with similar wordings from two events are matched, and unmatched reports from previous cases are selected as candidates for a news angle transfer. The approach is based on document similarity measures for matching and selection of transferable candidates. The algorithm has been tested on a small data set and show that the concept may be viable, but needs more exploration and evaluation in journalistic practice.},
note = {Pre SFI},
keywords = {Analogical reasoning, Computational creativity, Document similarity, Journalism, WP3: Media Content Production and Analysis},
pubstate = {published},
tppubtype = {conference}
}
The paper presents an algorithm providing creativity support to journalists. It suggests analogical transfer of news angles from reports written about different events than the one the journalist is working on. The problem is formulated as a matching problem, where news reports with similar wordings from two events are matched, and unmatched reports from previous cases are selected as candidates for a news angle transfer. The approach is based on document similarity measures for matching and selection of transferable candidates. The algorithm has been tested on a small data set and show that the concept may be viable, but needs more exploration and evaluation in journalistic practice. |
Lexicon information in neural sentiment analysis: a multi-task learning approach Conference Jeremy Barnes; Samia Touileb; Lilja Øvrelid; Erik Velldal Linköping University Electronic Press, 2019, (Pre SFI). @conference{Barnes2019,
title = {Lexicon information in neural sentiment analysis: a multi-task learning approach},
author = {Jeremy Barnes and Samia Touileb and Lilja Øvrelid and Erik Velldal},
url = {https://www.aclweb.org/anthology/W19-6119.pdf},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 22nd Nordic Conference on Computational Linguistics (NoDaLiDa)},
pages = {175--186},
publisher = {Linköping University Electronic Press},
abstract = {This paper explores the use of multi-task learning (MTL) for incorporating external knowledge in neural models. Specifically, we show how MTL can enable a BiLSTM sentiment classifier to incorporate information from sentiment lexicons. Our MTL set-up is shown to improve model performance (compared to a single-task set-up) on both English and Norwegian sentence-level sentiment datasets. The paper also introduces a new sentiment lexicon for Norwegian.},
note = {Pre SFI},
keywords = {WP5: Norwegian Language Technologies},
pubstate = {published},
tppubtype = {conference}
}
This paper explores the use of multi-task learning (MTL) for incorporating external knowledge in neural models. Specifically, we show how MTL can enable a BiLSTM sentiment classifier to incorporate information from sentiment lexicons. Our MTL set-up is shown to improve model performance (compared to a single-task set-up) on both English and Norwegian sentence-level sentiment datasets. The paper also introduces a new sentiment lexicon for Norwegian. |
Learning to Recommend Similar Items from Human Judgements Journal Article Christoph Trattner; Dietmar Jannach In: User Modeling and User-Adapted Interaction Journal, pp. 1-50, 2019, (Pre SFI). @article{Trattner2020,
title = {Learning to Recommend Similar Items from Human Judgements},
author = {Christoph Trattner and Dietmar Jannach},
url = {https://www.christophtrattner.info/pubs/UMUAI2019.pdf},
doi = {10.1007/s11257-019-09245-4},
year = {2019},
date = {2019-09-20},
journal = {User Modeling and User-Adapted Interaction Journal},
pages = {1--50},
abstract = {Similar item recommendations—a common feature of many Web sites—point users to other interesting objects given a currently inspected item. A common way of computing such recommendations is to use a similarity function, which expresses how much alike two given objects are. Such similarity functions are usually designed based on the specifics of the given application domain. In this work, we explore how such functions can be learned from human judgments of similarities between objects, using two domains of “quality and taste”—cooking recipe and movie recommendation—as guiding scenarios. In our approach, we first collect a few thousand pairwise similarity assessments with the help of crowdworkers. Using these data, we then train different machine learning models that can be used as similarity functions to compare objects. Offline analyses reveal for both application domains that models that combine different types of item characteristics are the best predictors for human-perceived similarity. To further validate the usefulness of the learned models, we conducted additional user studies. In these studies, we exposed participants to similar item recommendations using a set of models that were trained with different feature subsets. The results showed that the combined models that exhibited the best offline prediction performance led to the highest user-perceived similarity, but also to recommendations that were considered useful by the participants, thus confirming the feasibility of our approach.},
note = {Pre SFI},
keywords = {Content-based recommender systems, Similar item recommendations, Similarity measures, User studies},
pubstate = {published},
tppubtype = {article}
}
Similar item recommendations—a common feature of many Web sites—point users to other interesting objects given a currently inspected item. A common way of computing such recommendations is to use a similarity function, which expresses how much alike two given objects are. Such similarity functions are usually designed based on the specifics of the given application domain. In this work, we explore how such functions can be learned from human judgments of similarities between objects, using two domains of “quality and taste”—cooking recipe and movie recommendation—as guiding scenarios. In our approach, we first collect a few thousand pairwise similarity assessments with the help of crowdworkers. Using these data, we then train different machine learning models that can be used as similarity functions to compare objects. Offline analyses reveal for both application domains that models that combine different types of item characteristics are the best predictors for human-perceived similarity. To further validate the usefulness of the learned models, we conducted additional user studies. In these studies, we exposed participants to similar item recommendations using a set of models that were trained with different feature subsets. The results showed that the combined models that exhibited the best offline prediction performance led to the highest user-perceived similarity, but also to recommendations that were considered useful by the participants, thus confirming the feasibility of our approach. |
Capturing themed evidence, a hybrid approach Conference Enrico Daga; Enrico Motta Proceedings of the 10th International Conference on Knowledge Capture, 2019, (Pre SFI). @conference{Daga2019,
title = {Capturing themed evidence, a hybrid approach},
author = {Enrico Daga and Enrico Motta},
url = {https://dl.acm.org/doi/pdf/10.1145/3360901.3364415},
year = {2019},
date = {2019-09-01},
booktitle = {Proceedings of the 10th International Conference on Knowledge Capture},
pages = {93--100},
abstract = {The task of identifying pieces of evidence in texts is of fundamental importance in supporting qualitative studies in various domains, especially in the humanities. In this paper, we coin the expression themed evidence, to refer to (direct or indirect) traces of a fact or situation relevant to a theme of interest and study the problem of identifying them in texts. We devise a generic framework aimed at capturing themed evidence in texts based on a hybrid approach, combining statistical natural language processing, background knowledge, and Semantic Web technologies. The effectiveness of the method is demonstrated on a case study of a digital humanities database aimed at collecting and curating a repository of evidence of experiences of listening to music. Extensive experiments demonstrate that our hybrid approach outperforms alternative solutions. We also evidence its generality by testing it on a different use case in the digital humanities.},
note = {Pre SFI},
keywords = {DBpedia, Hybrid method, Information extraction, Themed evidence, WP3: Media Content Production and Analysis},
pubstate = {published},
tppubtype = {conference}
}
The task of identifying pieces of evidence in texts is of fundamental importance in supporting qualitative studies in various domains, especially in the humanities. In this paper, we coin the expression themed evidence, to refer to (direct or indirect) traces of a fact or situation relevant to a theme of interest and study the problem of identifying them in texts. We devise a generic framework aimed at capturing themed evidence in texts based on a hybrid approach, combining statistical natural language processing, background knowledge, and Semantic Web technologies. The effectiveness of the method is demonstrated on a case study of a digital humanities database aimed at collecting and curating a repository of evidence of experiences of listening to music. Extensive experiments demonstrate that our hybrid approach outperforms alternative solutions. We also evidence its generality by testing it on a different use case in the digital humanities. |
Hierarchical attention networks to learn latent aspect embeddings for fake news detection Conference Rahul Mishra; Vinay Setty Proceedings of the 2019 ACM SIGIR International Conference on Theory of Information Retrieval, Association for Computing Machinery, New York, 2019, (Pre SFI). @conference{Mishra2019,
title = {Hierarchical attention networks to learn latent aspect embeddings for fake news detection},
author = {Rahul Mishra and Vinay Setty},
url = {https://dl.acm.org/doi/pdf/10.1145/3341981.3344229},
doi = {10.1145/3341981.3344229},
year = {2019},
date = {2019-09-01},
booktitle = {Proceedings of the 2019 {ACM} {SIGIR} International Conference on Theory of Information Retrieval},
pages = {197--204},
publisher = {Association for Computing Machinery},
address = {New York},
abstract = {Recently false claims and misinformation have become rampant in the web, affecting election outcomes, societies and economies. Consequently, fact checking websites such as snopes.com and politifact.com are becoming popular. However, these websites require expert analysis which is slow and not scalable. Many recent works try to solve these challenges using machine learning models trained on a variety of features and a rich lexicon or more recently, deep neural networks to avoid feature engineering. In this paper, we propose hierarchical deep attention networks to learn embeddings for various latent aspects of news. Contrary to existing solutions which only apply word-level self-attention, our model jointly learns the latent aspect embeddings for classifying false claims by applying hierarchical attention. Using several manually annotated high quality datasets such as Politifact, Snopes and Fever we show that these learned aspect embeddings are strong predictors of false claims. We show that latent aspect embeddings learned from attention mechanisms improve the accuracy of false claim detection by up to 13.5% in terms of Macro F1 compared to a state-of-the-art attention mechanism guided by claim-text DeClarE. We also extract and visualize the evidence from the external articles which supports or disproves the claims.},
note = {Pre SFI},
keywords = {Fake news, Hierarchical attention, Latent aspect embeddings, WP3: Media Content Production and Analysis},
pubstate = {published},
tppubtype = {conference}
}
Recently false claims and misinformation have become rampant in the web, affecting election outcomes, societies and economies. Consequently, fact checking websites such as snopes.com and politifact.com are becoming popular. However, these websites require expert analysis which is slow and not scalable. Many recent works try to solve these challenges using machine learning models trained on a variety of features and a rich lexicon or more recently, deep neural networks to avoid feature engineering. In this paper, we propose hierarchical deep attention networks to learn embeddings for various latent aspects of news. Contrary to existing solutions which only apply word-level self-attention, our model jointly learns the latent aspect embeddings for classifying false claims by applying hierarchical attention. Using several manually annotated high quality datasets such as Politifact, Snopes and Fever we show that these learned aspect embeddings are strong predictors of false claims. We show that latent aspect embeddings learned from attention mechanisms improve the accuracy of false claim detection by up to 13.5% in terms of Macro F1 compared to a state-of-the-art attention mechanism guided by claim-text DeClarE. We also extract and visualize the evidence from the external articles which supports or disproves the claims. |