2023
|
Trustworthy Journalism Through AI Journal Article Opdahl, Andreas L.; Tessem, Bjørnar; Dang-Nguyen, Duc-Tien; Motta, Enrico; Setty, Vinay; Throndsen, Eivind; Tverberg, Are; Trattner, Christoph In: Data & Knowledge Engineering (DKE), Elsevier, 2023. @article{Opdahl2023,
title = {Trustworthy Journalism Through {AI}},
author = {Andreas L. Opdahl and Bjørnar Tessem and Duc-Tien Dang-Nguyen and Enrico Motta and Vinay Setty and Eivind Throndsen and Are Tverberg and Christoph Trattner},
url = {https://mediafutures.no/1-s2-0-s0169023x23000423-main/},
year = {2023},
date = {2023-04-29},
urldate = {2023-04-29},
journal = {Data \& Knowledge Engineering (DKE)},
publisher = {Elsevier},
abstract = {Quality journalism has become more important than ever due to the need for quality and trustworthy media outlets that can provide accurate information to the public and help to address and counterbalance the wide and rapid spread of disinformation. At the same time, quality journalism is under pressure due to loss of revenue and competition from alternative information providers. This vision paper discusses how recent advances in Artificial Intelligence (AI), and in Machine Learning (ML) in particular, can be harnessed to support efficient production of high-quality journalism. From a news consumer perspective, the key parameter here concerns the degree of trust that is engendered by quality news production. For this reason, the paper will discuss how AI techniques can be applied to all aspects of news, at all stages of its production cycle, to increase trust.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Quality journalism has become more important than ever due to the need for quality and trustworthy media outlets that can provide accurate information to the public and help to address and counterbalance the wide and rapid spread of disinformation. At the same time, quality journalism is under pressure due to loss of revenue and competition from alternative information providers. This vision paper discusses how recent advances in Artificial Intelligence (AI), and in Machine Learning (ML) in particular, can be harnessed to support efficient production of high-quality journalism. From a news consumer perspective, the key parameter here concerns the degree of trust that is engendered by quality news production. For this reason, the paper will discuss how AI techniques can be applied to all aspects of news, at all stages of its production cycle, to increase trust. |
2021
|
Responsible media technology and AI: challenges and research directions Journal Article Trattner, Christoph; Jannach, Dietmar; Motta, Enrico; Meijer, Irene Costera; Diakopoulos, Nicholas; Elahi, Mehdi; Opdahl, Andreas L.; Tessem, Bjørnar; Borch, Njål; Fjeld, Morten; Øvrelid, Lilja; Smedt, Koenraad De; Moe, Hallvard In: AI and Ethics, 2021. @article{cristin2000622,
title = {Responsible media technology and {AI}: challenges and research directions},
author = {Christoph Trattner and Dietmar Jannach and Enrico Motta and Irene Costera Meijer and Nicholas Diakopoulos and Mehdi Elahi and Andreas L. Opdahl and Bjørnar Tessem and Njål Borch and Morten Fjeld and Lilja Øvrelid and Koenraad {De Smedt} and Hallvard Moe},
url = {https://app.cristin.no/results/show.jsf?id=2000622, Cristin
https://link.springer.com/content/pdf/10.1007/s43681-021-00126-4.pdf},
doi = {10.1007/s43681-021-00126-4},
year = {2021},
date = {2021-12-20},
urldate = {2021-12-20},
journal = {AI and Ethics},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
|
WP3 2021 M3.1 Report The industrial expectations to, needs from and wishes for the work package Technical Report Tverberg, Are; Agasøster, Ingrid; Grønbæck, Mads; Monsen, Marius; Strand, Robert; Eikeland, Kristian; Throndsen, Eivind; Westvang, Lars; Knudsen, Tove B.; Fiskerud, Eivind; Skår, Rune; Stoppel, Sergej; Berven, Arne; Pedersen, Glenn Skare; Macklin, Paul; Cuomo, Kenneth; Vredenberg, Loek; Tolonen, Kristian; Opdahl, Andreas L.; Tessem, Bjørnar; Veres, Csaba; Dang-Nguyen, Duc-Tien; Motta, Enrico; Setty, Vinay Jayarama University of Bergen, MediaFutures 2021. @techreport{Tverberg2021,
  title       = {WP3 2021 M3.1 Report The industrial expectations to, needs from and wishes for the work package},
  author      = {Are Tverberg and Ingrid Agasøster and Mads Grønbæck and Marius Monsen and Robert Strand and Kristian Eikeland and Eivind Throndsen and Lars Westvang and Tove B. Knudsen and Eivind Fiskerud and Rune Skår and Sergej Stoppel and Arne Berven and Glenn Skare Pedersen and Paul Macklin and Kenneth Cuomo and Loek Vredenberg and Kristian Tolonen and Andreas L. Opdahl and Bjørnar Tessem and Csaba Veres and Duc-Tien Dang-Nguyen and Enrico Motta and Vinay Jayarama Setty},
  url         = {https://mediafutures.no/wp3-q2-2021-m3-1-report-by-the-industrial-partners-final-2/},
  year        = {2021},
  date        = {2021-07-25},
  urldate     = {2021-07-25},
  institution = {University of Bergen, MediaFutures},
  keywords    = {},
  pubstate    = {published},
  tppubtype   = {techreport}
}
|
2020
|
AI-KG: an automatically generated knowledge graph of artificial intelligence Conference Dessì, Danilo; Osborne, Francesco; Recupero, Diego Reforgiato; Buscaldi, Davide; Motta, Enrico; Sack, Harald International Semantic Web Conference, Springer, 2020, (Pre SFI). @conference{Dessì2020,
title = {{AI-KG}: an automatically generated knowledge graph of artificial intelligence},
author = {Danilo Dessì and Francesco Osborne and Diego Reforgiato Recupero and Davide Buscaldi and Enrico Motta and Harald Sack},
url = {https://www.researchgate.net/publication/344991487_AI-KG_an_Automatically_Generated_Knowledge_Graph_of_Artificial_Intelligence},
year = {2020},
date = {2020-11-01},
booktitle = {International Semantic Web Conference},
pages = {127--143},
publisher = {Springer},
abstract = {Scientific knowledge has been traditionally disseminated and preserved through research articles published in journals, conference proceedings, and online archives. However, this article-centric paradigm has been often criticized for not allowing to automatically process, categorize, and reason on this knowledge. An alternative vision is to generate a semantically rich and interlinked description of the content of research publications. In this paper, we present the Artificial Intelligence Knowledge Graph (AI-KG), a large-scale automatically generated knowledge graph that describes 820K research entities. AI-KG includes about 14M RDF triples and 1.2M reified statements extracted from 333K research publications in the field of AI, and describes 5 types of entities (tasks, methods, metrics, materials, others) linked by 27 relations. AI-KG has been designed to support a variety of intelligent services for analyzing and making sense of research dynamics, supporting researchers in their daily job, and helping to inform decision-making in funding bodies and research policymakers. AI-KG has been generated by applying an automatic pipeline that extracts entities and relationships using three tools: DyGIE++, Stanford CoreNLP, and the CSO Classifier. It then integrates and filters the resulting triples using a combination of deep learning and semantic technologies in order to produce a high-quality knowledge graph. This pipeline was evaluated on a manually crafted gold standard, yielding competitive results. AI-KG is available under CC BY 4.0 and can be downloaded as a dump or queried via a SPARQL endpoint.},
note = {Pre SFI},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Scientific knowledge has been traditionally disseminated and preserved through research articles published in journals, conference proceedings, and online archives. However, this article-centric paradigm has been often criticized for not allowing to automatically process, categorize, and reason on this knowledge. An alternative vision is to generate a semantically rich and interlinked description of the content of research publications. In this paper, we present the Artificial Intelligence Knowledge Graph (AI-KG), a large-scale automatically generated knowledge graph that describes 820K research entities. AI-KG includes about 14M RDF triples and 1.2M reified statements extracted from 333K research publications in the field of AI, and describes 5 types of entities (tasks, methods, metrics, materials, others) linked by 27 relations. AI-KG has been designed to support a variety of intelligent services for analyzing and making sense of research dynamics, supporting researchers in their daily job, and helping to inform decision-making in funding bodies and research policymakers. AI-KG has been generated by applying an automatic pipeline that extracts entities and relationships using three tools: DyGIE++, Stanford CoreNLP, and the CSO Classifier. It then integrates and filters the resulting triples using a combination of deep learning and semantic technologies in order to produce a high-quality knowledge graph. This pipeline was evaluated on a manually crafted gold standard, yielding competitive results. AI-KG is available under CC BY 4.0 and can be downloaded as a dump or queried via a SPARQL endpoint. |
Analysis and design of computational news angles Journal Article Motta, Enrico; Daga, Enrico; Opdahl, Andreas L.; Tessem, Bjørnar In: IEEE Access, vol. 8, pp. 120613-120626, 2020, (Pre SFI). @article{Motta2020,
title = {Analysis and design of computational news angles},
author = {Enrico Motta and Enrico Daga and Andreas L. Opdahl and Bjørnar Tessem},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=9127417},
doi = {10.1109/ACCESS.2020.3005513},
year = {2020},
date = {2020-06-29},
urldate = {2020-06-29},
journal = {IEEE Access},
volume = {8},
pages = {120613--120626},
abstract = {A key skill for a journalist is the ability to assess the newsworthiness of an event or situation. To this purpose journalists often rely on news angles, conceptual criteria that are used both i) to assess whether something is newsworthy and also ii) to shape the structure of the resulting news item. As journalism becomes increasingly computer-supported, and more and more sources of potentially newsworthy data become available in real time, it makes sense to try and equip journalistic software tools with operational versions of news angles, so that, when searching this vast data space, these tools can both identify effectively the events most relevant to the target audience, and also link them to appropriate news angles. In this paper we analyse the notion of news angle and, in particular, we i) introduce a formal framework and data schema for representing news angles and related concepts and ii) carry out a preliminary analysis and characterization of a number of commonly used news angles, both in terms of our formal model and also in terms of the computational reasoning capabilities that are needed to apply them effectively to real-world scenarios. This study provides a stepping stone towards our ultimate goal of realizing a solution capable of exploiting a library of news angles to identify potentially newsworthy events in a large journalistic data space.},
note = {Pre SFI},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
A key skill for a journalist is the ability to assess the newsworthiness of an event or situation. To this purpose journalists often rely on news angles, conceptual criteria that are used both i) to assess whether something is newsworthy and also ii) to shape the structure of the resulting news item. As journalism becomes increasingly computer-supported, and more and more sources of potentially newsworthy data become available in real time, it makes sense to try and equip journalistic software tools with operational versions of news angles, so that, when searching this vast data space, these tools can both identify effectively the events most relevant to the target audience, and also link them to appropriate news angles. In this paper we analyse the notion of news angle and, in particular, we i) introduce a formal framework and data schema for representing news angles and related concepts and ii) carry out a preliminary analysis and characterization of a number of commonly used news angles, both in terms of our formal model and also in terms of the computational reasoning capabilities that are needed to apply them effectively to real-world scenarios. This study provides a stepping stone towards our ultimate goal of realizing a solution capable of exploiting a library of news angles to identify potentially newsworthy events in a large journalistic data space. |
Towards a Framework for Visual Intelligence in Service Robotics: Epistemic Requirements and Gap Analysis Journal Article Chiatti, Agnese; Motta, Enrico; Daga, Enrico In: Proceedings of the 17th International Conference on Principles of Knowledge Representation and Reasoning (KR 2020), pp. 905–916, 2020, (Pre SFI). @article{Chiatti2020,
title = {Towards a Framework for Visual Intelligence in Service Robotics: Epistemic Requirements and Gap Analysis},
author = {Agnese Chiatti and Enrico Motta and Enrico Daga},
url = {https://arxiv.org/ftp/arxiv/papers/2003/2003.06171.pdf},
year = {2020},
date = {2020-03-13},
journal = {Proceedings of the 17th International Conference on Principles of Knowledge Representation and Reasoning (KR 2020)},
pages = {905--916},
abstract = {A key capability required by service robots operating in real-world, dynamic environments is that of Visual Intelligence, i.e., the ability to use their vision system, reasoning components and background knowledge to make sense of their environment. In this paper, we analyze the epistemic requirements for Visual Intelligence, both in a top-down fashion, using existing frameworks for human-like Visual Intelligence in the literature, and from the bottom up, based on the errors emerging from object recognition trials in a real-world robotic scenario. Finally, we use these requirements to evaluate current knowledge bases for Service Robotics and to identify gaps in the support they provide for Visual Intelligence. These gaps provide the basis of a research agenda for developing more effective knowledge representations for Visual Intelligence.},
note = {Pre SFI},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
A key capability required by service robots operating in real-world, dynamic environments is that of Visual Intelligence, i.e., the ability to use their vision system, reasoning components and background knowledge to make sense of their environment. In this paper, we analyze the epistemic requirements for Visual Intelligence, both in a top-down fashion, using existing frameworks for human-like Visual Intelligence in the literature, and from the bottom up, based on the errors emerging from object recognition trials in a real-world robotic scenario. Finally, we use these requirements to evaluate current knowledge bases for Service Robotics and to identify gaps in the support they provide for Visual Intelligence. These gaps provide the basis of a research agenda for developing more effective knowledge representations for Visual Intelligence. |
2019
|
Capturing themed evidence, a hybrid approach Conference Daga, Enrico; Motta, Enrico Proceedings of the 10th International Conference on Knowledge Capture, 2019, (Pre SFI). @conference{Daga2019,
title = {Capturing themed evidence, a hybrid approach},
author = {Enrico Daga and Enrico Motta},
url = {https://dl.acm.org/doi/pdf/10.1145/3360901.3364415},
year = {2019},
date = {2019-09-01},
booktitle = {Proceedings of the 10th International Conference on Knowledge Capture},
pages = {93--100},
abstract = {The task of identifying pieces of evidence in texts is of fundamental importance in supporting qualitative studies in various domains, especially in the humanities. In this paper, we coin the expression themed evidence, to refer to (direct or indirect) traces of a fact or situation relevant to a theme of interest and study the problem of identifying them in texts. We devise a generic framework aimed at capturing themed evidence in texts based on a hybrid approach, combining statistical natural language processing, background knowledge, and Semantic Web technologies. The effectiveness of the method is demonstrated on a case study of a digital humanities database aimed at collecting and curating a repository of evidence of experiences of listening to music. Extensive experiments demonstrate that our hybrid approach outperforms alternative solutions. We also evidence its generality by testing it on a different use case in the digital humanities.},
note = {Pre SFI},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
The task of identifying pieces of evidence in texts is of fundamental importance in supporting qualitative studies in various domains, especially in the humanities. In this paper, we coin the expression themed evidence, to refer to (direct or indirect) traces of a fact or situation relevant to a theme of interest and study the problem of identifying them in texts. We devise a generic framework aimed at capturing themed evidence in texts based on a hybrid approach, combining statistical natural language processing, background knowledge, and Semantic Web technologies. The effectiveness of the method is demonstrated on a case study of a digital humanities database aimed at collecting and curating a repository of evidence of experiences of listening to music. Extensive experiments demonstrate that our hybrid approach outperforms alternative solutions. We also evidence its generality by testing it on a different use case in the digital humanities. |
2018
|
AUGUR: forecasting the emergence of new research topics Conference Salatino, Angelo Antonio; Osborne, Francesco; Motta, Enrico Proceedings of the 18th ACM/IEEE on Joint Conference on Digital Libraries, 2018, (Pre SFI). @conference{Salatino2018,
title = {{AUGUR}: forecasting the emergence of new research topics},
author = {Angelo Antonio Salatino and Francesco Osborne and Enrico Motta},
url = {https://www.researchgate.net/publication/325492541_AUGUR_Forecasting_the_Emergence_of_New_Research_Topics},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the 18th ACM/IEEE on Joint Conference on Digital Libraries},
pages = {303--312},
abstract = {Being able to rapidly recognise new research trends is strategic for many stakeholders, including universities, institutional funding bodies, academic publishers and companies. The literature presents several approaches to identifying the emergence of new research topics, which rely on the assumption that the topic is already exhibiting a certain degree of popularity and consistently referred to by a community of researchers. However, detecting the emergence of a new research area at an embryonic stage, i.e., before the topic has been consistently labelled by a community of researchers and associated with a number of publications, is still an open challenge. We address this issue by introducing Augur, a novel approach to the early detection of research topics. Augur analyses the diachronic relationships between research areas and is able to detect clusters of topics that exhibit dynamics correlated with the emergence of new research topics. Here we also present the Advanced Clique Percolation Method (ACPM), a new community detection algorithm developed specifically for supporting this task. Augur was evaluated on a gold standard of 1,408 debutant topics in the 2000-2011 interval and outperformed four alternative approaches in terms of both precision and recall.},
note = {Pre SFI},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Being able to rapidly recognise new research trends is strategic for many stakeholders, including universities, institutional funding bodies, academic publishers and companies. The literature presents several approaches to identifying the emergence of new research topics, which rely on the assumption that the topic is already exhibiting a certain degree of popularity and consistently referred to by a community of researchers. However, detecting the emergence of a new research area at an embryonic stage, i.e., before the topic has been consistently labelled by a community of researchers and associated with a number of publications, is still an open challenge. We address this issue by introducing Augur, a novel approach to the early detection of research topics. Augur analyses the diachronic relationships between research areas and is able to detect clusters of topics that exhibit dynamics correlated with the emergence of new research topics. Here we also present the Advanced Clique Percolation Method (ACPM), a new community detection algorithm developed specifically for supporting this task. Augur was evaluated on a gold standard of 1,408 debutant topics in the 2000-2011 interval and outperformed four alternative approaches in terms of both precision and recall. |