@inproceedings{Mishra2019,
  title     = {Hierarchical Attention Networks to Learn Latent Aspect Embeddings for Fake News Detection},
  author    = {Mishra, Rahul and Setty, Vinay},
  url       = {https://dl.acm.org/doi/pdf/10.1145/3341981.3344229},
  doi       = {10.1145/3341981.3344229},
  year      = {2019},
  date      = {2019-09-01},
  booktitle = {Proceedings of the 2019 {ACM} {SIGIR} International Conference on Theory of Information Retrieval},
  pages     = {197--204},
  publisher = {Association for Computing Machinery},
  address   = {New York},
  abstract  = {Recently false claims and misinformation have become rampant in the web, affecting election outcomes, societies and economies. Consequently, fact checking websites such as snopes.com and politifact.com are becoming popular. However, these websites require expert analysis which is slow and not scalable. Many recent works try to solve these challenges using machine learning models trained on a variety of features and a rich lexicon or more recently, deep neural networks to avoid feature engineering. In this paper, we propose hierarchical deep attention networks to learn embeddings for various latent aspects of news. Contrary to existing solutions which only apply word-level self-attention, our model jointly learns the latent aspect embeddings for classifying false claims by applying hierarchical attention. Using several manually annotated high quality datasets such as Politifact, Snopes and Fever we show that these learned aspect embeddings are strong predictors of false claims. We show that latent aspect embeddings learned from attention mechanisms improve the accuracy of false claim detection by up to 13.5% in terms of Macro F1 compared to a state-of-the-art attention mechanism guided by claim-text DeClarE. We also extract and visualize the evidence from the external articles which supports or disproves the claims},
  note      = {Pre SFI},
  keywords  = {Fake news, Hierarchical attention, Latent aspect embeddings, WP3: Media Content Production and Analysis},
  pubstate  = {published},
  tppubtype = {conference}
}