Khadiga Seddik
PhD Candidate
2025
Eknes-Riple, Jørgen; Jeng, Jia Hua; Starke, Alain D.; Seddik, Khadiga; Trattner, Christoph
Hope, Fear, or Anger? How Emotional Framing in a News Recommender System Guides User Preferences Working paper
2025.
@workingpaper{hopefear25,
title = {Hope, Fear, or Anger? How Emotional Framing in a News Recommender System Guides User Preferences},
author = {Jørgen Eknes-Riple and Jia Hua Jeng and Alain D. Starke and Khadiga Seddik and Christoph Trattner},
url = {https://mediafutures.no/recsys_inra_2025/},
year = {2025},
date = {2025-09-26},
urldate = {2025-09-26},
issue = {RecSys2025 - INRA workshop},
abstract = {News recommender systems (NRSs) increasingly leverage artificial intelligence to automate journalistic processes and tailor content to individual users, shaping patterns of news consumption. Emotional reframing of news article content, applied through large language models (LLMs), has the potential to influence which articles users select and to guide them towards specific content. This paper explores how emotional reframing of news articles can influence user engagement, interaction, and openness to non-preferred content. We present the results of a user study (N = 150) on a news platform, in which the presentation of news articles followed a 3x2 mixed research design. News articles were rewritten using an LLM in one of three emotional tones: fearful, angry, or hopeful. These emotionally reframed articles were then either aligned or misaligned with users' self-reported emotional state and topical preferences to examine the effect of emotional alignment. The results show that emotional alignment significantly increased the likelihood that users selected an article as their favorite, even when it belonged to their least preferred topic category. This finding suggests that emotional alignment can guide users toward content they might otherwise avoid, offering a potential means to reduce selective exposure. In terms of behavioral engagement, articles reframed with an angry tone led to significantly longer reading times, while fearfully framed articles were more likely to be clicked. In contrast, hopeful framing resulted in reduced interaction, which suggests that negative rather than positive emotions increase user engagement.},
keywords = {},
pubstate = {published},
tppubtype = {workingpaper}
}
Jeng, Jia Hua; Kasangu, Gloria Anne Babile; Starke, Alain D.; Seddik, Khadiga; Trattner, Christoph
The role of GPT as an adaptive technology in climate change journalism Conference
UMAP 2025, 2025.
@conference{roleofGPT25,
title = {The role of GPT as an adaptive technology in climate change journalism},
author = {Jia Hua Jeng and Gloria Anne Babile Kasangu and Alain D. Starke and Khadiga Seddik and Christoph Trattner},
url = {https://mediafutures.no/umap2025-0401_small/},
year = {2025},
date = {2025-03-28},
booktitle = {UMAP 2025},
abstract = {Recent advancements in Large Language Models (LLMs), such as GPT-4o, have enabled automated content generation and adaptation, including summaries of news articles. To date, LLM use in a journalism context has been understudied, but can potentially address challenges of selective exposure and polarization by adapting content to end users. This study used a one-shot recommender platform to test whether LLM-generated news summaries were evaluated more positively than `standard' 50-word news article previews. Moreover, using climate change news from the Washington Post, we also compared the influence of different `emotional reframing' strategies to rewrite texts and their impact on the environmental behavioral intentions of end users. We used a 2 (between: Summary vs. 50-word previews) x 3 (within: fear, fear-hope or neutral reframing) research design. Participants (N = 300) were first asked to read news articles in our interface and to choose a preferred news article, while later performing an in-depth evaluation task on the usability (e.g., clarity) and trustworthiness of different framing strategies. Results showed that evaluations of summaries, while being positive, were not significantly better than those of previews. We did, however, observe that a fear-hope reframing strategy of a news article, when paired with a GPT-generated summary, led to higher pro-environmental intentions compared to neutral framing. We discuss the potential benefits of this technology.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
2024
Seddik, Khadiga
Exploring the Ethical Challenges of AI and Recommender Systems in the Democratic Public Sphere Conference
NIKT, 2024.
@conference{democratpu24,
title = {Exploring the Ethical Challenges of AI and Recommender Systems in the Democratic Public Sphere},
author = {Khadiga Seddik},
url = {https://mediafutures.no/camera-ready-3/},
year = {2024},
date = {2024-11-25},
urldate = {2024-11-25},
booktitle = {NIKT},
abstract = {The rapid integration of Artificial Intelligence (AI) and Recommender Systems (RSs) into digital platforms has brought both opportunities and ethical concerns. These systems, designed to personalize content and optimize user engagement, have the potential to enhance how individuals navigate information online. However, this paper shifts the focus to the ethical complexities inherent in such systems, particularly the practice of nudging, where subtle algorithmic suggestions influence user behavior without explicit awareness. Issues like misinformation, algorithmic bias, privacy protection, and diminished content diversity raise important questions about the role of AI in shaping public discourse and decision-making processes. Rather than viewing these systems solely as tools for convenience, the paper challenges the reader to consider the deeper implications of AI-driven recommendations for democratic engagement. By examining how these technologies can quietly influence decisions and reduce exposure to different perspectives, it calls for a reevaluation of the ethical priorities in AI and RS design, arguing for a digital space that promotes independence, fairness, and openness and ensuring that AI is used responsibly to support democratic values and protect user rights.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
2023
Seddik, Khadiga; Knudsen, Erik; Trilling, Damian; Trattner, Christoph
Understanding How News Recommender Systems Influence Selective Exposure Conference
Association for Computing Machinery (ACM) RecSys ’23, 2023.
@conference{behavrec2023,
title = {Understanding How News Recommender Systems Influence Selective Exposure},
author = {Khadiga Seddik and Erik Knudsen and Damian Trilling and Christoph Trattner},
url = {https://mediafutures.no/behavrec2023/},
year = {2023},
date = {2023-09-18},
urldate = {2023-09-18},
booktitle = {Association for Computing Machinery (ACM) RecSys ’23},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}