
Khadiga Seddik
PhD Candidate
University of Bergen
Khadiga Seddik is a PhD candidate at the Department of Information Science and Media Studies at the University of Bergen, Norway. She is a team member of the project NEWSREC: The Double-edged Sword of News Recommenders’ Impact on Democracy.
2025
Jeng, Jia Hua; Kasangu, Gloria Anne Babile; Starke, Alain D.; Seddik, Khadiga; Trattner, Christoph
The role of GPT as an adaptive technology in climate change journalism Conference
UMAP 2025, 2025.
@inproceedings{roleofGPT25,
  title     = {The role of {GPT} as an adaptive technology in climate change journalism},
  author    = {Jia Hua Jeng and Gloria Anne Babile Kasangu and Alain D. Starke and Khadiga Seddik and Christoph Trattner},
  url       = {https://mediafutures.no/umap2025-0401_small/},
  year      = {2025},
  date      = {2025-03-28},
  booktitle = {UMAP 2025},
  abstract  = {Recent advancements in Large Language Models (LLMs), such as GPT-4o, have enabled automated content generation and adaptation, including summaries of news articles. To date, LLM use in a journalism context has been understudied, but can potentially address challenges of selective exposure and polarization by adapting content to end users. This study used a one-shot recommender platform to test whether LLM-generated news summaries were evaluated more positively than `standard' 50-word news article previews. Moreover, using climate change news from the Washington Post, we also compared the influence of different `emotional reframing' strategies to rewrite texts and their impact on the environmental behavioral intentions of end users. We used a 2 (between: Summary vs. 50-word previews) x 3 (within: fear, fear-hope or neutral reframing) research design. Participants (N = 300) were first asked to read news articles in our interface and to choose a preferred news article, while later performing an in-depth evaluation task on the usability (e.g., clarity) and trustworthiness of different framing strategies. Results showed that evaluations of summaries, while being positive, were not significantly better than those of previews. We did, however, observe that a fear-hope reframing strategy of a news article, when paired with a GPT-generated summary, led to higher pro-environmental intentions compared to neutral framing. We discuss the potential benefits of this technology.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Recent advancements in Large Language Models (LLMs), such as GPT-4o, have enabled automated content generation and adaptation, including summaries of news articles. To date, LLM use in a journalism context has been understudied, but can potentially address challenges of selective exposure and polarization by adapting content to end users. This study used a one-shot recommender platform to test whether LLM-generated news summaries were evaluated more positively than `standard' 50-word news article previews. Moreover, using climate change news from the Washington Post, we also compared the influence of different `emotional reframing' strategies to rewrite texts and their impact on the environmental behavioral intentions of end users. We used a 2 (between: Summary vs. 50-word previews) x 3 (within: fear, fear-hope or neutral reframing) research design. Participants (N = 300) were first asked to read news articles in our interface and to choose a preferred news article, while later performing an in-depth evaluation task on the usability (e.g., clarity) and trustworthiness of different framing strategies. Results showed that evaluations of summaries, while being positive, were not significantly better than those of previews. We did, however, observe that a fear-hope reframing strategy of a news article, when paired with a GPT-generated summary, led to higher pro-environmental intentions compared to neutral framing. We discuss the potential benefits of this technology.
2024
Seddik, Khadiga
Exploring the Ethical Challenges of AI and Recommender Systems in the Democratic Public Sphere Conference
NIKT, 2024.
@inproceedings{democratpu24,
  title     = {Exploring the Ethical Challenges of {AI} and Recommender Systems in the Democratic Public Sphere},
  author    = {Khadiga Seddik},
  url       = {https://mediafutures.no/camera-ready-3/},
  year      = {2024},
  date      = {2024-11-25},
  urldate   = {2024-11-25},
  booktitle = {NIKT},
  abstract  = {The rapid integration of Artificial Intelligence (AI) and Recommender Systems (RSs) into digital platforms has brought both opportunities and ethical concerns. These systems, designed to personalize content and optimize user engagement, have the potential to enhance how individuals navigate information online. However, this paper shifts the focus to the ethical complexities inherent in such systems, particularly the practice of nudging, where subtle algorithmic suggestions influence user behavior without explicit awareness. Issues like misinformation, algorithmic bias, privacy protection, and diminished content diversity raise important questions about the role of AI in shaping public discourse and decision-making processes. Rather than viewing these systems solely as tools for convenience, the paper challenges the reader to consider the deeper implications of AI-driven recommendations on democratic engagement. By examining how these technologies can quietly influence decisions and reduce exposure to different perspectives, it calls for a reevaluation of the ethical priorities in AI and RSs design. The paper calls for creating a digital space that promotes independence, fairness, and openness, making sure AI is used responsibly to support democratic values and protect user rights.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
The rapid integration of Artificial Intelligence (AI) and Recommender Systems (RSs) into digital platforms has brought both opportunities and ethical concerns. These systems, designed to personalize content and optimize user engagement, have the potential to enhance how individuals navigate information online. However, this paper shifts the focus to the ethical complexities inherent in such systems, particularly the practice of nudging, where subtle algorithmic suggestions influence user behavior without explicit awareness. Issues like misinformation, algorithmic bias, privacy protection, and diminished content diversity raise important questions about the role of AI in shaping public discourse and decision-making processes. Rather than viewing these systems solely as tools for convenience, the paper challenges the reader to consider the deeper implications of AI-driven recommendations on democratic engagement. By examining how these technologies can quietly influence decisions and reduce exposure to different perspectives, it calls for a reevaluation of the ethical priorities in AI and RSs design. The paper calls for creating a digital space that promotes independence, fairness, and openness, making sure AI is used responsibly to support democratic values and protect user rights.
2023
Seddik, Khadiga; Knudsen, Erik; Trilling, Damian; Trattner, Christoph
Understanding How News Recommender Systems Influence Selective Exposure Conference
Association for Computing Machinery (ACM) RecSys ’23, 2023.
@inproceedings{behavrec2023,
  title     = {Understanding How News Recommender Systems Influence Selective Exposure},
  author    = {Khadiga Seddik and Erik Knudsen and Damian Trilling and Christoph Trattner},
  url       = {https://mediafutures.no/behavrec2023/},
  year      = {2023},
  date      = {2023-09-18},
  urldate   = {2023-09-18},
  booktitle = {Association for Computing Machinery ({ACM}) RecSys '23},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}