Prof. Morten Fjeld
Work Package Co-Leader
2024
Andrews, Peter; Nordberg, Oda Elise; Guribye, Frode; Fjeld, Morten; Borch, Njål
Designing for Automated Sports Commentary Systems Conference
IMX'24, 2024.
@conference{designing_for_automated24,
  title     = {Designing for Automated Sports Commentary Systems},
  author    = {Peter Andrews and Oda Elise Nordberg and Frode Guribye and Morten Fjeld and Njål Borch},
  url       = {https://mediafutures.no/designing_for_automated_sports_commentary_systems-2/},
  year      = {2024},
  date      = {2024-06-12},
  booktitle = {IMX'24},
  abstract  = {Advancements in Natural Language Processing (NLP) and Computer Vision (CV) are revolutionizing how we experience sports broadcasting. Traditionally, sports commentary has played a crucial role in enhancing viewer understanding and engagement with live games. Yet, the prospects of automated commentary, especially in light of these technological advancements and their impact on viewers’ experience, remain largely unexplored. This paper elaborates upon an innovative automated commentary system that integrates NLP and CV to provide a multimodal experience, combining auditory feedback through text-to-speech and visual cues, known as italicizing, for real-time in-game commentary. The system supports color commentary, which aims to inform the viewer of information surrounding the game by pulling additional content from a database. Moreover, it also supports play-by-play commentary covering in-game developments derived from an event system based on CV. As the system reinvents the role of commentary in sports video, we must consider the design and implications of multimodal artificial commentators. A focused user study with eight participants aimed at understanding the design implications of such multimodal artificial commentators reveals critical insights. Key findings emphasize the importance of language precision, content relevance, and delivery style in automated commentary, underscoring the necessity for personalization to meet diverse viewer preferences. Our results validate the potential value and effectiveness of multimodal feedback and derive design considerations, particularly in personalizing content to revolutionize the role of commentary in sports broadcasts.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Andrews, Peter; Borch, Njål; Fjeld, Morten
FootyVision: Multi-Object Tracking, Localisation, and Augmentation of Players and Ball in Football Video Conference
ACM ICMIP, 2024.
@conference{Footyvision1,
  title     = {{FootyVision}: Multi-Object Tracking, Localisation, and Augmentation of Players and Ball in Football Video},
  author    = {Peter Andrews and Njål Borch and Morten Fjeld},
  url       = {https://mediafutures.no/peterandrews-footyvision-icmip24-final/},
  year      = {2024},
  date      = {2024-04-20},
  booktitle = {ACM ICMIP},
  abstract  = {Football video content analysis is a rapidly evolving field aiming to enrich the viewing experience of football matches. Current research often focuses on specific tasks like player and/or ball detection, tracking, and localisation in top-down views. Our study strives to integrate these efforts into a comprehensive Multi-Object Tracking (MOT) model capable of handling perspective transformations. Our framework, FootyVision, employs a YOLOv7 backbone trained on an extended player and ball dataset. The MOT module builds a gallery and assigns identities via the Hungarian algorithm based on feature embeddings, bounding box intersection over union, distance, and velocity. A novel component of our model is the perspective transformation module that leverages activation maps from the YOLOv7 backbone to compute homographies using lines, intersection points, and ellipses. This method effectively adapts to dynamic and uncalibrated video data, even in viewpoints with limited visual information. In terms of performance, FootyVision sets new benchmarks. The model achieves a mean average precision (mAP) of 95.7% and an F1-score of 95.5% in object detection. For MOT, it demonstrates robust capabilities, with an IDF1 score of approximately 93% on both ISSIA and SoccerNet datasets. For SoccerNet, it reaches a MOTA of 94.04% and shows competitive results for ISSIA. Additionally, FootyVision scores a HOTA(0) of 93.1% and an overall HOTA of 72.16% for the SoccerNet dataset. Our ablation study confirms the effectiveness of the selected tracking features and identifies key attributes for further improvement. While the model excels in maintaining track accuracy throughout the testing dataset, we recognise the potential to enhance spatial-location accuracy.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Andrews, Peter; Nordberg, Oda Elise; Guribye, Frode; Fujita, Kazuyuki; Fjeld, Morten; Borch, Njål
AiCommentator: A Multimodal Conversational Agent for Embedded Visualization in Football Viewing Conference
Intelligent User Interfaces (IUI), 2024.
@conference{AIComment,
  title     = {{AiCommentator}: A Multimodal Conversational Agent for Embedded Visualization in Football Viewing},
  author    = {Peter Andrews and Oda Elise Nordberg and Frode Guribye and Kazuyuki Fujita and Morten Fjeld and Njål Borch},
  url       = {https://mediafutures.no/acm_iui_24_aicommentator_peterandrews-1/},
  year      = {2024},
  date      = {2024-03-18},
  urldate   = {2024-03-18},
  booktitle = {Intelligent User Interfaces (IUI)},
  abstract  = {Traditionally, sports commentators provide viewers with diverse information, encompassing in-game developments and player performances. Yet young adult football viewers increasingly use mobile devices for deeper insights during football matches. Such insights into players on the pitch and performance statistics support viewers’ understanding of game stakes, creating a more engaging viewing experience. Inspired by commentators’ traditional roles and to incorporate information into a single platform, we developed AiCommentator, a Multimodal Conversational Agent (MCA) for embedded visualization and conversational interactions in football broadcast video. AiCommentator integrates embedded visualization, either with an automated non-interactive or with a responsive interactive commentary mode. Our system builds upon multimodal techniques, integrating computer vision and large language models, to demonstrate ways for designing tailored, interactive sports-viewing content. AiCommentator’s event system infers game states based on a multi-object tracking algorithm and computer vision backend, facilitating automated responsive commentary. We address three key topics: evaluating young adults’ satisfaction and immersion across the two viewing modes, enhancing viewer understanding of in-game events and players on the pitch, and devising methods to present this information in a usable manner. In a mixed-method evaluation (n=16) of AiCommentator, we found that the participants appreciated aspects of both system modes but preferred the interactive mode, expressing a higher degree of engagement and satisfaction. Our paper reports on our development of AiCommentator and presents the results from our user study, demonstrating the promise of interactive MCA for a more engaging sports viewing experience. 
Systems like AiCommentator could be pivotal in transforming the interactivity and accessibility of sports content, revolutionizing how sports viewers engage with video content.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
2022
Ly, Duy-Nam; La, Thanh-Thai; Le, Khanh-Duy; Nguyen, Cuong; Fjeld, Morten; Tran, Thanh Ngoc-Dat; Tran, Minh-Triet
360TourGuiding: Towards Virtual Reality Training for Tour Guiding Conference
360TourGuiding: Towards Virtual Reality Training for Tour Guiding, 2022.
@conference{Ly2022,
  title     = {{360TourGuiding}: Towards Virtual Reality Training for Tour Guiding},
  author    = {Duy-Nam Ly and Thanh-Thai La and Khanh-Duy Le and Cuong Nguyen and Morten Fjeld and Thanh Ngoc-Dat Tran and Minh-Triet Tran},
  url       = {https://mediafutures.no/3528575-3551436-compressed-4/},
  year      = {2022},
  date      = {2022-09-27},
  urldate   = {2022-09-27},
  booktitle = {360TourGuiding: Towards Virtual Reality Training for Tour Guiding},
  abstract  = {Tour guiding plays an important role in turning sightseeing tours into memorable experiences. Tour guides, especially inexperienced ones, must practice intensively to perfect their craft. It is key that guides acquire knowledge about sights, in-situ presentation skills, and perfection ability to interact with and engage tourists. Therefore, tour-guide education requires on-site training at the place of interest including live tourist audiences. However, for modest budgets, such setups are costly and tourism students have to practice tour guiding at home or in simulated class-room setups. It has become a challenge for students to adequately prepare themselves for jobs in terms of relevant knowledge and skills. To tackle this problem, we propose 360TourGuiding, a VR system enabling its users to practice tour guiding with 360 travel videos plus the attendance of remote audiences participating through their mobile and personal device. This paper reports on the concept, on our design, current implementation, and on a pilot study with the current 360TourGuiding prototype. Based on qualitative feedback gained through the pilot study, we discuss possible system improvements, future system updates, and plans for empirical evaluation.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Wang, Ziming; Hu, Ziyi; Man, Yemao; Fjeld, Morten
A Collaborative System of Flying and Ground Robots with Universal Physical Coupling Interface (PCI), and the Potential Interactive Applications Conference
A Collaborative System of Flying and Ground Robots with Universal Physical Coupling Interface (PCI), and the Potential Interactive Applications, 2022.
@conference{Wang2022,
  title     = {A Collaborative System of Flying and Ground Robots with Universal Physical Coupling Interface ({PCI}), and the Potential Interactive Applications},
  author    = {Ziming Wang and Ziyi Hu and Yemao Man and Morten Fjeld},
  year      = {2022},
  date      = {2022-04-29},
  urldate   = {2022-04-29},
  booktitle = {A Collaborative System of Flying and Ground Robots with Universal Physical Coupling Interface (PCI), and the Potential Interactive Applications},
  abstract  = {Flying and ground robots complement each other in terms of their advantages and disadvantages. We propose a collaborative system combining flying and ground robots, using a universal physical coupling interface (PCI) that allows for momentary connections and disconnections between multiple robots/devices. The proposed system may better utilize the complementary advantages of both flying and ground robots. We also describe various potential scenarios where such a system could be of benefit to interact with humans - namely, remote field works and rescue missions, transportation, healthcare, and education. Finally, we discuss the opportunities and challenges of such systems and consider deeper questions which should be studied in future work.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Fjeld, Morten; Hoshikawa, Yukai; Fujita, Kazuyuki; Takashima, Kazuki; Kitamura, Yoshifumi
RedirectedDoors: Redirection While Opening Doors in Virtual Reality Conference
RedirectedDoors: Redirection While Opening Doors in Virtual Reality, 2022.
@conference{Fjeld2022,
  title     = {{RedirectedDoors}: Redirection While Opening Doors in Virtual Reality},
  author    = {Morten Fjeld and Yukai Hoshikawa and Kazuyuki Fujita and Kazuki Takashima and Yoshifumi Kitamura},
  year      = {2022},
  date      = {2022-03-12},
  urldate   = {2022-03-12},
  booktitle = {RedirectedDoors: Redirection While Opening Doors in Virtual Reality},
  abstract  = {We propose RedirectedDoors, a novel technique for redirection in VR focused on door-opening behavior. This technique manipulates the user's walking direction by rotating the entire virtual environment at a certain angular ratio of the door being opened, while the virtual door's position is kept unmanipulated to ensure door-opening realism. Results of a user study using two types of door-opening interfaces (with and without a passive haptic prop) revealed that the estimated detection thresholds generally showed a higher space efficiency of redirection. Following the results, we derived usage guidelines for our technique that provide lower noticeability and higher acceptability.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
2021
Trattner, Christoph; Jannach, Dietmar; Motta, Enrico; Meijer, Irene Costera; Diakopoulos, Nicholas; Elahi, Mehdi; Opdahl, Andreas L.; Tessem, Bjørnar; Borch, Njål; Fjeld, Morten; Øvrelid, Lilja; Smedt, Koenraad De; Moe, Hallvard
Responsible media technology and AI: challenges and research directions Journal Article
In: AI and Ethics, 2021.
@article{cristin2000622,
  title     = {Responsible media technology and {AI}: challenges and research directions},
  author    = {Christoph Trattner and Dietmar Jannach and Enrico Motta and Irene Costera Meijer and Nicholas Diakopoulos and Mehdi Elahi and Andreas L. Opdahl and Bjørnar Tessem and Njål Borch and Morten Fjeld and Lilja Øvrelid and Koenraad De Smedt and Hallvard Moe},
  url       = {https://app.cristin.no/results/show.jsf?id=2000622, Cristin
https://link.springer.com/content/pdf/10.1007/s43681-021-00126-4.pdf},
  doi       = {10.1007/s43681-021-00126-4},
  year      = {2021},
  date      = {2021-12-20},
  urldate   = {2021-12-20},
  journal   = {AI and Ethics},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Le, Khanh-Duy; Tran, Tanh Quang; Chlasta, Karol; Krejtz, Krzysztof; Fjeld, Morten; Kunz, Andreas
VXSlate: Exploring Combination of Head Movements and Mobile Touch for Large Virtual Display Interaction Proceedings
Association for Computing Machinery, New York, NY, USA, 2021, ISBN: 978-1-4503-8476-6.
@proceedings{Kunz2021,
  title     = {{VXSlate}: Exploring Combination of Head Movements and Mobile Touch for Large Virtual Display Interaction},
  author    = {Khanh-Duy Le and Tanh Quang Tran and Karol Chlasta and Krzysztof Krejtz and Morten Fjeld and Andreas Kunz},
  doi       = {10.1145/3461778.3462076},
  isbn      = {978-1-4503-8476-6},
  year      = {2021},
  date      = {2021-06-28},
  booktitle = {DIS '21: Designing Interactive Systems Conference 2021},
  pages     = {283--297},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {proceedings}
}
Le, Khanh-Duy; Tran, Tanh Quang; Chlasta, Karol; Krejtz, Krzysztof; Fjeld, Morten; Kunz, Andreas
VXSlate: Combining Head Movement and Mobile Touch for Large Virtual Display Interaction Conference
2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW). IEEE The Institute of Electrical and Electronics Engineers, Inc., 2021.
@conference{Le2021b,
  title        = {{VXSlate}: Combining Head Movement and Mobile Touch for Large Virtual Display Interaction},
  author       = {Khanh-Duy Le and Tanh Quang Tran and Karol Chlasta and Krzysztof Krejtz and Morten Fjeld and Andreas Kunz},
  url          = {https://conferences.computer.org/vrpub/pdfs/VRW2021-2ANNoldm4A10Ml9f63uYC9/136700a528/136700a528.pdf
https://www.youtube.com/watch?v=N8ZJlKWj4mk&ab_channel=DuyL%C3%AAKh%C3%A1nh},
  doi          = {10.1109/VRW52623.2021.00146},
  year         = {2021},
  date         = {2021-02-12},
  pages        = {528--529},
  publisher    = {IEEE The Institute of Electrical and Electronics Engineers, Inc.},
  organization = {2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW).},
  abstract     = {Virtual Reality (VR) headsets can open opportunities for users to accomplish complex tasks on large virtual displays, using compact setups. However, interacting with large virtual displays using existing interaction techniques might cause fatigue, especially for precise manipulations, due to the lack of physical surfaces. We designed VXSlate, an interaction technique that uses a large virtual display, as an expansion of a tablet. VXSlate combines a user’s head movements, as tracked by the VR headset, and touch interaction on the tablet. The user’s head movements position both a virtual representation of the tablet and of the user’s hand on the large virtual display. The user’s multi-touch interactions perform finely-tuned content manipulations.},
  keywords     = {},
  pubstate     = {published},
  tppubtype    = {conference}
}
Ljungblad, Sarah; Man, Yemao; Baytaş, Mehmet Aydın; Gamboa, Mafalda; Fjeld, Morten; Obaid, Mohammad
What Matters in Professional Drone Pilots’ Practice? An Interview Study to Understand the Complexity of Their Work and Inform Human-Drone Interaction Research Proceedings
ACM CHI on human factors in computing systems conference proceeding, 2021.
@proceedings{cristin2003885,
  title        = {What Matters in Professional Drone Pilots’ Practice? An Interview Study to Understand the Complexity of Their Work and Inform Human-Drone Interaction Research},
  author       = {Sarah Ljungblad and Yemao Man and Mehmet Aydın Baytaş and Mafalda Gamboa and Morten Fjeld and Mohammad Obaid},
  url          = {https://app.cristin.no/results/show.jsf?id=2003885, Cristin},
  year         = {2021},
  date         = {2021-01-01},
  howpublished = {ACM CHI on human factors in computing systems conference proceeding},
  keywords     = {},
  pubstate     = {published},
  tppubtype    = {proceedings}
}
2020
Fjeld, Morten; Sheshadri, Smitha; Zhao, Shengdong; Cheng, Yang
Learn with Haptics: Improving Vocabulary Recall with Free-form Digital Annotation on Touchscreen Mobiles Journal Article
In: CHI 2020 Paper, pp. 1-13, 2020, (Pre SFI).
@article{Fjeld2020,
  title     = {Learn with Haptics: Improving Vocabulary Recall with Free-form Digital Annotation on Touchscreen Mobiles},
  author    = {Morten Fjeld and Smitha Sheshadri and Shengdong Zhao and Yang Cheng},
  url       = {https://dl.acm.org/doi/pdf/10.1145/3313831.3376272
https://www.youtube.com/watch?v=WY_T0fK5gCQ&ab_channel=ACMSIGCHI},
  doi       = {10.1145/3313831.3376272},
  year      = {2020},
  date      = {2020-04-01},
  urldate   = {2020-04-01},
  journal   = {CHI 2020 Paper},
  pages     = {1--13},
  abstract  = {Mobile vocabulary learning interfaces typically present material only in auditory and visual channels, underutilizing the haptic modality. We explored haptic-integrated learning by adding free-form digital annotation to mobile vocabulary learning interfaces. Through a series of pilot studies, we identified three design factors: annotation mode, presentation sequence, and vibrotactile feedback, that influence recall in haptic-integrated vocabulary interfaces. These factors were then evaluated in a within-subject comparative study using a digital flashcard interface as baseline. Results using a 84-item vocabulary showed that the 'whole word' annotation mode is highly effective, yielding a 24.21% increase in immediate recall scores and a 30.36% increase in the 7-day delayed scores. Effects of presentation sequence and vibrotactile feedback were more transient; they affected the results of immediate tests, but not the delayed tests. We discuss the implications of these factors for designing future mobile learning applications.},
  note      = {Pre SFI},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
2018
Kudo, Yoshiki; Takashima, Kazuki; Fjeld, Morten; Kitamura, Yoshifumi
AdapTable: Extending Reach over Large Tabletops Through Flexible Multi-Display Configuration. Proceedings
2018, (Pre SFI).
@proceedings{Kudo2018,
  title     = {{AdapTable}: Extending Reach over Large Tabletops Through Flexible Multi-Display Configuration},
  author    = {Yoshiki Kudo and Kazuki Takashima and Morten Fjeld and Yoshifumi Kitamura},
  url       = {https://dl.acm.org/doi/pdf/10.1145/3279778.3279779
https://www.youtube.com/watch?v=HG_4COsWGDM},
  doi       = {10.1145/3279778.3279779},
  year      = {2018},
  date      = {2018-11-17},
  urldate   = {2018-11-17},
  note      = {Pre SFI},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {proceedings}
}
Zhu, Kening; Fjeld, Morten; Ülüner, Ayca
WristOrigami: Exploring foldable design for multi-display smartwatch Proceedings
2018, (Pre SFI).
@proceedings{Zhu2018,
  title     = {{WristOrigami}: Exploring foldable design for multi-display smartwatch},
  author    = {Kening Zhu and Morten Fjeld and Ayca Ülüner},
  url       = {https://dl.acm.org/doi/pdf/10.1145/3196709.3196713
https://www.youtube.com/watch?v=1_2D79zntIk},
  doi       = {10.1145/3196709.3196713},
  year      = {2018},
  date      = {2018-06-09},
  urldate   = {2018-06-09},
  note      = {Pre SFI},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {proceedings}
}
Vechev, Velko; Dancu, Alexandru; Perrault, Simon T.; Roy, Quentin; Fjeld, Morten; Zhao, Shengdong
Movespace: on-body athletic interaction for running and cycling Journal Article
In: 2018, (Pre SFI).
@article{Vechev2018,
  title     = {{Movespace}: on-body athletic interaction for running and cycling},
  author    = {Velko Vechev and Alexandru Dancu and Simon T. Perrault and Quentin Roy and Morten Fjeld and Shengdong Zhao},
  url       = {https://dl.acm.org/doi/pdf/10.1145/3206505.3206527
https://www.youtube.com/watch?v=1_u4Zm4F7I0},
  doi       = {10.1145/3206505.3206527},
  year      = {2018},
  date      = {2018-05-29},
  urldate   = {2018-05-29},
  note      = {Pre SFI},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
2016
Wozniak, Pawel; Goyal, Nitesh; Kucharski, Przemyslaw; Lischke, Lars; Mayer, Sven; Fjeld, Morten
RAMPARTS: Supporting sensemaking with spatially-aware mobile interactions Journal Article
In: 2016, (Pre SFI).
@article{Wozniak2016,
  title     = {{RAMPARTS}: Supporting sensemaking with spatially-aware mobile interactions},
  author    = {Pawel Wozniak and Nitesh Goyal and Przemyslaw Kucharski and Lars Lischke and Sven Mayer and Morten Fjeld},
  url       = {https://dl.acm.org/doi/10.1145/2858036.2858491
https://www.youtube.com/watch?v=t01yLj3xhVc},
  doi       = {10.1145/2858036.2858491},
  year      = {2016},
  date      = {2016-05-01},
  urldate   = {2016-05-01},
  note      = {Pre SFI},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Carcedo, Marta G.; Chua, Soon H.; Perrault, Simon; Wozniak, Pawel; Joshi, Raj; Obaid, Mohammad; Fjeld, Morten; Zhao, Shengdong
Hapticolor: Interpolating color information as haptic feedback to assist the colorblind Proceedings
2016, (Pre SFI).
@proceedings{Carcedo2016,
  title     = {{Hapticolor}: Interpolating color information as haptic feedback to assist the colorblind},
  author    = {Marta G. Carcedo and Soon H. Chua and Simon Perrault and Pawel Wozniak and Raj Joshi and Mohammad Obaid and Morten Fjeld and Shengdong Zhao},
  url       = {https://dl.acm.org/doi/10.1145/2858036.2858220
https://www.youtube.com/watch?v=qjoH6eNNZBU},
  doi       = {10.1145/2858036.2858220},
  year      = {2016},
  date      = {2016-05-01},
  urldate   = {2016-05-01},
  note      = {Pre SFI},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {proceedings}
}