About us
Publications
Home
Publications
2024
Peter Andrews; Njål Borch; Morten Fjeld
FootyVision: Multi-Object Tracking, Localisation, and Augmentation of Players and Ball in Football Video Conference
ACM ICMIP, 2024.
Abstract | BibTeX | Tags: WP4: Media Content Interaction and Accessibility | Links:
@conference{Footyvision1,
title = {{FootyVision}: Multi-Object Tracking, Localisation, and Augmentation of Players and Ball in Football Video},
author = {Peter Andrews and Njål Borch and Morten Fjeld},
url = {https://mediafutures.no/peterandrews-footyvision-icmip24-final/},
year = {2024},
date = {2024-04-20},
booktitle = {ACM ICMIP},
abstract = {Football video content analysis is a rapidly evolving field aiming to enrich the viewing experience of football matches. Current research often focuses on specific tasks like player and/or ball detection, tracking, and localisation in top-down views. Our study strives to integrate these efforts into a comprehensive Multi-Object Tracking (MOT) model capable of handling perspective transformations. Our framework, FootyVision, employs a YOLOv7 backbone trained on an extended player and ball dataset. The MOT module builds a gallery and assigns identities via the Hungarian algorithm based on feature embeddings, bounding box intersection over union, distance, and velocity. A novel component of our model is the perspective transformation module that leverages activation maps from the YOLOv7 backbone to compute homographies using lines, intersection points, and ellipses. This method effectively adapts to dynamic and uncalibrated video data, even in viewpoints with limited visual information. In terms of performance, FootyVision sets new benchmarks. The model achieves a mean average precision (mAP) of 95.7% and an F1-score of 95.5% in object detection. For MOT, it demonstrates robust capabilities, with an IDF1 score of approximately 93% on both ISSIA and SoccerNet datasets. For SoccerNet, it reaches a MOTA of 94.04% and shows competitive results for ISSIA. Additionally, FootyVision scores a HOTA(0) of 93.1% and an overall HOTA of 72.16% for the SoccerNet dataset. Our ablation study confirms the effectiveness of the selected tracking features and identifies key attributes for further improvement. While the model excels in maintaining track accuracy throughout the testing dataset, we recognise the potential to enhance spatial-location accuracy.},
keywords = {WP4: Media Content Interaction and Accessibility},
pubstate = {published},
tppubtype = {conference}
}
Peter Andrews; Oda Elise Nordberg; Frode Guribye; Kazuyuki Fujita; Morten Fjeld; Njål Borch
AiCommentator: A Multimodal Conversational Agent for Embedded Visualization in Football Viewing Conference
2024.
Abstract | BibTeX | Tags: WP4: Media Content Interaction and Accessibility | Links:
@conference{AIComment,
title = {{AiCommentator}: A Multimodal Conversational Agent for Embedded Visualization in Football Viewing},
author = {Peter Andrews and Oda Elise Nordberg and Frode Guribye and Kazuyuki Fujita and Morten Fjeld and Njål Borch},
url = {https://mediafutures.no/acm_iui_24_aicommentator_peterandrews-1/},
year = {2024},
date = {2024-03-18},
urldate = {2024-03-18},
booktitle = {Intelligent User Interfaces (IUI)},
abstract = {Traditionally, sports commentators provide viewers with diverse information, encompassing in-game developments and player performances. Yet young adult football viewers increasingly use mobile devices for deeper insights during football matches. Such insights into players on the pitch and performance statistics support viewers’ understanding of game stakes, creating a more engaging viewing experience. Inspired by commentators’ traditional roles and to incorporate information into a single platform, we developed AiCommentator, a Multimodal Conversational Agent (MCA) for embedded visualization and conversational interactions in football broadcast video. AiCommentator integrates embedded visualization, either with an automated non-interactive or with a responsive interactive commentary mode. Our system builds upon multimodal techniques, integrating computer vision and large language models, to demonstrate ways for designing tailored, interactive sports-viewing content. AiCommentator’s event system infers game states based on a multi-object tracking algorithm and computer vision backend, facilitating automated responsive commentary. We address three key topics: evaluating young adults’ satisfaction and immersion across the two viewing modes, enhancing viewer understanding of in-game events and players on the pitch, and devising methods to present this information in a usable manner. In a mixed-method evaluation (n=16) of AiCommentator, we found that the participants appreciated aspects of both system modes but preferred the interactive mode, expressing a higher degree of engagement and satisfaction. Our paper reports on our development of AiCommentator and presents the results from our user study, demonstrating the promise of interactive MCA for a more engaging sports viewing experience. 
Systems like AiCommentator could be pivotal in transforming the interactivity and accessibility of sports content, revolutionizing how sports viewers engage with video content.},
keywords = {WP4: Media Content Interaction and Accessibility},
pubstate = {published},
tppubtype = {conference}
}
2022
Morten Fjeld; Yukai Hoshikawa; Kazuyuki Fujita; Kazuki Takashima; Yoshifumi Kitamura
RedirectedDoors: Redirection While Opening Doors in Virtual Reality Conference
RedirectedDoors: Redirection While Opening Doors in Virtual Reality., 2022.
Abstract | BibTeX | Tags: New, Virtual Reality, WP4: Media Content Interaction and Accessibility
@conference{Fjeld2022,
title = {{RedirectedDoors}: Redirection While Opening Doors in Virtual Reality},
author = {Morten Fjeld and Yukai Hoshikawa and Kazuyuki Fujita and Kazuki Takashima and Yoshifumi Kitamura},
year = {2022},
date = {2022-03-12},
urldate = {2022-03-12},
booktitle = {RedirectedDoors: Redirection While Opening Doors in Virtual Reality.},
internal-note = {booktitle merely repeats the paper title -- TODO confirm the actual proceedings name (likely IEEE VR 2022)},
abstract = {We propose RedirectedDoors, a novel technique for redirection in VR focused on door-opening behavior. This technique manipulates the user's walking direction by rotating the entire virtual environment at a certain angular ratio of the door being opened, while the virtual door's position is kept unmanipulated to ensure door-opening realism. Results of a user study using two types of door-opening interfaces (with and without a passive haptic prop) revealed that the estimated detection thresholds generally showed a higher space efficiency of redirection. Following the results, we derived usage guidelines for our technique that provide lower noticeability and higher acceptability.},
keywords = {New, Virtual Reality, WP4: Media Content Interaction and Accessibility},
pubstate = {published},
tppubtype = {conference}
}
2021
Khanh-Duy Le; Tanh Quang Tran; Karol Chlasta; Krzysztof Krejtz; Morten Fjeld; Andreas Kunz
VXSlate: Exploring Combination of Head Movements and Mobile Touch for Large Virtual Display Interaction Proceedings
Association for Computing Machinery, New York, NY, USA, 2021, ISBN: 978-1-4503-8476-6.
BibTeX | Tags: WP4: Media Content Interaction and Accessibility | Links:
@proceedings{Kunz2021,
title = {{VXSlate}: Exploring Combination of Head Movements and Mobile Touch for Large Virtual Display Interaction},
author = {Khanh-Duy Le and Tanh Quang Tran and Karol Chlasta and Krzysztof Krejtz and Morten Fjeld and Andreas Kunz},
doi = {10.1145/3461778.3462076},
isbn = {978-1-4503-8476-6},
year = {2021},
date = {2021-06-28},
booktitle = {DIS '21: Designing Interactive Systems Conference 2021},
pages = {283--297},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
internal-note = {looks like a single conference paper (has authors and a page range), not a whole proceedings -- consider @inproceedings; TODO confirm},
keywords = {WP4: Media Content Interaction and Accessibility},
pubstate = {published},
tppubtype = {proceedings}
}
Khanh-Duy Le; Tanh Quang Tran; Karol Chlasta; Krzysztof Krejtz; Morten Fjeld; Andreas Kunz
VXSlate: Combining Head Movement and Mobile Touch for Large Virtual Display Interaction Conference
2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW). IEEE The Institute of Electrical and Electronics Engineers, Inc., 2021.
Abstract | BibTeX | Tags: Human computer interaction, Human-centered computing, Interaction techniques, SFI MediaFutures, Virtual Reality, WP4: Media Content Interaction and Accessibility | Links:
@conference{Le2021b,
title = {{VXSlate}: Combining Head Movement and Mobile Touch for Large Virtual Display Interaction},
author = {Khanh-Duy Le and Tanh Quang Tran and Karol Chlasta and Krzysztof Krejtz and Morten Fjeld and Andreas Kunz},
url = {https://conferences.computer.org/vrpub/pdfs/VRW2021-2ANNoldm4A10Ml9f63uYC9/136700a528/136700a528.pdf
https://www.youtube.com/watch?v=N8ZJlKWj4mk&ab_channel=DuyL%C3%AAKh%C3%A1nh},
doi = {10.1109/VRW52623.2021.00146},
year = {2021},
date = {2021-02-12},
pages = {528--529},
publisher = {IEEE The Institute of Electrical and Electronics Engineers, Inc.},
organization = {2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW).},
abstract = {Virtual Reality (VR) headsets can open opportunities for users to accomplish complex tasks on large virtual displays, using compact setups. However, interacting with large virtual displays using existing interaction techniques might cause fatigue, especially for precise manipulations, due to the lack of physical surfaces. We designed VXSlate, an interaction technique that uses a large virtual display, as an expansion of a tablet. VXSlate combines a user’s head movements, as tracked by the VR headset, and touch interaction on the tablet. The user’s head movements position both a virtual representation of the tablet and of the user’s hand on the large virtual display. The user’s multi-touch interactions perform finely-tuned content manipulations.},
keywords = {Human computer interaction, Human-centered computing, Interaction techniques, SFI MediaFutures, Virtual Reality, WP4: Media Content Interaction and Accessibility},
pubstate = {published},
tppubtype = {conference}
}
2020
Pavel Okopnyi; Oskar Juhlin; Frode Guribye
Unpacking Editorial Agreements in Collaborative Video Production Conference
IMX '20: ACM International Conference on Interactive Media Experiences, New York, 2020, (Pre SFI).
Abstract | BibTeX | Tags: WP4: Media Content Interaction and Accessibility | Links:
@conference{Okopnyi2020,
title = {Unpacking Editorial Agreements in Collaborative Video Production},
author = {Pavel Okopnyi and Oskar Juhlin and Frode Guribye},
url = {https://www.researchgate.net/publication/342251635_Unpacking_Editorial_Agreements_in_Collaborative_Video_Production},
doi = {10.1145/3391614.3393652},
year = {2020},
date = {2020-06-01},
booktitle = {IMX '20: ACM International Conference on Interactive Media Experiences},
pages = {117--126},
address = {New York},
abstract = {Video production is a collaborative process involving creative, artistic and technical elements that require a multitude of specialised skill sets. This open-ended work is often marked by uncertainty and interpretive flexibility in terms of what the product is and should be. At the same time, most current video production tools are designed for single users. There is a growing interest, both in industry and academia, to design features that support key collaborative processes in editing, such as commenting on videos. We add to current research by unpacking specific forms of collaboration, in particular the social mechanisms and strategies employed to reduce interpretive flexibility and uncertainty in achieving agreements between editors and other collaborators. The findings contribute to the emerging design interest by identifying general design paths for how to support collaboration in video editing through scaffolding, iconic referencing, and suggestive editing.},
note = {Pre SFI},
keywords = {WP4: Media Content Interaction and Accessibility},
pubstate = {published},
tppubtype = {conference}
}
Anja Salzmann; Frode Guribye; Astrid Gynnild
“We in the Mojo Community” – Exploring a Global Network of Mobile Journalists Journal Article
In: Journalism Practice, pp. 1-18, 2020, (Pre SFI).
Abstract | BibTeX | Tags: community of practice, digital culture, mobile content creation, Mobile journalism, mobile technologies, mojo, mojo community, smartphone reporting, WP4: Media Content Interaction and Accessibility | Links:
@article{Salzmann2020,
title = {“We in the Mojo Community” – Exploring a Global Network of Mobile Journalists},
author = {Anja Salzmann and Frode Guribye and Astrid Gynnild},
url = {https://www.tandfonline.com/doi/epub/10.1080/17512786.2020.1742772?needAccess=true},
doi = {10.1080/17512786.2020.1742772},
year = {2020},
date = {2020-04-03},
journal = {Journalism Practice},
pages = {1--18},
abstract = {Mobile journalism is a fast-growing area of journalistic innovation that requires new skills and work practices. Thus, a major challenge for journalists is learning not only how to keep up with new gadgets but how to advance and develop a mojo mindset to pursue their interests and solidify future work options. This paper investigates a globally pioneering network of mojo journalism, the Mojo Community, that consists of journalists and practitioners dedicated to creating multimedia content using mobile technologies. The study is based on empirical data from interviews with and the observation of the participants of the community over a two-year period. The analysis draws on Wenger’s concept of “communities of practice” to explore the domain, structure, and role of this communal formation for innovation and change in journalistic practices. The community’s core group is comprised of journalists mainly affiliated with legacy broadcast organizations and with a particular interest in and extensive knowledge of mobile technologies. The participants perceive their engagement with the community as a way of meeting the challenges of organizational reluctance to change, fast-evolving technological advancements, and uncertain job prospects.},
note = {Pre SFI},
keywords = {community of practice, digital culture, mobile content creation, Mobile journalism, mobile technologies, mojo, mojo community, smartphone reporting, WP4: Media Content Interaction and Accessibility},
pubstate = {published},
tppubtype = {article}
}
Morten Fjeld; Smitha Sheshadri; Shengdong Zhao; Yang Cheng
Learn with Haptics: Improving Vocabulary Recall with Free-form Digital Annotation on Touchscreen Mobiles Journal Article
In: CHI 2020 Paper, pp. 1-13, 2020, (Pre SFI).
Abstract | BibTeX | Tags: Haptics for Learning, Intersensory reinforced learning, Mobile Vocabulary Learning, Motoric Engagement, Multimodal Learning, WP4: Media Content Interaction and Accessibility | Links:
@article{Fjeld2020,
title = {Learn with Haptics: Improving Vocabulary Recall with Free-form Digital Annotation on Touchscreen Mobiles},
author = {Morten Fjeld and Smitha Sheshadri and Shengdong Zhao and Yang Cheng},
url = {https://dl.acm.org/doi/pdf/10.1145/3313831.3376272
https://www.youtube.com/watch?v=WY_T0fK5gCQ&ab_channel=ACMSIGCHI},
year = {2020},
date = {2020-04-01},
urldate = {2020-04-01},
journal = {CHI 2020 Paper},
internal-note = {"CHI 2020 Paper" is not a journal -- this looks like an @inproceedings in Proceedings of CHI 2020; TODO confirm venue},
pages = {1--13},
abstract = {Mobile vocabulary learning interfaces typically present material only in auditory and visual channels, underutilizing the haptic modality. We explored haptic-integrated learning by adding free-form digital annotation to mobile vocabulary learning interfaces. Through a series of pilot studies, we identified three design factors: annotation mode, presentation sequence, and vibrotactile feedback, that influence recall in haptic-integrated vocabulary interfaces. These factors were then evaluated in a within-subject comparative study using a digital flashcard interface as baseline. Results using a 84-item vocabulary showed that the 'whole word' annotation mode is highly effective, yielding a 24.21% increase in immediate recall scores and a 30.36% increase in the 7-day delayed scores. Effects of presentation sequence and vibrotactile feedback were more transient; they affected the results of immediate tests, but not the delayed tests. We discuss the implications of these factors for designing future mobile learning applications.},
note = {Pre SFI},
keywords = {Haptics for Learning, Intersensory reinforced learning, Mobile Vocabulary Learning, Motoric Engagement, Multimodal Learning, WP4: Media Content Interaction and Accessibility},
pubstate = {published},
tppubtype = {article}
}
2019
Eivind Flobak; Jo Dugstad Wake; Joakim Vindenes; Smiti Kahlon; T. Nordgreen; Frode Guribye
Participatory Design of VR Scenarios for Exposure Therapy Conference
Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems (CHI '19), no. Paper 569, New York, 2019, (Pre SFI).
Abstract | BibTeX | Tags: WP4: Media Content Interaction and Accessibility | Links:
@conference{Flobak2019,
title = {Participatory Design of {VR} Scenarios for Exposure Therapy},
author = {Eivind Flobak and Jo Dugstad Wake and Joakim Vindenes and Smiti Kahlon and T. Nordgreen and Frode Guribye},
url = {https://www.researchgate.net/publication/330205387_Participatory_Design_of_VR_Scenarios_for_Exposure_Therapy},
doi = {10.1145/3290605.3300799},
year = {2019},
date = {2019-05-01},
booktitle = {Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems (CHI '19)},
number = {Paper 569},
address = {New York},
abstract = {Virtual reality (VR) applications for exposure therapy predominantly use computer-generated imagery to create controlled environments in which users can be exposed to their fears. Creating 3D animations, however, is demanding and time-consuming. This paper presents a participatory approach for prototyping VR scenarios that are enabled by 360° video and grounded in lived experiences. We organized a participa-tory workshop with adolescents to prototype such scenarios, consisting of iterative phases of ideation, storyboarding, live-action plays recorded by a 360° camera, and group evaluation. Through an analysis of the participants' interactions, we outline how they worked to design prototypes that depict situations relevant to those with a fear of public speaking. Our analysis also explores how participants used their experiences and refections as resources for design. Six clinical psychologists evaluated the prototypes from the workshop and concluded they were viable therapeutic tools, emphasizing the immer-sive, realistic experience they presented. We argue that our approach makes the design of VR scenarios more accessible.},
note = {Pre SFI},
keywords = {WP4: Media Content Interaction and Accessibility},
pubstate = {published},
tppubtype = {conference}
}
2018
Njål Borch; Ingar Mæhlum Arntzen
Mediasync Report 2015: Evaluating timed playback of HTML5 Media Technical Report
2018, (Pre SFI).
BibTeX | Tags: WP4: Media Content Interaction and Accessibility | Links:
@techreport{Borch2015,
title = {Mediasync Report 2015: Evaluating timed playback of {HTML5} Media},
author = {Njål Borch and Ingar Mæhlum Arntzen},
institution = {Norut},
url = {https://norceresearch.brage.unit.no/norceresearch-xmlui/bitstream/handle/11250/2711974/Norut_Tromso_rapport_28-2015.pdf?sequence=2},
year = {2018},
date = {2018-12-18},
internal-note = {apparent duplicate of entry Borch2015b (same title, authors, PDF); date says 2018 but the report itself is from 2015 -- TODO reconcile},
note = {Pre SFI},
keywords = {WP4: Media Content Interaction and Accessibility},
pubstate = {published},
tppubtype = {techreport}
}
Yoshiki Kudo; Kazuki Takashima; Morten Fjeld; Yoshifumi Kitamura
AdapTable: Extending Reach over Large Tabletops Through Flexible Multi-Display Configuration. Proceedings
2018, (Pre SFI).
BibTeX | Tags: WP4: Media Content Interaction and Accessibility | Links:
@proceedings{Kudo2018,
title = {{AdapTable}: Extending Reach over Large Tabletops Through Flexible Multi-Display Configuration.},
author = {Yoshiki Kudo and Kazuki Takashima and Morten Fjeld and Yoshifumi Kitamura},
url = {https://dl.acm.org/doi/pdf/10.1145/3279778.3279779
https://www.youtube.com/watch?v=HG_4COsWGDM},
year = {2018},
date = {2018-11-17},
urldate = {2018-11-17},
note = {Pre SFI},
keywords = {WP4: Media Content Interaction and Accessibility},
pubstate = {published},
tppubtype = {proceedings}
}
Kening Zhu; Morten Fjeld; Ayca Ülüner
WristOrigami: Exploring foldable design for multi-display smartwatch Proceedings
2018, (Pre SFI).
BibTeX | Tags: WP4: Media Content Interaction and Accessibility | Links:
@proceedings{Zhu2018,
title = {{WristOrigami}: Exploring foldable design for multi-display smartwatch},
author = {Kening Zhu and Morten Fjeld and Ayca Ülüner},
url = {https://dl.acm.org/doi/pdf/10.1145/3196709.3196713
https://www.youtube.com/watch?v=1_2D79zntIk},
year = {2018},
date = {2018-06-09},
urldate = {2018-06-09},
note = {Pre SFI},
keywords = {WP4: Media Content Interaction and Accessibility},
pubstate = {published},
tppubtype = {proceedings}
}
Velko Vechev; Alexandru Dancu; Simon T. Perrault; Quentin Roy; Morten Fjeld; Shengdong Zhao
Movespace: on-body athletic interaction for running and cycling Journal Article
In: 2018, (Pre SFI).
BibTeX | Tags: WP4: Media Content Interaction and Accessibility | Links:
@article{Vechev2018,
title = {Movespace: on-body athletic interaction for running and cycling},
author = {Velko Vechev and Alexandru Dancu and Simon T. Perrault and Quentin Roy and Morten Fjeld and Shengdong Zhao},
url = {https://dl.acm.org/doi/pdf/10.1145/3206505.3206527
https://www.youtube.com/watch?v=1_u4Zm4F7I0},
year = {2018},
date = {2018-05-29},
urldate = {2018-05-29},
internal-note = {@article with no journal field; the ACM DOI suggests a conference/proceedings paper -- TODO confirm venue and type},
note = {Pre SFI},
keywords = {WP4: Media Content Interaction and Accessibility},
pubstate = {published},
tppubtype = {article}
}
Ingar Mæhlum Arntzen; Njål Borch; François Daoust
Media Synchronization on the Web. In: MediaSync Book Chapter
In: 2018, (Pre SFI).
BibTeX | Tags: WP4: Media Content Interaction and Accessibility | Links:
@inbook{Arntzen2018,
title = {Media Synchronization on the Web},
booktitle = {MediaSync},
author = {Ingar Mæhlum Arntzen and Njål Borch and François Daoust},
url = {https://www.w3.org/community/webtiming/files/2018/05/arntzen_mediasync_web_author_edition.pdf},
year = {2018},
date = {2018-05-07},
note = {Pre SFI},
keywords = {WP4: Media Content Interaction and Accessibility},
pubstate = {published},
tppubtype = {inbook}
}
Ingar Mæhlum Arntzen; Njål Borch; François Daoust; Dominique Hazael-Massieux
Multi-device Linear Composition on the Web, Enabling Multi-device Linear Media with HTMLTimingObject and Shared Motion Conference
Media Synchronization Workshop Brussels, 2018, (Pre SFI).
Abstract | BibTeX | Tags: Linear Media, Multi-device, Shared Motion, WP4: Media Content Interaction and Accessibility | Links:
@conference{Arntzen2018b,
title = {Multi-device Linear Composition on the {Web}, Enabling Multi-device Linear Media with {HTMLTimingObject} and {Shared Motion}},
author = {Ingar Mæhlum Arntzen and Njål Borch and François Daoust and Dominique Hazael-Massieux},
url = {https://www.researchgate.net/publication/324991987_Multi-device_Linear_Composition_on_the_Web_Enabling_Multi-device_Linear_Media_with_HTMLTimingObject_and_Shared_Motion},
year = {2018},
date = {2018-01-01},
address = {Brussels},
organization = {Media Synchronization Workshop},
abstract = {Composition is a hallmark of the Web, yet it does not fully extend to linear media. This paper defines linear composition as the ability to form linear media by coordinated playback of independent linear components. We argue that native Web support for linear composition is a key enabler for Web-based multi-device linear media, and that precise multi-device timing is the main technical challenge. This paper proposes the introduction of an HTMLTimingObject as basis for linear composition in the single-device scenario. Linear composition in the multi-device scenario is ensured as HTMLTimingObjects may integrate with Shared Motion, a generic timing mechanism for the Web. By connecting HTMLMediaElements and HTMLTrackElements with a multi-device timing mechanism, a powerful programming model for multi-device linear media is unlocked.},
note = {Pre SFI},
keywords = {Linear Media, Multi-device, Shared Motion, WP4: Media Content Interaction and Accessibility},
pubstate = {published},
tppubtype = {conference}
}
2017
Njål Borch
Økt samvirke og beslutningsstøtte – Case Salten Brann IKS Technical Report
2017, (Pre SFI).
BibTeX | Tags: WP4: Media Content Interaction and Accessibility | Links:
@techreport{Borch2017,
title = {Økt samvirke og beslutningsstøtte – Case {Salten Brann IKS}},
author = {Njål Borch},
url = {https://norceresearch.brage.unit.no/norceresearch-xmlui/handle/11250/2647818},
year = {2017},
date = {2017-08-17},
internal-note = {@techreport is missing the required institution field -- presumably Norut/NORCE given the archive URL; TODO confirm},
note = {Pre SFI},
keywords = {WP4: Media Content Interaction and Accessibility},
pubstate = {published},
tppubtype = {techreport}
}
Njål Borch; François Daoust; Ingar Mæhlum Arntzen
Timing - small step for developers, giant leap for the media industry, IBC 2016 Conference
2017, (Pre SFI).
BibTeX | Tags: WP4: Media Content Interaction and Accessibility | Links:
@conference{Borch2016,
title = {Timing - small step for developers, giant leap for the media industry},
booktitle = {IBC 2016},
author = {Njål Borch and François Daoust and Ingar Mæhlum Arntzen},
url = {https://www.w3.org/community/webtiming/files/2016/09/Borch_IBC2016-final.pdf},
year = {2017},
date = {2017-02-11},
internal-note = {venue "IBC 2016" was embedded in the title; year field says 2017 while the venue and key say 2016 -- TODO confirm publication year},
note = {Pre SFI},
keywords = {WP4: Media Content Interaction and Accessibility},
pubstate = {published},
tppubtype = {conference}
}
2016
Frode Guribye; Lars Nyre
The changing ecology of tools for live news reporting Journal Article
In: Journalism Practice, vol. 10, no. 11, pp. 1216-1230, 2016, ISSN: 1751-2794, (Pre SFI).
Abstract | BibTeX | Tags: broadcast news; ecology of tools; journalism; live reporting; mobile interaction; video applications; video journalism; visual technology, WP4: Media Content Interaction and Accessibility | Links:
@article{Guribye2016,
title = {The changing ecology of tools for live news reporting},
author = {Frode Guribye and Lars Nyre},
url = {https://www.tandfonline.com/doi/pdf/10.1080/17512786.2016.1259011?needAccess=true},
doi = {10.1080/17512786.2016.1259011},
issn = {1751-2794},
year = {2016},
date = {2016-12-05},
journal = {Journalism Practice},
volume = {10},
number = {11},
pages = {1216--1230},
abstract = {Broadcast news channels provide fresh, continuously updated coverage of events, in sharp competition with other news channels in the same market. The live moment is a valuable feature, and broadcasters have always relied on teams that can react quickly to breaking news and report live from the scene. Technology plays an important role in the production of live news, and a number of tools are applied by skilled actors in what can be called an ecology of tools for live news reporting. This study explores new video tools for television news, and the tinkering conducted by the reporting teams to adapt to such tools. Six journalists and photographers at broadcaster TV 2 in Norway were interviewed about their everyday work practices out in the field, and we present the findings in an analysis where six aspects of contemporary live news reporting are explored: (1) from heavy to light equipment, (2) more live news at TV 2, (3) the practice of going live, (4) the mobility of live reporters, (5) tinkering to go live, and (6) quicker pace of production. In the concluding remarks we summarize our insights about live news reporting.},
note = {Pre SFI},
keywords = {broadcast news; ecology of tools; journalism; live reporting; mobile interaction; video applications; video journalism; visual technology, WP4: Media Content Interaction and Accessibility},
pubstate = {published},
tppubtype = {article}
}
Ingar Mæhlum Arntzen; Njål Borch
Data-independent sequencing with the timing object: a JavaScript sequencer for single-device and multi-device web media. In Proceedings of the 7th International Conference on Multimedia Systems (MMSys '16) Proceedings
2016, (Pre SFI).
BibTeX | Tags: WP4: Media Content Interaction and Accessibility | Links:
@proceedings{Arntzen2016,
title = {Data-independent sequencing with the timing object: a {JavaScript} sequencer for single-device and multi-device web media},
booktitle = {Proceedings of the 7th International Conference on Multimedia Systems ({MMSys} '16)},
author = {Ingar Mæhlum Arntzen and Njål Borch},
url = {https://www.w3.org/community/webtiming/files/2016/05/mmsys2016slides.pdf},
year = {2016},
date = {2016-05-12},
note = {Pre SFI},
keywords = {WP4: Media Content Interaction and Accessibility},
pubstate = {published},
tppubtype = {proceedings}
}
Pawel Wozniak; Nitesh Goyal; Przemyslaw Kucharski; Lars Lischke; Sven Mayer; Morten Fjeld
RAMPARTS: Supporting sensemaking with spatially-aware mobile interactions Journal Article
In: 2016, (Pre SFI).
BibTeX | Tags: WP4: Media Content Interaction and Accessibility | Links:
@article{Wozniak2016,
title = {{RAMPARTS}: Supporting sensemaking with spatially-aware mobile interactions},
author = {Pawel Wozniak and Nitesh Goyal and Przemyslaw Kucharski and Lars Lischke and Sven Mayer and Morten Fjeld},
url = {https://dl.acm.org/doi/10.1145/2858036.2858491
https://www.youtube.com/watch?v=t01yLj3xhVc},
year = {2016},
date = {2016-05-01},
urldate = {2016-05-01},
note = {Pre SFI},
keywords = {WP4: Media Content Interaction and Accessibility},
pubstate = {published},
tppubtype = {article}
}
Marta G. Carcedo; Soon H. Chua; Simon Perrault; Pawel Wozniak; Raj Joshi; Mohammad Obaid; Morten Fjeld; Shengdong Zhao
Hapticolor: Interpolating color information as haptic feedback to assist the colorblind Proceedings
2016, (Pre SFI).
BibTeX | Tags: WP4: Media Content Interaction and Accessibility | Links:
@proceedings{Carcedo2016,
title = {Hapticolor: Interpolating color information as haptic feedback to assist the colorblind},
author = {Marta G. Carcedo and Soon H. Chua and Simon Perrault and Pawel Wozniak and Raj Joshi and Mohammad Obaid and Morten Fjeld and Shengdong Zhao},
url = {https://dl.acm.org/doi/10.1145/2858036.2858220
https://www.youtube.com/watch?v=qjoH6eNNZBU},
year = {2016},
date = {2016-05-01},
urldate = {2016-05-01},
note = {Pre SFI},
keywords = {WP4: Media Content Interaction and Accessibility},
pubstate = {published},
tppubtype = {proceedings}
}
2015
Njål Borch; Ingar Mæhlum Arntzen
Mediasync Report 2015: Evaluating timed playback of HTML5 Media Journal Article
In: Norut, 2015, ISBN: 978-82-7492-319-5, (Pre SFI).
Abstract | BibTeX | Tags: HTML5, MediaSync, WP4: Media Content Interaction and Accessibility | Links:
@article{Borch2015b,
title = {Mediasync Report 2015: Evaluating timed playback of {HTML5} Media},
author = {Njål Borch and Ingar Mæhlum Arntzen},
url = {https://norceresearch.brage.unit.no/norceresearch-xmlui/bitstream/handle/11250/2711974/Norut_Tromso_rapport_28-2015.pdf?sequence=2&isAllowed=y},
isbn = {978-82-7492-319-5},
year = {2015},
date = {2015-12-08},
journal = {Norut},
internal-note = {apparent duplicate of @techreport Borch2015; "Norut" is the publishing institution, not a journal -- consider merging into a single @techreport with institution = {Norut}},
abstract = {In this report we provide an extensive analysis of timing aspects of HTML5 Media, across a variety of browsers, operating systems and media formats. Particularly we investigate how playback compares to the progression of the local clock and how players respond to time-shifting and adjustments in playback-rate. Additionally, we use the MediaSync JS library to enforce correctly timed playback for HTML5 media, and indicate the effects this has on user experience. MediaSync is developed based on results from the above analysis. MediaSync aims to provide a best effort solution that works across a variety of media formats, operating systems and browser types, and does not make optimizations for specific permutations.},
note = {Pre SFI},
keywords = {HTML5, MediaSync, WP4: Media Content Interaction and Accessibility},
pubstate = {published},
tppubtype = {article}
}
@comment{Stray fragment: duplicated lines of the Borch2015b abstract left over from page extraction.
operating systems and media formats. Particularly we investigate how playback compares to the progression of
the local clock and how players respond to time-shifting and adjustments in playback-rate.
Additionally, we use the MediaSync JS library to enforce correctly timed playback for HTML5 media, and indicate
the effects this has on user experience. MediaSync is developed based on results from the above analysis.
MediaSync aims to provide a best effort solution that works across a variety of media formats, operating systems
and browser types, and does not make optimizations for specific permutations.}