2020
Zerman, Emin; O’Dwyer, Néill; Young, Gareth W.; Smolic, Aljosa: A Case Study on the Use of Volumetric Video in Augmented Reality for Cultural Heritage. Conference. In: Proceedings of the 11th Nordic Conference on Human-Computer Interaction (NordiCHI '20), Association for Computing Machinery (ACM), Tallinn, Estonia, 2020, ISBN: 978-1-4503-7579-5.
@conference{Zerman2020b,
title = {A Case Study on the Use of Volumetric Video in Augmented Reality for Cultural Heritage},
author = {Emin Zerman and Néill O’Dwyer and Gareth W. Young and Aljosa Smolic},
url = {https://v-sense.scss.tcd.ie:443/wp-content/uploads/2020/10/3419249.3420115-1.pdf},
doi = {10.1145/3419249.3420115},
isbn = {978-1-4503-7579-5},
year = {2020},
date = {2020-10-29},
booktitle = {Proceedings of the 11th Nordic Conference on Human-Computer Interaction (NordiCHI '20)},
pages = {1 - 5},
publisher = {Association for Computing Machinery (ACM)},
address = {Tallinn, Estonia},
abstract = {Interest in the use of augmented reality (AR) is currently growing alongside advances in visual display and 3D reconstruction technologies. With the integration of volumetric video (VV), AR can be enhanced with representations of live-action actors. In this paper, we present an exploratory case study of a museologically focused AR application that evaluated various parameters affecting the overall experience for visitors of the Long Room in the Old Library at Trinity College Dublin. Employing a volumetric 3D representation of Jonathan Swift, the benefits of applying VV in AR for a cultural heritage use-case scenario were explored. Moreover, we compared two AR platforms for this prototype application: a tablet and an HMD. For this purpose, we collected post-task opinions of the application and processed quantitative and qualitative data. The results highlighted differences between the two platforms and showed how the developed VV AR application was initially received by the users.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
O’Dwyer, Néill; Young, Gareth W.; Johnson, Nicholas; Zerman, Emin; Smolic, Aljosa: Mixed Reality and Volumetric Video in Cultural Heritage: Expert opinions on augmented and virtual reality. Inproceedings. In: Rauterberg, Matthias (Ed.): Culture and Computing, pp. 195-214, Human Computer Interaction International, Springer, Copenhagen, 2020, ISBN: 978-3-030-50267-6.
@inproceedings{O’Dwyer2020,
title = {Mixed Reality and Volumetric Video in Cultural Heritage: Expert opinions on augmented and virtual reality},
author = {Néill O’Dwyer and Gareth W. Young and Nicholas Johnson and Emin Zerman and Aljosa Smolic},
editor = {Matthias Rauterberg},
url = {https://v-sense.scss.tcd.ie:443/wp-content/uploads/2020/07/odwyer2020_chapter_mixedrealityandvolumetricvideo.pdf},
isbn = {978-3-030-50267-6},
year = {2020},
date = {2020-06-30},
booktitle = {Culture and Computing},
volume = {12215},
pages = {195 – 214},
publisher = {Springer},
address = {Copenhagen},
organization = {Human Computer Interaction International},
series = {Lecture Notes in Computer Science},
abstract = {Mixed reality (MR) technology is currently growing in popularity for applications in the cultural heritage domain. Furthermore, with the ability to be viewed with six degrees of freedom, volumetric video (VV) is presently being explored as a viable approach to content creation within this area. When combined, MR technology and VV present both practitioners and audiences with innovative approaches to the creation and consumption of both tangible and intangible representations of cultural significance. While there are some existing quantitative studies appraising these new technologies, the precise effects of MR in a cultural heritage context have yet to be fully explored. Here we show the results of a systematic evaluation of MR technology as applied in a cultural heritage context, where subject matter expert interviews were conducted to identify how virtual reality and augmented reality technologies are influencing the creative practices of domain experts and audience engagements with modern dramatic literature. Gathered from high-level stakeholders within the cultural heritage domain, our results highlighted the problems, concerns, and desires of users who must consider this technology in practice. We found that MR and VV content were considered by many to be disruptive technologies for the future of film, theater, and performance practice from the perspectives of both practitioners and audiences. We anticipate that these results will help future MR and VV projects to create meaningful content that is sympathetic to the needs and requirements of creators and audiences.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Moynihan, Matthew; Pagés, Rafael; Smolic, Aljosa: A Self-regulating Spatio-Temporal Filter for Volumetric Video Point Clouds. Book Chapter. In: Computer Vision, Imaging and Computer Graphics Theory and Applications, vol. 1182, pp. 391-408, Springer International Publishing, 2020, ISBN: 978-3-030-41590-7.
@inbook{Moynihan2020,
title = {A Self-regulating Spatio-Temporal Filter for Volumetric Video Point Clouds},
author = {Matthew Moynihan and Rafael Pagés and Aljosa Smolic},
url = {https://v-sense.scss.tcd.ie:443/wp-content/uploads/2020/05/mm2020Cloud_compressed.pdf},
doi = {10.1007/978-3-030-41590-7_16},
isbn = {978-3-030-41590-7},
year = {2020},
date = {2020-02-20},
volume = {1182},
pages = {391-408},
publisher = {Springer International Publishing},
series = {Computer Vision, Imaging and Computer Graphics Theory and Applications},
abstract = {The following work presents a self-regulating filter that is capable of performing accurate upsampling of dynamic point cloud data sequences captured using wide-baseline multi-view camera setups. This is achieved by using two-way temporal projection of edge-aware upsampled point clouds while imposing coherence and noise filtering via a windowed, self-regulating noise filter. We use a state-of-the-art Spatio-Temporal Edge-Aware scene flow estimation to accurately model the motion of points across a sequence and then, leveraging the spatio-temporal inconsistency of unstructured noise, we perform a weighted Hausdorff distance-based noise filter over a given window. Our results demonstrate that this approach produces temporally coherent, upsampled point clouds while mitigating both additive and unstructured noise. In addition to filtering noise, the algorithm is able to greatly reduce intermittent loss of pertinent geometry. The system performs well in dynamic real-world scenarios with both stationary and non-stationary cameras, as well as in synthetically rendered environments for baseline study.},
keywords = {},
pubstate = {published},
tppubtype = {inbook}
}
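To make the windowed filtering idea concrete, here is a minimal Python sketch of the general technique, not the authors' implementation: a point survives only if some temporally neighbouring cloud in the window supports it within a distance threshold, a one-sided, per-point stand-in for the paper's weighted Hausdorff criterion and its self-regulating threshold. The function name filter_sequence, the fixed threshold tau, and the use of SciPy k-d trees are illustrative assumptions.

# Illustrative sketch only (assumed, not the paper's code): windowed temporal
# noise filtering for a point cloud sequence. Unstructured noise is
# spatio-temporally inconsistent, so points with no nearby counterpart in any
# neighbouring frame are removed. A fixed "tau" replaces the paper's
# self-regulating threshold.
import numpy as np
from scipy.spatial import cKDTree

def filter_sequence(frames, window=2, tau=0.05):
    """frames: list of (N_t, 3) point arrays; tau: support radius in scene units."""
    filtered = []
    for t, pts in enumerate(frames):
        neighbours = [frames[s]
                      for s in range(max(0, t - window),
                                     min(len(frames), t + window + 1))
                      if s != t]
        if not neighbours:                 # single-frame input: nothing to compare
            filtered.append(pts)
            continue
        # Distance from each point to its nearest neighbour in each nearby frame.
        dists = np.stack([cKDTree(nb).query(pts)[0] for nb in neighbours], axis=1)
        filtered.append(pts[dists.min(axis=1) < tau])  # keep supported points
    return filtered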
2019
Zerman, Emin; Valenzise, Giuseppe; Smolic, Aljosa: Analysing the Impact of Cross-Content Pairs on Pairwise Comparison Scaling. Inproceedings. In: 11th International Conference on Quality of Multimedia Experience (QoMEX 2019), IEEE, 2019.
@inproceedings{zerman2019analysing,
title = {Analysing the Impact of Cross-Content Pairs on Pairwise Comparison Scaling},
author = {Emin Zerman and Giuseppe Valenzise and Aljosa Smolic},
url = {https://v-sense.scss.tcd.ie:443/wp-content/uploads/2019/03/qomex2019_crossContent_preprint.pdf},
doi = {10.1109/QoMEX.2019.8743295},
year = {2019},
date = {2019-06-06},
booktitle = {11th International Conference on Quality of Multimedia Experience (QoMEX 2019)},
organization = {IEEE},
abstract = {Pairwise comparisons (PWC) methodology is one of the most commonly used methods for subjective quality assessment, especially for computer graphics and multimedia applications. Unlike rating methods, a psychometric scaling operation is required to convert PWC results to numerical subjective quality values. Due to the nature of this scaling operation, the obtained quality scores are relative to the set they are computed in. While it is customary to compare different versions of the same content, in this work we study how cross-content comparisons may benefit psychometric scaling. For this purpose, we use two different video quality databases which have both rating and PWC experiment results. The results show that although same-content comparisons play a major role in the accuracy of psychometric scaling, the use of a small portion of cross-content comparison pairs is indeed beneficial for obtaining more accurate quality estimates.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
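As background to the scaling step this paper analyses, the sketch below shows one standard psychometric scaling model, Bradley-Terry fitted with Zermelo's iteration. The paper itself may use a different model (e.g. Thurstonian scaling), so treat this purely as an assumed illustration of how a pairwise-comparison count matrix, including any cross-content pairs, is converted into relative quality scores; scale_pwc and the small prior are hypothetical choices.

# Illustrative Bradley-Terry scaling (an assumed stand-in, not necessarily the
# paper's model). C[i, j] counts how often stimulus i was preferred over j;
# cross-content comparisons simply populate entries between different contents.
import numpy as np

def scale_pwc(C, iters=200, prior=0.1):
    C = C + prior                 # small prior keeps worths positive for rare winners
    np.fill_diagonal(C, 0.0)
    n = C.shape[0]
    w = np.ones(n)                # latent "worth" per stimulus
    for _ in range(iters):
        for i in range(n):
            # Zermelo / MM update: wins_i divided by expected comparisons won.
            denom = sum((C[i, j] + C[j, i]) / (w[i] + w[j])
                        for j in range(n) if j != i)
            w[i] = C[i].sum() / denom
        w /= w.sum()              # fix the scale: scores are relative to the set
    return np.log(w)              # log-worths serve as quality scores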
Moynihan, Matthew; Pagés, Rafael; Smolic, Aljosa: Spatio-Temporal Upsampling for Free Viewpoint Video Point Clouds. Conference. In: Proceedings of the 14th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications (VISAPP), vol. 5, pp. 684-692, SciTePress, 2019, ISBN: 978-989-758-354-4.
@conference{Moynihan2019,
title = {Spatio-Temporal Upsampling for Free Viewpoint Video Point Clouds},
author = {Matthew Moynihan and Rafael Pagés and Aljosa Smolic},
url = {https://v-sense.scss.tcd.ie:443/visigrapp_2019camera-ready/},
doi = {10.5220/0007361606840692},
isbn = {978-989-758-354-4},
year = {2019},
date = {2019-02-25},
booktitle = {Proceedings of the 14th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications (VISAPP)},
volume = {5},
pages = {684-692},
publisher = {SciTePress},
abstract = {This paper presents an approach to upsampling point cloud sequences captured through a wide-baseline camera setup in a spatio-temporally consistent manner. The system uses edge-aware scene flow to understand the movement of 3D points across a free-viewpoint video scene to impose temporal consistency. In addition to geometric upsampling, a Hausdorff distance quality metric is used to filter noise and further improve the density of each point cloud. Results show that the system produces temporally consistent point clouds, not only reducing errors and noise but also recovering details that were lost in frame-by-frame dense point cloud reconstruction. The system has been successfully tested on sequences captured with both static and handheld cameras.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
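The temporal-consistency idea, advecting neighbouring clouds along scene flow into the current frame to recover lost geometry, can be sketched as follows. This is a simplified illustration under stated assumptions (precomputed per-point flows, a plain voxel-grid deduplication), not the authors' pipeline; densify_frame is a hypothetical name, and the Hausdorff-based quality filtering would run on the merged cloud afterwards.

# Simplified illustration (assumed, not the authors' code): densify frame t by
# warping the previous and next clouds along precomputed per-point scene flow,
# then deduplicate on a voxel grid.
import numpy as np

def densify_frame(cur, prev, flow_prev, nxt, flow_next, voxel=0.01):
    """cur/prev/nxt: (N, 3) clouds; flow_*: (N, 3) per-point motion t-1->t and t->t+1."""
    merged = np.vstack([cur,
                        prev + flow_prev,    # t-1 advected forward to t
                        nxt - flow_next])    # t+1 advected backward to t
    keys = np.floor(merged / voxel).astype(np.int64)
    _, idx = np.unique(keys, axis=0, return_index=True)  # keep one point per voxel
    return merged[idx]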
O’Dwyer, Néill; Johnson, Nicholas: Exploring volumetric video and narrative through Samuel Beckett’s Play. Journal Article. In: International Journal of Performance Arts and Digital Media, 2019, ISSN: 1479-4713.
@article{O’Dwyer2019,
title = {Exploring volumetric video and narrative through Samuel Beckett’s Play},
author = {Néill O’Dwyer and Nicholas Johnson},
url = {https://v-sense.scss.tcd.ie:443/wp-content/uploads/2019/01/Exploring-volumetric-video-and-narrative-through-Samuel-Beckett’s-Play-14794713.2019.pdf},
doi = {10.1080/14794713.2019.1567243},
issn = {1479-4713},
year = {2019},
date = {2019-01-15},
journal = {International Journal of Performance Arts and Digital Media},
abstract = {This paper draws upon the primary research of an interdepartmental collaborative practice-as-research project that took place at Trinity College during 2017, in which a Samuel Beckett play, entitled Play, was reinterpreted for virtual reality. It included contributions from the Departments of Computer Science, Drama and Electrical and Electronic Engineering. The goal of this article is to offer some expanded philosophical and aesthetic reflections on the practice, now that the major production processes are completed. The primary themes that are dealt with in this paper are the reorganised rules concerning: (1) making work in the VR medium and (2) the impact of the research on viewership and content engagement in digital culture. In doing so we draw on the technological philosophy of Bernard Stiegler, who extends the legacy of Gilles Deleuze and Gilbert Simondon, to reflect on the psychic, sociopolitical and economic impacts of VR technology on cognition, subjectivity and identity in the contemporary digitalised world.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2018
O’Dwyer, Néill; Ondřej, Jan; Pagés, Rafael; Amplianitis, Konstantinos; Smolić, Aljoša: Jonathan Swift: Augmented Reality Application for Trinity Library’s Long Room. Conference. In: International Conference on Interactive Digital Storytelling (ICIDS 2018), pp. 348-351, 2018.
@conference{O’Dwyer2018b,
title = {Jonathan Swift: Augmented Reality Application for Trinity Library’s Long Room},
author = {Néill O’Dwyer and Jan Ondřej and Rafael Pagés and Konstantinos Amplianitis and Aljoša Smolić},
url = {https://v-sense.scss.tcd.ie:443/wp-content/uploads/2019/01/ODwyer2018_Chapter_JonathanSwiftAugmentedRealityA-2.pdf},
doi = {10.1007/978-3-030-04028-4_39},
year = {2018},
date = {2018-12-05},
pages = {348-351},
organization = {International Conference on Interactive Digital Storytelling (ICIDS 2018)},
abstract = {This demo paper describes a project that engages cutting-edge free viewpoint video (FVV) techniques for developing content for an augmented reality prototype. The article traces the evolutionary process from concept, through narrative development, to completed AR prototypes for the HoloLens and handheld mobile devices. It concludes with some reflections on the affordances of the various hardware formats and posits future directions for the research.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Gao, Pan; Ozcinar, Cagri; Smolic, Aljosa: Optimization of Occlusion-Inducing Depth Pixels in 3-D Video Coding. Conference. In: IEEE International Conference on Image Processing (ICIP 2018), Athens, Greece, 2018.
@conference{Gao2018,
title = {Optimization of Occlusion-Inducing Depth Pixels in 3-D Video Coding},
author = {Pan Gao and Cagri Ozcinar and Aljosa Smolic},
url = {https://arxiv.org/abs/1805.03105},
year = {2018},
date = {2018-10-07},
booktitle = {IEEE International Conference on Image Processing (ICIP 2018)},
address = {Athens, Greece},
abstract = {The optimization of occlusion-inducing depth pixels in depth map coding has received little attention in the literature, since their associated texture pixels are occluded in the synthesized view and their effect on the synthesized view is considered negligible. However, the occlusion-inducing depth pixels still consume bits in transmission and induce geometry distortion in the synthesized view. In this paper, we propose an efficient depth map coding scheme specifically for the occlusion-inducing depth pixels by using allowable depth distortions. First, we formulate the problem of minimizing the overall geometry distortion in the occlusion subject to a bit rate constraint, where the depth distortion is adjusted within the set of allowable depth distortions that introduce the same disparity error as the initial depth distortion. Then, we propose a dynamic programming solution to find the optimal depth distortion vector for the occlusion. The proposed algorithm improves coding efficiency without altering the occlusion order. Simulation results confirm the performance improvement compared to other existing algorithms.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
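To illustrate the kind of dynamic program the abstract describes, here is a heavily simplified Python sketch. The candidate tuples, the Lagrangian distortion-plus-rate cost, and the depth-monotonicity test standing in for "no alteration of the occlusion order" are all assumptions made for illustration; the paper's actual allowable-distortion sets and ordering conditions are more involved.

# Heavily simplified sketch (assumed, not the paper's algorithm): pick one
# allowable distortion per occlusion-inducing pixel, minimising
# geometry_distortion + lam * rate by dynamic programming, while a
# depth-ordering test stands in for preserving the occlusion order.
import math

def optimise_occlusion_depths(pixels, lam):
    """pixels[p] = [(depth, geom_distortion, rate_bits), ...] is the allowable
    set for the p-th pixel along the occlusion; returns one chosen index per
    pixel. Assumes at least one order-preserving path exists."""
    costs = [g + lam * r for (_d, g, r) in pixels[0]]
    back = []
    for p in range(1, len(pixels)):
        new_costs, ptrs = [], []
        for (d, g, r) in pixels[p]:
            # Allow transitions only from deeper-or-equal predecessors so the
            # foreground-over-background ordering is never flipped.
            best, arg = math.inf, None
            for k, (dk, _g, _r) in enumerate(pixels[p - 1]):
                if dk >= d and costs[k] < best:
                    best, arg = costs[k], k
            new_costs.append(best + g + lam * r)
            ptrs.append(arg)
        costs = new_costs
        back.append(ptrs)
    # Backtrack from the cheapest final candidate to recover the distortion vector.
    sel = [min(range(len(costs)), key=costs.__getitem__)]
    for ptrs in reversed(back):
        sel.append(ptrs[sel[-1]])
    return sel[::-1]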
O’Dwyer, Néill; Johnson, Nicholas; Pagés, Rafael; Ondřej, Jan; Amplianitis, Konstantinos; Bates, Enda; Monaghan, David; Smolic, Aljoša: Beckett in VR: exploring narrative using free viewpoint video. Inproceedings. In: Proceedings of SIGGRAPH '18, ACM SIGGRAPH, New York, NY, USA, 2018, ISBN: 978-1-4503-5817-0.
@inproceedings{O'Dwyer2018,
title = {Beckett in VR: exploring narrative using free viewpoint video},
author = {Néill O’Dwyer and Nicholas Johnson and Rafael Pagés and Jan Ondřej and Konstantinos Amplianitis and Enda Bates and David Monaghan and Aljoša Smolic},
url = {https://dl.acm.org/citation.cfm?doid=3230744.3230774},
doi = {10.1145/3230744.3230774},
isbn = {978-1-4503-5817-0},
year = {2018},
date = {2018-08-12},
booktitle = {Proceedings of SIGGRAPH '18},
number = {2},
publisher = {ACM SIGGRAPH},
address = {New York, NY, USA},
organization = {ACM SIGGRAPH},
abstract = {This poster describes a reinterpretation of Samuel Beckett's theatrical text Play for virtual reality (VR). It is an aesthetic reflection on practice that follows up on a technical project description submitted to ISMAR 2017 [O'Dwyer et al. 2017]. Actors are captured in a green screen environment using free-viewpoint video (FVV) techniques, and the scene is built in a game engine, complete with binaural spatial audio and six degrees of freedom of movement. The project explores how ludic qualities in the original text help elicit the conversational and interactive specificities of the digital medium. The work affirms the potential for interactive narrative in VR, opens new experiences of the text, and highlights the reorganisation of the author-audience dynamic.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Pagés, Rafael; Amplianitis, Konstantinos; Monaghan, David; Ondrej, Jan; Smolic, Aljosa: Affordable Content Creation for Free-Viewpoint Video and VR/AR Applications. Journal Article. In: Journal of Visual Communication and Image Representation, vol. 53, pp. 192-201, 2018.
@article{pages2018affordable,
title = {Affordable Content Creation for Free-Viewpoint Video and VR/AR Applications},
author = {Rafael Pagés and Konstantinos Amplianitis and David Monaghan and Jan Ondrej and Aljosa Smolic},
url = {https://v-sense.scss.tcd.ie:443/research/6dof/affordable-content-creation-for-free-viewpoint-video-and-vr-ar-applications/},
doi = {10.1016/j.jvcir.2018.03.012},
year = {2018},
date = {2018-05-01},
journal = {Journal of Visual Communication and Image Representation},
volume = {53},
pages = {192-201},
abstract = {We present a scalable pipeline for Free-Viewpoint Video (FVV) content creation, considering also visualisation in Augmented Reality (AR) and Virtual Reality (VR). We support a range of scenarios where there may be a limited number of handheld consumer cameras, but also demonstrate how our method can be applied in professional multi-camera setups. Our novel pipeline extends many state-of-the-art techniques (such as structure-from-motion, shape-from-silhouette and multi-view stereo) and incorporates bio-mechanical constraints through 3D skeletal information as well as efficient camera pose estimation algorithms. We introduce multi-source shape-from-silhouette (MS-SfS) combined with fusion of different geometry data as crucial components for accurate reconstruction in sparse camera settings. Our approach is highly flexible and our results indicate suitability either for affordable content creation for VR/AR or for interactive FVV visualisation where a user can choose an arbitrary viewpoint or sweep between known views using view synthesis.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
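Of the pipeline stages listed, shape-from-silhouette reduces to the most compact algorithm, so here is a minimal voxel-carving sketch of plain SfS to fix ideas. The paper's multi-source variant (MS-SfS) and its fusion with other geometry data go well beyond this illustration; carve_visual_hull and the 3x4 projection matrices are assumptions for the sketch.

# Minimal plain shape-from-silhouette sketch (assumed; the paper's MS-SfS is
# more elaborate). A candidate voxel survives only if it projects inside the
# silhouette of every camera.
import numpy as np

def carve_visual_hull(voxels, cams):
    """voxels: (N, 3) voxel centres; cams: list of (P, sil) pairs, with P a
    3x4 projection matrix and sil a binary (H, W) silhouette mask."""
    keep = np.ones(len(voxels), dtype=bool)
    homog = np.hstack([voxels, np.ones((len(voxels), 1))])
    for P, sil in cams:
        uvw = homog @ P.T                          # project to the image plane
        u = (uvw[:, 0] / uvw[:, 2]).astype(int)    # image column
        v = (uvw[:, 1] / uvw[:, 2]).astype(int)    # image row
        h, w = sil.shape
        inside = (u >= 0) & (u < w) & (v >= 0) & (v < h)
        hit = sil[np.clip(v, 0, h - 1), np.clip(u, 0, w - 1)].astype(bool)
        keep &= inside & hit                       # carve anything outside a silhouette
    return voxels[keep]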
O’Dwyer, Néill; Johnson, Nicholas: Virtual Play: Beckettian Experiments in Virtual Reality. Journal Article. In: Contemporary Theatre Review, vol. 28.1, 2018.
@article{BeckettianExperimentsinVirtualReality,
title = {Virtual Play: Beckettian Experiments in Virtual Reality},
author = {Néill O’Dwyer and Nicholas Johnson},
url = {https://www.contemporarytheatrereview.org/2018/beckettian-experiments-in-virtual-reality/},
year = {2018},
date = {2018-02-21},
journal = {Contemporary Theatre Review},
volume = {28.1},
abstract = {The past ten years have seen extensive experimentation with Beckett and new technological media at Trinity College Dublin. Research projects have included the stage adaptation and installation of a teleplay (Ghost Trio, 2007), the HD digital video exploration of two teleplays (Abstract Machines, 2010, including new versions of …but the clouds… and Nacht und Träume), and numerous smaller projects involving audio and video within the remit of “fundamental research” at the Samuel Beckett Laboratory (2013–present). The most recent project, Virtual Play, explores Beckett’s Play (1963) within FVV (free-viewpoint video), a form of user-centred VR (virtual reality). This project, reflecting interdisciplinary and cross-faculty collaboration between the V-SENSE project (within the School of Computer Science and Statistics) and the School of Creative Arts, has made high-impact contributions in both FVV research and Beckett Studies, and has now been recognised at European level, receiving first prize at the 2017 New European Media Awards.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2017
O’Dwyer, Néill; Johnson, Nicholas; Bates, Enda; Pagés, Rafael; Ondrej, Jan; Amplianitis, Konstantinos; Monaghan, David; Smolic, Aljosa: Virtual Play in Free-viewpoint Video: Reinterpreting Samuel Beckett for Virtual Reality. Inproceedings. In: 16th IEEE International Symposium on Mixed and Augmented Reality (ISMAR), pp. 262-267, IEEE Xplore digital library, 2017.
@inproceedings{ODwyer2017b,
title = {Virtual Play in Free-viewpoint Video: Reinterpreting Samuel Beckett for Virtual Reality},
author = {Néill O’Dwyer and Nicholas Johnson and Enda Bates and Rafael Pagés and Jan Ondrej and Konstantinos Amplianitis and David Monaghan and Aljosa Smolic},
url = {https://v-sense.scss.tcd.ie:443/wp-content/uploads/2017/08/VARCI17_Beckett_V-SENSE_final.pdf},
doi = {10.1109/ISMAR-Adjunct.2017.87},
year = {2017},
date = {2017-10-14},
booktitle = {16th IEEE International Symposium on Mixed and Augmented Reality (ISMAR)},
pages = {262-267},
publisher = {IEEE Xplore digital library},
abstract = {Since the early years of the twenty-first century, the performing arts have been party to an increasing number of digital media projects that bring renewed attention to questions about, on one hand, new working processes involving capture and distribution techniques, and on the other hand, how particular works—with bespoke hardware and software—can exert an efficacy over how work is created by the artist/producer or received by the audience. The evolution of author/audience criteria demands that digital arts practice modify its aesthetic and storytelling strategies towards types that are more appropriate to communicating ideas over interactive digital networks, wherein AR/VR technologies are rapidly becoming the dominant interface. This project explores these redefined criteria through a reimagining of Samuel Beckett's Play (1963) for digital culture. This paper offers an account of the working processes and the aesthetic and technical considerations that guide artistic decisions, and of how we attempt to situate the overall work within the state of the art.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}