{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,20]],"date-time":"2025-10-20T10:24:47Z","timestamp":1760955887037,"version":"3.28.0"},"reference-count":27,"publisher":"IEEE","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2018,5]]},"DOI":"10.1109\/qomex.2018.8463369","type":"proceedings-article","created":{"date-parts":[[2018,9,13]],"date-time":"2018-09-13T21:42:29Z","timestamp":1536874949000},"page":"1-3","source":"Crossref","is-referenced-by-count":47,"title":["Introducing UN Salient360! Benchmark: A platform for evaluating visual attention models for 360\u00b0 contents"],"prefix":"10.1109","author":[{"given":"Jesus","family":"Gutierrez","sequence":"first","affiliation":[]},{"given":"Erwan J.","family":"David","sequence":"additional","affiliation":[]},{"given":"Antoine","family":"Coutrot","sequence":"additional","affiliation":[]},{"given":"Matthieu Perreira","family":"Da Silva","sequence":"additional","affiliation":[]},{"given":"Patrick Le","family":"Callet","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","article-title":"Viewport-adaptive navigable 360-degree video delivery","author":"corbillon","year":"2017","journal-title":"IEEE International Conference on Communications"},{"journal-title":"Pano2Vid Automatic Cinematography for Watching 360\u00b0 Videos","year":"2016","author":"su","key":"ref11"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1145\/3072959.3073668"},{"key":"ref13","first-page":"1","article-title":"Head movements during visual exploration of natural images in virtual reality","author":"hu","year":"2017","journal-title":"Proc Annual Conf on Information Sciences and Systems"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICMEW.2017.8026231"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1145\/3083187.3083215"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1145\/3083187.3083210"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1145\/3083187.3083219"},{"key":"ref18","doi-asserted-by":"crossref","DOI":"10.3389\/fpsyg.2017.02116","article-title":"A Public Database of Immersive VR Videos with Corresponding Ratings of Arousal, Valence, and Correlations between Head Movements and Self Report Measures","volume":"8","author":"li","year":"2017","journal-title":"Frontiers in Psychology"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1145\/3083187.3083218"},{"key":"ref4","first-page":"1","author":"bylinskii","year":"2016","journal-title":"What do different evaluation metrics tell us about saliency models?"},{"key":"ref27","first-page":"211","article-title":"A vector-based, multidimensional scanpath similarity measure","author":"jarodzka","year":"2010","journal-title":"Proc Symp Eye Tracking Research and Applications"},{"key":"ref3","first-page":"1","article-title":"A Benchmark of Computational Models of Saliency to Predict Human Fixations","volume":"1","author":"judd","year":"2012","journal-title":"MIT Technical Report"},{"key":"ref6","first-page":"217","article-title":"Modeling visual attention in vr: Measuring the accuracy of predicted scanpaths","author":"marmitt","year":"2002","journal-title":"Eurographics 2002 Short Presentations"},{"journal-title":"Saliency benchmarking Separating models maps and metrics","year":"2017","author":"k\u00fcmmerer","key":"ref5"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2018.2793599"},{"key":"ref7","first-page":"1","article-title":"Which saliency weighting for omni directional image quality assessment?","author":"rai","year":"2017","journal-title":"Proc 8th International Conference on Quality of Multimedia Experience"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2013.2265801"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ISMAR.2015.12"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2012.89"},{"journal-title":"Signal Processing Image Communication 2018","article-title":"Special issue","year":"0","key":"ref20"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.3758\/s13428-012-0226-9"},{"journal-title":"MIT Saliency Benchmark","year":"0","author":"bylinskii","key":"ref21"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.3758\/s13428-014-0550-3"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1016\/j.visres.2007.06.015"},{"journal-title":"Algorithms for the comparison of visual scan patterns","year":"2016","author":"k\u00fcbler","key":"ref26"},{"key":"ref25","first-page":"921","article-title":"Analysis of Scores, Datasets, and Models in Visual Saliency Prediction","author":"borji","year":"2013","journal-title":"IEEE International Conference on Computer Vision"}],"event":{"name":"2018 Tenth International Conference on Quality of Multimedia Experience (QoMEX)","start":{"date-parts":[[2018,5,29]]},"location":"Cagliari","end":{"date-parts":[[2018,6,1]]}},"container-title":["2018 Tenth International Conference on Quality of Multimedia Experience (QoMEX)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8452069\/8463292\/08463369.pdf?arnumber=8463369","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,1,26]],"date-time":"2022-01-26T10:07:29Z","timestamp":1643191649000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8463369\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2018,5]]},"references-count":27,"URL":"https:\/\/doi.org\/10.1109\/qomex.2018.8463369","relation":{},"subject":[],"published":{"date-parts":[[2018,5]]}}}