{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,16]],"date-time":"2025-10-16T06:58:10Z","timestamp":1760597890223,"version":"3.37.3"},"reference-count":64,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2019,1,1]],"date-time":"2019-01-01T00:00:00Z","timestamp":1546300800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/OAPA.html"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61572387","61632019","61836008","61672404","61871304"],"award-info":[{"award-number":["61572387","61632019","61836008","61672404","61871304"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61621005"],"award-info":[{"award-number":["61621005"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2019]]},"DOI":"10.1109\/access.2019.2915630","type":"journal-article","created":{"date-parts":[[2019,5,8]],"date-time":"2019-05-08T20:04:36Z","timestamp":1557345876000},"page":"60428-60438","source":"Crossref","is-referenced-by-count":18,"title":["A Convolutional Encoder-Decoder Network With Skip Connections for Saliency Prediction"],"prefix":"10.1109","volume":"7","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-2161-1551","authenticated-orcid":false,"given":"Fei","family":"Qi","sequence":"first","affiliation":[]},{"given":"Chunhuan","family":"Lin","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2179-3292","authenticated-orcid":false,"given":"Guangming","family":"Shi","sequence":"additional","affiliation":[]},{"given":"Hao","family":"Li","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1167\/11.3.9"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2011.272"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2018.2817047"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.38"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2017.03.018"},{"key":"ref30","first-page":"362","article-title":"Predicting eye fixations using convolutional neural networks","author":"liu","year":"2015","journal-title":"Proc IEEE Conf Comput Vis Pattern Recognit (CVPR)"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2009.5459462"},{"key":"ref36","first-page":"241","article-title":"Predicting human gaze using low-level saliency combined with face detection","author":"cerf","year":"2008","journal-title":"Proc Adv Neural Inf Process Syst (NIPS)"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2009.5459240"},{"key":"ref34","first-page":"155","article-title":"Saliency based on information maximization","author":"bruce","year":"2006","journal-title":"Proc Adv Neural Inf Process Syst (NIPS)"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref62","first-page":"1139","article-title":"On the importance of initialization and momentum in deep learning","author":"sutskever","year":"2013","journal-title":"Proc Int Conf Mach Learn (ICML)"},{"key":"ref61","first-page":"249","article-title":"Understanding the difficulty of training deep feedforward neural networks","author":"glorot","year":"2010","journal-title":"Proc 13th Int Conf on Artificial Intell"},{"journal-title":"Salgan Visual saliency prediction with generative adversarial networks","year":"2018","author":"pan","key":"ref63"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1016\/0010-0285(80)90005-5"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2013.26"},{"key":"ref27","first-page":"4467","article-title":"Dual path networks","author":"chen","year":"2017","journal-title":"Proc Adv Neural Inf Process Syst (NIPS)"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1167\/13.4.11"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1038\/nrn1411"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1016\/j.visres.2008.09.007"},{"journal-title":"Very Deep Convolutional Networks for Large-scale Image Recognition","year":"2015","author":"simonyan","key":"ref20"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.243"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref24","first-page":"91","article-title":"Faster R-CNN: Towards real-time object detection with region proposal networks","author":"ren","year":"2015","journal-title":"Proc Adv Neural Inf Process Syst (NIPS)"},{"key":"ref23","first-page":"1097","article-title":"ImageNet classification with deep convolutional neural networks","author":"krizhevsky","year":"2012","journal-title":"Proc Adv Neural Inf Process Syst (NIPS)"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICPR.2016.7900174"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2017.2710620"},{"key":"ref50","first-page":"448","article-title":"Batch Normalization: Accelerating deep network training by reducing internal covariate shift","author":"ioffe","year":"2015","journal-title":"Proc Int Conf Mach Learn (ICML)"},{"key":"ref51","first-page":"315","article-title":"Deep sparse rectifier neural networks","author":"glorot","year":"2011","journal-title":"Proc 14th Int Conf Artif Intell Statist"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1073\/pnas.1510393112"},{"journal-title":"Cat2000 A large scale fixation dataset for boosting saliency research","year":"2015","author":"borji","key":"ref58"},{"article-title":"A benchmark of computational models of saliency to predict human fixations","year":"2012","author":"judd","key":"ref57"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298710"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2018.2815601"},{"journal-title":"MIT Saliency Benchmark","year":"2019","author":"bylinskii","key":"ref54"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1016\/j.visres.2005.03.019"},{"key":"ref52","article-title":"Multi-scale context aggregation by dilated convolutions","author":"yu","year":"2016","journal-title":"Proc Int Conf Learn Represent"},{"key":"ref10","first-page":"481","article-title":"Discriminant saliency for visual recognition from cluttered scenes","author":"gao","year":"2005","journal-title":"Proc Adv Neural Inf Process Syst (NIPS)"},{"key":"ref11","doi-asserted-by":"crossref","first-page":"353","DOI":"10.1109\/TPAMI.2010.70","article-title":"Learning to detect a salient object","volume":"33","author":"liu","year":"2011","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"ref40","first-page":"487","article-title":"Learning deep features for scene recognition using places database","author":"zhou","year":"2014","journal-title":"Proc Adv Neural Inf Process Syst (NIPS)"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.276"},{"journal-title":"Seeing with humans Gaze-assisted neural image captioning","year":"2016","author":"sugano","key":"ref13"},{"key":"ref14","first-page":"5455","article-title":"Visual saliency based on multiscale deep features","author":"li","year":"2015","journal-title":"Proc IEEE Conf Comput Vis Pattern Recognit (CVPR)"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298938"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298918"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298731"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-015-0816-y"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/34.730558"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1080\/135062800394667"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.358"},{"key":"ref5","first-page":"545","article-title":"Graph-based visual saliency","author":"harel","year":"2007","journal-title":"Proc Adv Neural Inf Process Syst (NIPS)"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2004.834657"},{"journal-title":"Deepgaze ii Reading fixations from deep features trained on object recognition","year":"2016","author":"k\u00fcmmerer","key":"ref7"},{"key":"ref49","article-title":"Skip connections eliminate singularities","author":"orhan","year":"2018","journal-title":"Proc Int Conf Learn Represent"},{"key":"ref9","doi-asserted-by":"crossref","first-page":"185","DOI":"10.1109\/TIP.2009.2030969","article-title":"A novel multiresolution spatiotemporal saliency detection model and its applications in image and video compression","volume":"19","author":"guo","year":"2010","journal-title":"IEEE Trans Image Process"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2016.2536638"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2016.2644615"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46493-0_38"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2018.2851672"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298594"},{"journal-title":"Saliency prediction in the deep learning era An empirical investigation","year":"2018","author":"borji","key":"ref44"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2017.2787612"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6287639\/8600701\/08709735.pdf?arnumber=8709735","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2021,8,10]],"date-time":"2021-08-10T19:39:58Z","timestamp":1628624398000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8709735\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019]]},"references-count":64,"URL":"https:\/\/doi.org\/10.1109\/access.2019.2915630","relation":{},"ISSN":["2169-3536"],"issn-type":[{"type":"electronic","value":"2169-3536"}],"subject":[],"published":{"date-parts":[[2019]]}}}