{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T15:32:33Z","timestamp":1775143953940,"version":"3.50.1"},"reference-count":81,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"12","license":[{"start":{"date-parts":[[2019,12,1]],"date-time":"2019-12-01T00:00:00Z","timestamp":1575158400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2019,12,1]],"date-time":"2019-12-01T00:00:00Z","timestamp":1575158400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2019,12,1]],"date-time":"2019-12-01T00:00:00Z","timestamp":1575158400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"National Key R&D Program of China","award":["2017YFB1002202"],"award-info":[{"award-number":["2017YFB1002202"]}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61471273"],"award-info":[{"award-number":["61471273"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61771348"],"award-info":[{"award-number":["61771348"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Wuhan Morning Light Plan of Youth Science and Technology","award":["2017050304010302"],"award-info":[{"award-number":["2017050304010302"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Circuits Syst. Video Technol."],"published-print":{"date-parts":[[2019,12]]},"DOI":"10.1109\/tcsvt.2018.2883305","type":"journal-article","created":{"date-parts":[[2018,11,26]],"date-time":"2018-11-26T23:06:41Z","timestamp":1543273601000},"page":"3544-3557","source":"Crossref","is-referenced-by-count":79,"title":["Video Saliency Prediction Based on Spatial-Temporal Two-Stream Network"],"prefix":"10.1109","volume":"29","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-9111-0499","authenticated-orcid":false,"given":"Kao","family":"Zhang","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7882-1066","authenticated-orcid":false,"given":"Zhenzhong","family":"Chen","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref73","first-page":"265","article-title":"TensorFlow: A system for large-scale machine learning","author":"abadi","year":"2016","journal-title":"Proc USENIX Symp on Operating System Design and Implementation"},{"key":"ref72","author":"chollet","year":"2015","journal-title":"Keras"},{"key":"ref71","first-page":"1","article-title":"Recent progress in deep learning for natural language processing","author":"lu","year":"2016","journal-title":"Proc Tuts Annu Conf North Amer Chapter Assoc Comput Linguistics Hum Lang Technol"},{"key":"ref70","first-page":"1","article-title":"Deep learning tutorial","author":"lecun","year":"2013","journal-title":"Proc Int Conf Mach Learn Tut"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1109\/ICIS.2009.165"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1167\/9.12.15"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2013.2277884"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1007\/s11042-016-4124-5"},{"key":"ref75","first-page":"1","article-title":"A model of motion attention for video skimming","author":"ma","year":"2002","journal-title":"Proc IEEE Int Conf Image Process"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2013.2273613"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2004.1326506"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.1109\/ICME.2003.1221571"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2018.2834826"},{"key":"ref32","author":"pan","year":"2017","journal-title":"Salgan Visual saliency prediction with generative adversarial networks"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2018.2851672"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2018.2817047"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7299189"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1007\/s11042-015-2802-3"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2017.2710620"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ICPR.2016.7900174"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1016\/j.visres.2004.09.017"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1016\/j.visres.2005.03.019"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1016\/j.cviu.2004.10.009"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2013.147"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.71"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2013.118"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.620"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1167\/8.7.32"},{"key":"ref66","article-title":"A benchmark of computational models of saliency to predict human fixations","author":"judd","year":"2012"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.623"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1007\/s12559-010-9074-z"},{"key":"ref68","first-page":"740","article-title":"Microsoft COCO: Common objects in context","author":"lin","year":"2014","journal-title":"Proc Eur Conf Comput Vis"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1163\/15685680360511645"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/34.730558"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1080\/135062800394667"},{"key":"ref20","first-page":"219","article-title":"Shifts in selective visual attention: Towards the underlying neural circuitry","volume":"4","author":"koch","year":"1985","journal-title":"Hum Neurobiol"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2009.5459462"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2015.2473844"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.513"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.358"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298710"},{"key":"ref25","article-title":"Very deep convolutional networks for large-scale image recognition","author":"simonyan","year":"2015","journal-title":"Proc Int Conf Learn Represent"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.343"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00514"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1016\/0959-4388(94)90066-3"},{"key":"ref58","doi-asserted-by":"crossref","first-page":"4","DOI":"10.16910\/jemr.2.2.5","article-title":"Systematic tendencies in scene viewing","volume":"2","author":"tatler","year":"2008","journal-title":"J Eye Movement Res"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.510"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2012.59"},{"key":"ref55","author":"borji","year":"2015","journal-title":"Cat2000 A large scale fixation dataset for boosting saliency research"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1167\/9.7.4"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1167\/7.14.4"},{"key":"ref52","author":"jiang","year":"2017","journal-title":"Predicting video saliency with object-to-motion CNN and two-layer convolutional LSTM"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1016\/0010-0285(80)90005-5"},{"key":"ref11","first-page":"545","article-title":"Graph-based visual saliency","author":"harel","year":"2007","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-009-0215-3"},{"key":"ref12","first-page":"1063","article-title":"Video saliency detection via dynamic consistent spatio-temporal attention modelling","author":"zhong","year":"2013","journal-title":"Proc AAAI Conf Artif Intell"},{"key":"ref13","first-page":"1097","article-title":"ImageNet classification with deep convolutional neural networks","author":"krizhevsky","year":"2012","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.81"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298965"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.38"},{"key":"ref81","first-page":"426","article-title":"A new perceived motion based shot content representation","author":"ma","year":"2001","journal-title":"Proc IEEE Int Conf Image Process"},{"key":"ref17","article-title":"Deep gaze I: Boosting saliency prediction with feature maps trained on ImageNet","author":"k\u00fcmmerer","year":"2015","journal-title":"Proc Workshop Int Conf Learn Represent"},{"key":"ref18","author":"k\u00fcmmerer","year":"2016","journal-title":"Deepgaze ii Reading fixations from deep features trained on object recognition"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1016\/0166-2236(92)90344-8"},{"key":"ref80","article-title":"Visual saliency in video compression and transmission","author":"hadizadeh","year":"2013"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2013.2282897"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2010.2080279"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2012.98"},{"key":"ref5","first-page":"251","article-title":"Attentional selection for object recognition&#x2014;A gentle way","author":"walther","year":"2002","journal-title":"Proc Int Workshop Biologically Motivated Comput Vis"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2011.171"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2012.2199126"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2017.2754941"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.334"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2013.175"},{"key":"ref45","first-page":"568","article-title":"Two-stream convolutional networks for action recognition in videos","author":"simonyan","year":"2014","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref48","article-title":"Recurrent mixture density network for spatiotemporal visual attention","author":"bazzani","year":"2017","journal-title":"Proc Int Conf Learn Represent"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP.2016.7532629"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2014.2336549"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2016.04.048"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2017.2777665"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2016.2567391"}],"container-title":["IEEE Transactions on Circuits and Systems for Video Technology"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/76\/8926550\/08543830.pdf?arnumber=8543830","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,7,13]],"date-time":"2022-07-13T20:56:34Z","timestamp":1657745794000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8543830\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,12]]},"references-count":81,"journal-issue":{"issue":"12"},"URL":"https:\/\/doi.org\/10.1109\/tcsvt.2018.2883305","relation":{},"ISSN":["1051-8215","1558-2205"],"issn-type":[{"value":"1051-8215","type":"print"},{"value":"1558-2205","type":"electronic"}],"subject":[],"published":{"date-parts":[[2019,12]]}}}