{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,5]],"date-time":"2026-02-05T18:59:19Z","timestamp":1770317959313,"version":"3.49.0"},"reference-count":66,"publisher":"Springer Science and Business Media LLC","issue":"6","license":[{"start":{"date-parts":[[2022,7,7]],"date-time":"2022-07-07T00:00:00Z","timestamp":1657152000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2022,7,7]],"date-time":"2022-07-07T00:00:00Z","timestamp":1657152000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Appl Intell"],"published-print":{"date-parts":[[2023,3]]},"DOI":"10.1007\/s10489-022-03647-5","type":"journal-article","created":{"date-parts":[[2022,7,7]],"date-time":"2022-07-07T06:11:45Z","timestamp":1657174305000},"page":"6214-6229","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":5,"title":["FGO-Net: Feature and Gaussian Optimization Network for visual saliency prediction"],"prefix":"10.1007","volume":"53","author":[{"given":"Jialun","family":"Pei","sequence":"first","affiliation":[]},{"given":"Tao","family":"Zhou","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8454-1407","authenticated-orcid":false,"given":"He","family":"Tang","sequence":"additional","affiliation":[]},{"given":"Chao","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Chuanbo","family":"Chen","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,7,7]]},"reference":[{"key":"3647_CR1","unstructured":"Borji A (2019) Saliency prediction in the deep learning era: Successes and limitations. IEEE Trans Patt Anal Mach Intell"},{"issue":"6","key":"3647_CR2","doi-asserted-by":"publisher","first-page":"493","DOI":"10.1016\/S0960-9822(03)00135-0","volume":"13","author":"P Lennie","year":"2003","unstructured":"Lennie P (2003) The cost of cortical computation. Curr Biol 13(6):493\u2013497","journal-title":"Curr Biol"},{"issue":"8","key":"3647_CR3","doi-asserted-by":"publisher","first-page":"1913","DOI":"10.1109\/TPAMI.2019.2905607","volume":"42","author":"W Wang","year":"2019","unstructured":"Wang W, Shen J, Dong X, Borji A, Yang R (2019) Inferring salient objects from human fixations. IEEE Trans Patt Anal Mach Intell 42(8):1913\u20131927","journal-title":"IEEE Trans Patt Anal Mach Intell"},{"key":"3647_CR4","doi-asserted-by":"crossref","unstructured":"Russakovsky O, Li L-J, Fei-Fei L (2015) Best of both worlds: human-machine collaboration for object annotation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 2121\u20132131","DOI":"10.1109\/CVPR.2015.7298824"},{"issue":"1","key":"3647_CR5","doi-asserted-by":"publisher","first-page":"19","DOI":"10.1109\/TIP.2013.2282897","volume":"23","author":"H Hadizadeh","year":"2013","unstructured":"Hadizadeh H, Baji\u0107 I V (2013) Saliency-aware video compression. IEEE Trans Image Process 23(1):19\u201333","journal-title":"IEEE Trans Image Process"},{"issue":"2","key":"3647_CR6","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3177745","volume":"14","author":"M Cornia","year":"2018","unstructured":"Cornia M, Baraldi L, Serra G, Cucchiara R (2018) Paying more attention to saliency: Image captioning with saliency and context attention. ACM Trans Multimed Comput Commun Appl 14(2):1\u201321","journal-title":"ACM Trans Multimed Comput Commun Appl"},{"key":"3647_CR7","unstructured":"V M, N V (2009) Saliency-based discriminant tracking. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 1007\u20131013"},{"issue":"11","key":"3647_CR8","doi-asserted-by":"publisher","first-page":"1254","DOI":"10.1109\/34.730558","volume":"20","author":"L Itti","year":"1998","unstructured":"Itti L, Koch C, Niebur E (1998) A model of saliency-based visual attention for rapid scene analysis. IEEE Trans Patt Anal Mach Intell 20(11):1254\u20131259","journal-title":"IEEE Trans Patt Anal Mach Intell"},{"issue":"5","key":"3647_CR9","first-page":"1689","volume":"22","author":"Y Xie","year":"2012","unstructured":"Xie Y, Lu H, Yang M-H (2012) Bayesian saliency via low and mid level cues. IEEE Trans Image Process 22(5):1689\u20131698","journal-title":"IEEE Trans Image Process"},{"issue":"4","key":"3647_CR10","doi-asserted-by":"publisher","first-page":"11","DOI":"10.1167\/13.4.11","volume":"13","author":"E Erdem","year":"2013","unstructured":"Erdem E, Erdem A (2013) Visual saliency estimation by nonlinearly integrating features using region covariances. J Vis 13(4):11\u201311","journal-title":"J Vis"},{"issue":"10","key":"3647_CR11","doi-asserted-by":"publisher","first-page":"1915","DOI":"10.1109\/TPAMI.2011.272","volume":"34","author":"S Goferman","year":"2011","unstructured":"Goferman S, Zelnik-Manor L, Tal A (2011) Context-aware saliency detection. IEEE Trans Patt Anal Mach Intell 34(10):1915\u2013 1926","journal-title":"IEEE Trans Patt Anal Mach Intell"},{"issue":"2","key":"3647_CR12","doi-asserted-by":"publisher","first-page":"392","DOI":"10.1109\/TNNLS.2016.2628878","volume":"29","author":"N Liu","year":"2016","unstructured":"Liu N, Han J, Liu T, Li X (2016) Learning to predict eye fixations via multiresolution convolutional neural networks. IEEE Trans Neural Netw Learn Syst 29(2):392\u2013404","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"key":"3647_CR13","doi-asserted-by":"crossref","unstructured":"Kruthiventi Srinivas SS, Gudisa V, Dholakiya J H, Venkatesh Babu R (2016) Saliency unified: A deep architecture for simultaneous eye fixation prediction and salient object segmentation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 5781\u20135790","DOI":"10.1109\/CVPR.2016.623"},{"key":"3647_CR14","unstructured":"Liu N, Han J, Zhang D, Wen S, Liu T (2015) Predicting eye fixations using convolutional neural networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 362\u2013370"},{"issue":"9","key":"3647_CR15","doi-asserted-by":"publisher","first-page":"4446","DOI":"10.1109\/TIP.2017.2710620","volume":"26","author":"SrinivasSS Kruthiventi","year":"2017","unstructured":"Kruthiventi Srinivas SS, Ayush K, Babu R V (2017) Deepfix: A fully convolutional neural network for predicting human eye fixations. IEEE Trans Image Process 26(9):4446\u20134456","journal-title":"IEEE Trans Image Process"},{"key":"3647_CR16","doi-asserted-by":"crossref","unstructured":"Zhao Q, Sheng T, Wang Y, Tang Z, Chen Y, Cai L, Ling H (2019) M2det: A single-shot object detector based on multi-level feature pyramid network. In: Proceedings of the AAAI conference on artificial intelligence, vol 33, pp 9259\u20139266","DOI":"10.1609\/aaai.v33i01.33019259"},{"key":"3647_CR17","doi-asserted-by":"crossref","unstructured":"Wan B, Zhou D, Liu Y, Li R, He X (2019) Pose-aware multi-level feature network for human object interaction detection. In: Proceedings of the IEEE International Conference on Computer Vision, pp 9469\u20139478","DOI":"10.1109\/ICCV.2019.00956"},{"key":"3647_CR18","doi-asserted-by":"publisher","first-page":"200","DOI":"10.1016\/j.neucom.2021.04.053","volume":"452","author":"Z Huang","year":"2021","unstructured":"Huang Z, Chen H-X, Zhou T, Yang Y-Z, Liu B-Y (2021) Multi-level cross-modal interaction network for rgb-d salient object detection. Neurocomputing 452:200\u2013211","journal-title":"Neurocomputing"},{"key":"3647_CR19","doi-asserted-by":"crossref","unstructured":"Woo S, Park J, Lee J-Y, So Kweon I (2018) Cbam: Convolutional block attention module. In: Proceedings of the European Conference on Computer Vision, pp 3\u201319","DOI":"10.1007\/978-3-030-01234-2_1"},{"key":"3647_CR20","doi-asserted-by":"crossref","unstructured":"Zhao T, Wu X (2019) Pyramid feature attention network for saliency detection. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 3085\u20133094","DOI":"10.1109\/CVPR.2019.00320"},{"issue":"5","key":"3647_CR21","doi-asserted-by":"publisher","first-page":"2368","DOI":"10.1109\/TIP.2017.2787612","volume":"27","author":"W Wang","year":"2017","unstructured":"Wang W, Shen J (2017) Deep visual attention prediction. IEEE Trans Image Process 27 (5):2368\u20132378","journal-title":"IEEE Trans Image Process"},{"key":"3647_CR22","unstructured":"Bruce N, Tsotsos J (2006) Saliency based on information maximization. In: Advances in Neural Information Processing Systems, pp 155\u2013162"},{"issue":"3","key":"3647_CR23","doi-asserted-by":"publisher","first-page":"1178","DOI":"10.1109\/TIP.2015.2395713","volume":"24","author":"M Liang","year":"2015","unstructured":"Liang M, Hu X (2015) Predicting eye fixations with higher-level visual features. IEEE Trans Image Process 24(3):1178\u20131189","journal-title":"IEEE Trans Image Process"},{"key":"3647_CR24","unstructured":"Krizhevsky A, Sutskever I, Hinton G E (2012) Imagenet classification with deep convolutional neural networks. In: Advances in Neural Information Processing Systems, pp 1097\u20131105"},{"key":"3647_CR25","doi-asserted-by":"publisher","first-page":"1973","DOI":"10.1109\/TIP.2021.3050303","volume":"30","author":"Z Che","year":"2021","unstructured":"Che Z, Borji A, Zhai G, Ling S, Li J, Tian Y, Guo G, Le Callet P (2021) Adversarial attack against deep saliency models powered by non-redundant priors. IEEE Trans Image Process 30:1973\u20131988","journal-title":"IEEE Trans Image Process"},{"key":"3647_CR26","unstructured":"Shen C, Song M, Zhao Q (2012) Learning high-level concepts by training a deep network on eye fixations. In: NIPS Deep Learning and Unsup Feat Learn Workshop, vol 2"},{"key":"3647_CR27","doi-asserted-by":"crossref","unstructured":"Huang X, Shen C, Boix X, Zhao Q (2015) Salicon: Reducing the semantic gap in saliency prediction by adapting deep neural networks. In: Proceedings of the IEEE International Conference on Computer Vision, pp 262\u2013270","DOI":"10.1109\/ICCV.2015.38"},{"key":"3647_CR28","doi-asserted-by":"crossref","unstructured":"Pan J, Sayrol E, Giro-i Nieto X, McGuinness K, O\u2019Connor N E (2016) Shallow and deep convolutional networks for saliency prediction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 598\u2013606","DOI":"10.1109\/CVPR.2016.71"},{"issue":"10","key":"3647_CR29","doi-asserted-by":"publisher","first-page":"5142","DOI":"10.1109\/TIP.2018.2851672","volume":"27","author":"M Cornia","year":"2018","unstructured":"Cornia M, Baraldi L, Serra G, Cucchiara R (2018) Predicting human eye fixations via an lstm-based saliency attentive model. IEEE Trans Image Process 27(10):5142\u20135154","journal-title":"IEEE Trans Image Process"},{"issue":"7","key":"3647_CR30","doi-asserted-by":"publisher","first-page":"3264","DOI":"10.1109\/TIP.2018.2817047","volume":"27","author":"N Liu","year":"2018","unstructured":"Liu N, Han J (2018) A deep spatial contextual long-term recurrent convolutional network for saliency detection. IEEE Trans Image Process 27(7):3264\u20133274","journal-title":"IEEE Trans Image Process"},{"key":"3647_CR31","doi-asserted-by":"crossref","unstructured":"Kummerer M, Wallis Thomas SA, Gatys L A, Bethge M (2017) Understanding low-and high-level contributions to fixation prediction. In: Proceedings of the IEEE International Conference on Computer Vision, pp 4789\u20134798","DOI":"10.1109\/ICCV.2017.513"},{"key":"3647_CR32","doi-asserted-by":"crossref","unstructured":"He S, Tavakoli H R, Borji A, Mi Y, Pugeault N (2020) Understanding and visualizing deep visual saliency models. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 10206\u201310215","DOI":"10.1109\/CVPR.2019.01045"},{"key":"3647_CR33","doi-asserted-by":"crossref","unstructured":"Wang W, Lai Q, Fu H, Shen J, Ling H, Yang R (2021) Salient object detection in the deep learning era: An in-depth survey. IEEE Trans Patt Anal Mach Intell","DOI":"10.1109\/TPAMI.2021.3051099"},{"issue":"6","key":"3647_CR34","doi-asserted-by":"publisher","first-page":"3450","DOI":"10.1007\/s10489-020-01961-4","volume":"51","author":"H-B Bi","year":"2021","unstructured":"Bi H-B, Lu D, Zhu H-H, Yang L-N, Guan H-P (2021) Sta-net: spatial-temporal attention network for video salient object detection. Appl Intell 51(6):3450\u20133459","journal-title":"Appl Intell"},{"issue":"3","key":"3647_CR35","doi-asserted-by":"publisher","first-page":"569","DOI":"10.1109\/TPAMI.2014.2345401","volume":"37","author":"M-M Cheng","year":"2014","unstructured":"Cheng M-M, Mitra N J, Huang X, Torr PHS, Hu S-M (2014) Global contrast based salient region detection. IEEE Trans Patt Anal Mach Intell 37(3):569\u2013582","journal-title":"IEEE Trans Patt Anal Mach Intell"},{"key":"3647_CR36","doi-asserted-by":"crossref","unstructured":"Pang Y, Zhao X, Zhang L, Lu H (2020) Multi-scale interactive network for salient object detection. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 9413\u20139422","DOI":"10.1109\/CVPR42600.2020.00943"},{"key":"3647_CR37","unstructured":"Li G, Yu Y (2015) Visual saliency based on multiscale deep features. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 5455\u20135463"},{"key":"3647_CR38","doi-asserted-by":"crossref","unstructured":"Hou Q, Cheng M-M, Hu X, Borji A, Tu Z, Torr PHS (2017) Deeply supervised salient object detection with short connections. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 3203\u20133212","DOI":"10.1109\/CVPR.2017.563"},{"key":"3647_CR39","doi-asserted-by":"crossref","unstructured":"Zhang Q, Cong R, Li C, Cheng M-M, Fang Y, Cao X, Zhao Y, Kwong S (2020) Dense attention fluid network for salient object detection in optical remote sensing images. IEEE Trans Image Process","DOI":"10.1109\/TIP.2020.3042084"},{"key":"3647_CR40","doi-asserted-by":"crossref","unstructured":"Chen S, Tan X, Wang B, Hu X (2018) Reverse attention for salient object detection. In: Proceedings of the European Conference on Computer Vision, pp 234\u2013250","DOI":"10.1007\/978-3-030-01240-3_15"},{"key":"3647_CR41","doi-asserted-by":"crossref","unstructured":"Feng M, Lu H, Ding E (2019) Attentive feedback network for boundary-aware salient object detection. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 1623\u20131632","DOI":"10.1109\/CVPR.2019.00172"},{"key":"3647_CR42","unstructured":"Vaswani A, Shazeer N, Parmar N, Uszkoreit J, Jones L, Gomez A N, Kaiser L, Polosukhin I (2017) Attention is all you need. In: Advances in Neural Information Processing Systems, pp 5998\u20136008"},{"key":"3647_CR43","doi-asserted-by":"crossref","unstructured":"Hu J, Shen L, Sun G (2018) Squeeze-and-excitation networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 7132\u20137141","DOI":"10.1109\/CVPR.2018.00745"},{"key":"3647_CR44","doi-asserted-by":"crossref","unstructured":"Wang F, Jiang M, Qian C, Yang S, Li C, Zhang H, Wang X, Tang X (2017) Residual attention network for image classification. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 3156\u20133164","DOI":"10.1109\/CVPR.2017.683"},{"key":"3647_CR45","doi-asserted-by":"publisher","first-page":"1949","DOI":"10.1109\/TIP.2021.3049959","volume":"30","author":"Z Zhang","year":"2021","unstructured":"Zhang Z, Lin Z, Xu J, Jin W-D, Lu S-P, Fan D-P (2021) Bilateral attention network for rgb-d salient object detection. IEEE Trans Image Process 30:1949\u20131961","journal-title":"IEEE Trans Image Process"},{"key":"3647_CR46","doi-asserted-by":"crossref","unstructured":"Xu K, Li D, Cassimatis N, Wang X (2018) Lcanet: End-to-end lipreading with cascaded attention-ctc. In: 2018 13th IEEE International Conference on Automatic Face & Gesture Recognition. IEEE, pp 548\u2013555","DOI":"10.1109\/FG.2018.00088"},{"key":"3647_CR47","doi-asserted-by":"crossref","unstructured":"Fan D-P, Ji G-P, Zhou T, Chen G, Fu H, Shen J, Shao L (2020) Pranet: Parallel reverse attention network for polyp segmentation. In: International Conference on Medical Image Computing and Computer-Assisted Intervention. Springer, pp 263\u2013273","DOI":"10.1007\/978-3-030-59725-2_26"},{"key":"3647_CR48","unstructured":"Xu K, Ba J, Kiros R, Cho K, Courville A, Salakhudinov R, Zemel R, Bengio Y (2015) Show, attend and tell: Neural image caption generation with visual attention. In: International Conference on Machine Learning, pp 2048\u20132057"},{"key":"3647_CR49","doi-asserted-by":"crossref","unstructured":"Chen L, Zhang H, Xiao J, Nie L, Shao J, Liu W, Chua T-S (2017) Sca-cnn: Spatial and channel-wise attention in convolutional networks for image captioning. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 5659\u20135667","DOI":"10.1109\/CVPR.2017.667"},{"key":"3647_CR50","doi-asserted-by":"publisher","first-page":"1216","DOI":"10.1109\/TIFS.2019.2938870","volume":"15","author":"AR Lejb\u00f8lle","year":"2019","unstructured":"Lejb\u00f8lle A R, Nasrollahi K, Krogh B, Moeslund T B (2019) Person re-identification using spatial and layer-wise attention. IEEE Trans Inf Forensic Secur 15:1216\u20131231","journal-title":"IEEE Trans Inf Forensic Secur"},{"key":"3647_CR51","unstructured":"He K, Zhang X, Ren S, Sun J Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition"},{"issue":"1","key":"3647_CR52","doi-asserted-by":"publisher","first-page":"114","DOI":"10.1016\/j.imavis.2005.08.011","volume":"26","author":"AL Rothenstein","year":"2008","unstructured":"Rothenstein A L, Tsotsos J K (2008) Attention links sensing to recognition. Image Vis Comput 26(1):114\u2013126","journal-title":"Image Vis Comput"},{"issue":"2","key":"3647_CR53","doi-asserted-by":"publisher","first-page":"487","DOI":"10.1109\/TCYB.2015.2404432","volume":"46","author":"J Han","year":"2015","unstructured":"Han J, Zhang D, Wen S, Guo L, Liu T, Li X (2015) Two-stage learning to predict human eye fixations via sdaes. IEEE Trans Cybern 46(2):487\u2013498","journal-title":"IEEE Trans Cybern"},{"key":"3647_CR54","unstructured":"Kingma D P, Ba J (2015) Adam: A method for stochastic optimization. In: ICLR"},{"key":"3647_CR55","doi-asserted-by":"crossref","unstructured":"Deng J, Dong W, Socher R, Li L-J, Li K, Fei-Fei L (2009) Imagenet: A large-scale hierarchical image database. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. Ieee, pp 248\u2013255","DOI":"10.1109\/CVPR.2009.5206848"},{"issue":"5","key":"3647_CR56","doi-asserted-by":"publisher","first-page":"643","DOI":"10.1016\/j.visres.2004.09.017","volume":"45","author":"BW Tatler","year":"2005","unstructured":"Tatler B W, Baddeley R J, Gilchrist I D (2005) Visual correlates of fixation selection: Effects of scale and time. Vis Res 45(5):643\u2013659","journal-title":"Vis Res"},{"key":"3647_CR57","doi-asserted-by":"crossref","unstructured":"Cornia M, Baraldi L, Serra G, Cucchiara R (2016) A deep multi-level network for saliency prediction. In: 2016 23rd International Conference on Pattern Recognition. IEEE, pp 3488\u20133493","DOI":"10.1109\/ICPR.2016.7900174"},{"key":"3647_CR58","doi-asserted-by":"crossref","unstructured":"Zhang J, Sclaroff S (2013) Saliency detection: A boolean map approach. In: Proceedings of the IEEE International Conference on Computer Vision, pp 153\u2013160","DOI":"10.1109\/ICCV.2013.26"},{"key":"3647_CR59","doi-asserted-by":"crossref","unstructured":"Vig E, Dorr M, Cox D (2014) Large-scale optimization of hierarchical features for saliency prediction in natural images. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 2798\u20132805","DOI":"10.1109\/CVPR.2014.358"},{"key":"3647_CR60","doi-asserted-by":"publisher","first-page":"10","DOI":"10.1016\/j.neucom.2017.03.018","volume":"244","author":"HR Tavakoli","year":"2017","unstructured":"Tavakoli H R, Borji A, Laaksonen J, Rahtu E (2017) Exploiting inter-image similarity and ensemble of extreme learners for fixation prediction using deep features. Neurocomputing 244:10\u201318","journal-title":"Neurocomputing"},{"key":"3647_CR61","unstructured":"Hou X, Zhang L (2009) Dynamic visual attention: Searching for coding length increments. In: Advances in Neural Information Processing Systems, pp 681\u2013688"},{"key":"3647_CR62","doi-asserted-by":"crossref","unstructured":"Jetley S, Murray N, Vig E (2016) End-to-end saliency mapping via probability distribution prediction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 5753\u20135761","DOI":"10.1109\/CVPR.2016.620"},{"key":"3647_CR63","unstructured":"Harel J, Koch C, Perona P (2006) Graph-based visual saliency. In: Advances in Neural Information Processing Systems, pp 545\u2013552"},{"key":"3647_CR64","doi-asserted-by":"publisher","first-page":"261","DOI":"10.1016\/j.neunet.2020.05.004","volume":"129","author":"A Kroner","year":"2020","unstructured":"Kroner A, Senden M, Driessens K, Goebel R (2020) Contextual encoder\u2013decoder network for visual saliency prediction. Neural Netw 129:261\u2013270","journal-title":"Neural Netw"},{"key":"3647_CR65","doi-asserted-by":"publisher","first-page":"60428","DOI":"10.1109\/ACCESS.2019.2915630","volume":"7","author":"F Qi","year":"2019","unstructured":"Qi F, Lin C, Shi G, Li H (2019) A convolutional encoder-decoder network with skip connections for saliency prediction. IEEE Access 7:60428\u201360438","journal-title":"IEEE Access"},{"issue":"8","key":"3647_CR66","doi-asserted-by":"publisher","first-page":"2163","DOI":"10.1109\/TMM.2019.2947352","volume":"22","author":"S Yang","year":"2019","unstructured":"Yang S, Lin G, Jiang Q, Lin W (2019) A dilated inception network for visual saliency prediction. IEEE Trans Multimed 22(8):2163\u20132176","journal-title":"IEEE Trans Multimed"}],"container-title":["Applied Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-022-03647-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10489-022-03647-5\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-022-03647-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,2,27]],"date-time":"2023-02-27T04:26:35Z","timestamp":1677471995000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10489-022-03647-5"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,7,7]]},"references-count":66,"journal-issue":{"issue":"6","published-print":{"date-parts":[[2023,3]]}},"alternative-id":["3647"],"URL":"https:\/\/doi.org\/10.1007\/s10489-022-03647-5","relation":{},"ISSN":["0924-669X","1573-7497"],"issn-type":[{"value":"0924-669X","type":"print"},{"value":"1573-7497","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,7,7]]},"assertion":[{"value":"16 April 2022","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"7 July 2022","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors have no conflicts of interest to declare that are relevant to the content of this article.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"<!--Emphasis Type='Bold' removed-->Conflict of Interests"}}]}}