{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,8]],"date-time":"2026-02-08T03:48:39Z","timestamp":1770522519259,"version":"3.49.0"},"reference-count":37,"publisher":"Institute of Electronics, Information and Communications Engineers (IEICE)","issue":"10","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEICE Trans. Inf. &amp; Syst."],"published-print":{"date-parts":[[2023,10,1]]},"DOI":"10.1587\/transinf.2023edp7055","type":"journal-article","created":{"date-parts":[[2023,9,30]],"date-time":"2023-09-30T23:01:18Z","timestamp":1696114878000},"page":"1723-1731","source":"Crossref","is-referenced-by-count":3,"title":["Multi-Scale Estimation for Omni-Directional Saliency Maps Using Learnable Equator Bias"],"prefix":"10.1587","volume":"E106.D","author":[{"given":"Takao","family":"YAMANAKA","sequence":"first","affiliation":[{"name":"Department of Information and Communication Sciences, Sophia University"}]},{"given":"Tatsuya","family":"SUZUKI","sequence":"additional","affiliation":[{"name":"Department of Information and Communication Sciences, Sophia University"}]},{"given":"Taiki","family":"NOBUTSUNE","sequence":"additional","affiliation":[{"name":"Department of Information and Communication Sciences, Sophia University"}]},{"given":"Chenjunlin","family":"WU","sequence":"additional","affiliation":[{"name":"Department of Information and Communication Sciences, Sophia University"}]}],"member":"532","reference":[{"key":"1","doi-asserted-by":"publisher","unstructured":"[1] M. Xu, C. Li, S. Zhang, and P.L. Callet, \u201cState-of-the-art in 360\u00b0 video\/image processing: perception, assessment and compression,\u201d IEEE Journal of Selected Topics in Signal Processing, vol.14, no.1, pp.5-26, 2020. 10.1109\/jstsp.2020.2966864","DOI":"10.1109\/JSTSP.2020.2966864"},{"key":"2","doi-asserted-by":"crossref","unstructured":"[2] A.D. Abreu, C. Ozcinar, and A. Smolic, \u201cLook around you: saliency maps for omnidirectional images in VR applications,\u201d International Conference on Quality of Multimedia Experience, 2017. 10.1109\/qomex.2017.7965634","DOI":"10.1109\/QoMEX.2017.7965634"},{"key":"3","doi-asserted-by":"publisher","unstructured":"[3] R. Monroy, S. Lutz, T. Chalasani, and A. Smolic, \u201cSalNet360: saliency maps for omni-directional images with CNN,\u201d Signal Processing: Image Communication, vol.69, pp.26-34, 2018. 10.1016\/j.image.2018.05.005","DOI":"10.1016\/j.image.2018.05.005"},{"key":"4","doi-asserted-by":"publisher","unstructured":"[4] C. Qing, H. Zhu, X. Xing, D. Chen, and J. Jin, \u201cAttentive and context-aware deep network for saliency prediction on omni-directional images,\u201d Digital Signal Processing, vol.120, 2022. 10.1016\/j.dsp.2021.103289","DOI":"10.1016\/j.dsp.2021.103289"},{"key":"5","doi-asserted-by":"crossref","unstructured":"[5] T. Suzuki and T. Yamanaka, \u201cSaliency map estimation for omni-directional image considering prior distributions,\u201d International Conference on Systems, Man, and Cybernetics, 2018. 10.1109\/smc.2018.00358","DOI":"10.1109\/SMC.2018.00358"},{"key":"6","unstructured":"[6] Z. Bylinskii, T. Judd, A. Borji, L. Itti, F. Durand, A. Oliva, and A. Torralba, \u201cMit saliency benchmark,\u201d available at: http:\/\/saliency.mit.edu."},{"key":"7","unstructured":"[7] M. K\u00fcmmerer, Z. Bylinskii, T. Judd, A. Borji, L. Itti, F. Durand, A. Oliva, and A. 
Torralba, \u201cMIT\/Tuebingen saliency benchmark,\u201d available at: https:\/\/saliency.tuebingen.ai\/."},{"key":"8","doi-asserted-by":"publisher","unstructured":"[8] T. Oyama and T. Yamanaka, \u201cInfluence of image classification accuracy on saliency map estimation,\u201d CAAI Transactions on Intelligence Technology, vol.3, no.3, pp.140-152, 2018. 10.1049\/trit.2018.1012","DOI":"10.1049\/trit.2018.1012"},{"key":"9","doi-asserted-by":"publisher","unstructured":"[9] V. Sitzmann, A. Serrano, A. Pavel, M. Agrawala, D. Gutierrez, B. Masia, and G. Wetzstein, \u201cSaliency in VR: how do people explore virtual environments?\u201d IEEE Transactions on Visualization and Computer Graphics, vol.24, no.4, pp.1633-1642, 2018. 10.1109\/tvcg.2018.2793599","DOI":"10.1109\/TVCG.2018.2793599"},{"key":"10","doi-asserted-by":"publisher","unstructured":"[10] L. Itti, C. Koch, and E. Niebur, \u201cA model of saliency-based visual attention for rapid scene analysis,\u201d IEEE Transactions on Pattern Analysis and Machine Intelligence, vol.20, no.11, pp.1254-1259, 1998. 10.1109\/34.730558","DOI":"10.1109\/34.730558"},{"key":"11","doi-asserted-by":"publisher","unstructured":"[11] A. Garcia-Diaz, X.R. Fdez-Vidal, X.M. Pardo, and R. Dosil, \u201cSaliency from hierarchical adaptation through decorrelation and variance normalization,\u201d Image and Vision Computing, vol.30, no.1, pp.51-64, 2012. 10.1016\/j.imavis.2011.11.007","DOI":"10.1016\/j.imavis.2011.11.007"},{"key":"12","doi-asserted-by":"crossref","unstructured":"[12] X. Huang, C. Shen, X. Boix, and Q. Zhao, \u201cSALICON: reducing the semantic gap in saliency prediction by adapting deep neural networks,\u201d International Conference on Computer Vision, 2015. 10.1109\/iccv.2015.38","DOI":"10.1109\/ICCV.2015.38"},{"key":"13","doi-asserted-by":"crossref","unstructured":"[13] J. Pan, E. Sayrol, X. Giro-I-Nieto, K. McGuinness, and N.E. O&apos;Connor, \u201cShallow and deep convolutional networks for saliency prediction,\u201d IEEE\/CVF Computer Vision and Pattern Recognition Conference, 2016. 10.1109\/cvpr.2016.71","DOI":"10.1109\/CVPR.2016.71"},{"key":"14","doi-asserted-by":"crossref","unstructured":"[14] E. Vig, M. Dorr, and D. Cox, \u201cLarge-scale optimization of hierarchical features for saliency prediction in natural images,\u201d IEEE\/CVF Computer Vision and Pattern Recognition Conference, 2014. 10.1109\/cvpr.2014.358","DOI":"10.1109\/CVPR.2014.358"},{"key":"15","unstructured":"[15] M. K\u00fcmmerer, L. Theis, and M. Bethge, \u201cDeep gaze I: boosting saliency prediction with feature maps trained on ImageNet,\u201d arXiv, 2014."},{"key":"16","doi-asserted-by":"publisher","unstructured":"[16] S.S.S. Kruthiventi, K. Ayush, and R.V. Babu, \u201cDeepFix: a fully convolutional neural network for predicting human eye fixations,\u201d IEEE Transactions on Image Processing, vol.26, no.9, pp.4446-4456, 2017. 10.1109\/tip.2017.2710620","DOI":"10.1109\/TIP.2017.2710620"},{"key":"17","doi-asserted-by":"crossref","unstructured":"[17] M. K\u00fcmmerer, T.S.A. Wallis, and M. Bethge, \u201cDeepGaze II: reading fixations from deep features trained on object recognition,\u201d arXiv, 2016.","DOI":"10.1167\/17.10.1147"},{"key":"18","doi-asserted-by":"crossref","unstructured":"[18] O. Russakovsky, J. Deng, H. Su, J. Krause, S. Satheesh, S. Ma, Z. Huang, A. Karpathy, A. Khosla, M. Bernstein, A.C. Berg, and L. Fei-Fei, \u201cImageNet large scale visual recognition challenge,\u201d International Journal of Computer Vision, vol.115, pp.211-252, 2015. 
10.1007\/s11263-015-0816-y","DOI":"10.1007\/s11263-015-0816-y"},{"key":"19","doi-asserted-by":"publisher","unstructured":"[19] A. Borji and J. Tanner, \u201cReconciling saliency and object center-bias hypotheses in explaining free-viewing fixations,\u201d IEEE Transactions on Neural Networks and Learning Systems, vol.27, no.6, pp.1214-1226, 2016. 10.1109\/tnnls.2015.2480683","DOI":"10.1109\/TNNLS.2015.2480683"},{"key":"20","unstructured":"[20] F. Yu and V. Koltun, \u201cMulti-scale context aggregation by dilated convolutions,\u201d International Conference on Learning Representations, 2016."},{"key":"21","unstructured":"[21] J. Pan, C.C. Ferrer, K. McGuinness, N.E. O&apos;Connor, J. Torres, E. Sayrol, and X. Giro-i-Nieto, \u201cSalGAN: visual saliency prediction with generative adversarial networks,\u201d Workshop on IEEE\/CVF Computer Vision and Pattern Recognition Conference, 2017."},{"key":"22","doi-asserted-by":"crossref","unstructured":"[22] G. Huang, Z. Liu, L. Van Der Maaten, and K.Q. Weinberger, \u201cDensely connected convolutional networks,\u201d IEEE\/CVF Computer Vision and Pattern Recognition Conference, 2017. 10.1109\/cvpr.2017.243","DOI":"10.1109\/CVPR.2017.243"},{"key":"23","unstructured":"[23] Y. Chen, J. Li, H. Xiao, X. Jin, S. Yan, and J. Feng, \u201cDual path networks,\u201d Conference on Neural Information Processing Systems, 2017."},{"key":"24","doi-asserted-by":"publisher","unstructured":"[24] S. Jia and N.D.B. Bruce, \u201cEML-NET: an expandable multi-layer network for saliency prediction,\u201d Image and Vision Computing, vol.95, 2020. 10.1016\/j.imavis.2020.103887","DOI":"10.1016\/j.imavis.2020.103887"},{"key":"25","doi-asserted-by":"crossref","unstructured":"[25] B. Zoph, V. Vasudevan, J. Shlens, and Q.V. Le, \u201cLearning transferable architectures for scalable image recognition,\u201d IEEE\/CVF Computer Vision and Pattern Recognition Conference, 2018. 10.1109\/cvpr.2018.00907","DOI":"10.1109\/CVPR.2018.00907"},{"key":"26","doi-asserted-by":"crossref","unstructured":"[26] A. Linardos, M. K\u00fcmmerer, O. Press, and M. Bethge, \u201cDeepGaze IIE: Calibrated prediction in and out-of-domain for state-of-the-art saliency modeling,\u201d IEEE\/CVF International Conference on Computer Vision, 2021. 10.1109\/iccv48922.2021.01268","DOI":"10.1109\/ICCV48922.2021.01268"},{"key":"27","doi-asserted-by":"publisher","unstructured":"[27] J. Guti\u00e9rrez, E. David, Y. Rai, and P.L. Callet, \u201cToolbox and dataset for the development of saliency and scanpath models for omnidirectional\/360\u00b0 still images,\u201d Signal Processing: Image Communication, vol.69, pp.35-42, 2018. 10.1016\/j.image.2018.05.003","DOI":"10.1016\/j.image.2018.05.003"},{"key":"28","doi-asserted-by":"crossref","unstructured":"[28] F.Y. Chao, L. Zhang, W. Hamidouche, and O. D\u00e9forges, \u201cSalgan360: visual saliency prediction on 360 degree images with generative adversarial networks,\u201d Workshop on International Conference on Multimedia &amp; Expo, 2018.","DOI":"10.1109\/ICMEW.2018.8551543"},{"key":"29","doi-asserted-by":"crossref","unstructured":"[29] M. Cornia, L. Baraldi, G. Serra, and R. Cucchiara, \u201cA deep multi-level network for saliency prediction,\u201d International Conference on Pattern Recognition, 2016. 10.1109\/icpr.2016.7900174","DOI":"10.1109\/ICPR.2016.7900174"},{"key":"30","doi-asserted-by":"publisher","unstructured":"[30] H. Li, H. Lu, Z. Lin, X. Shen, and B. 
Price, \u201cInner and inter label propagation: Salient object detection in the wild,\u201d IEEE Transactions on Image Processing, vol.24, no.10, pp.3176-3186, 2015. 10.1109\/tip.2015.2440174","DOI":"10.1109\/TIP.2015.2440174"},{"key":"31","doi-asserted-by":"crossref","unstructured":"[31] J. Guti\u00e9rrez, E.J. David, A. Coutrot, M.P. Da Silva, and P. Le Callet, \u201cIntroducing UN Salient360! Benchmark: A platform for evaluating visual attention models for 360\u00b0 contents,\u201d International Conference on Quality of Multimedia Experience, 2018. 10.1109\/qomex.2018.8463369","DOI":"10.1109\/QoMEX.2018.8463369"},{"key":"32","doi-asserted-by":"publisher","unstructured":"[32] P. Lebreton and A. Raake, \u201cGBVS360, BMS360, ProSal: extending existing saliency prediction models from 2D to omnidirectional images,\u201d Signal Processing: Image Communication, vol.69, pp.69-78, 2018. 10.1016\/j.image.2018.03.006","DOI":"10.1016\/j.image.2018.03.006"},{"key":"33","doi-asserted-by":"crossref","unstructured":"[33] M. Jiang, S. Huang, J. Duan, and Q. Zhao, \u201cSALICON: saliency in context,\u201d IEEE\/CVF Computer Vision and Pattern Recognition Conference, 2015. 10.1109\/cvpr.2015.7298710","DOI":"10.1109\/CVPR.2015.7298710"},{"key":"34","doi-asserted-by":"publisher","unstructured":"[34] J. Xu, M. Jiang, S. Wang, M.S. Kankanhalli, and Q. Zhao, \u201cPredicting human gaze beyond pixels,\u201d Journal of Vision, vol.14, no.1, p.28, 2014. 10.1167\/14.1.28","DOI":"10.1167\/14.1.28"},{"key":"35","doi-asserted-by":"publisher","unstructured":"[35] M. Startsev and M. Dorr, \u201c360-aware saliency estimation with conventional image saliency predictors,\u201d Signal Processing: Image Communication, vol.69, pp.43-52, 2018. 10.1016\/j.image.2018.03.013","DOI":"10.1016\/j.image.2018.03.013"},{"key":"36","doi-asserted-by":"publisher","unstructured":"[36] Y. Zhu, G. Zhai, and X. Min, \u201cThe prediction of head and eye movement for 360 degree images,\u201d Signal Processing: Image Communication, vol.69, pp.15-25, 2018. 10.1016\/j.image.2018.05.010","DOI":"10.1016\/j.image.2018.05.010"},{"key":"37","doi-asserted-by":"publisher","unstructured":"[37] J. Ling, K. Zhang, Y. Zhang, D. Yang, and Z. Chen, \u201cA saliency prediction model on 360 degree images using color dictionary based sparse representation,\u201d Signal Processing: Image Communication, vol.69, pp.60-68, 2018. 10.1016\/j.image.2018.03.007","DOI":"10.1016\/j.image.2018.03.007"}],"container-title":["IEICE Transactions on Information and Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/www.jstage.jst.go.jp\/article\/transinf\/E106.D\/10\/E106.D_2023EDP7055\/_pdf","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,10,7]],"date-time":"2023-10-07T04:23:36Z","timestamp":1696652616000},"score":1,"resource":{"primary":{"URL":"https:\/\/www.jstage.jst.go.jp\/article\/transinf\/E106.D\/10\/E106.D_2023EDP7055\/_article"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,10,1]]},"references-count":37,"journal-issue":{"issue":"10","published-print":{"date-parts":[[2023]]}},"URL":"https:\/\/doi.org\/10.1587\/transinf.2023edp7055","relation":{},"ISSN":["0916-8532","1745-1361"],"issn-type":[{"value":"0916-8532","type":"print"},{"value":"1745-1361","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,10,1]]},"article-number":"2023EDP7055"}}
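The record above follows the Crossref REST API "work" envelope: the bibliographic payload sits under the "message" key, titles and container titles are arrays, and each author object carries "given", "family", and "affiliation" fields. As a minimal sketch of how such a record can be retrieved and parsed, assuming only the public endpoint https://api.crossref.org/works/{DOI} and the Python standard library (variable names here are illustrative, not part of the record):

# Minimal sketch: fetch and parse a Crossref work record like the one above.
# Assumes the public Crossref REST API; names such as DOI/url/work are illustrative.
import json
import urllib.request

DOI = "10.1587/transinf.2023edp7055"  # the "DOI" field of the record above
url = f"https://api.crossref.org/works/{DOI}"

with urllib.request.urlopen(url) as resp:
    # The bibliographic payload sits under the "message" key, as above.
    work = json.load(resp)["message"]

title = work["title"][0]  # Crossref stores titles as lists
authors = ", ".join(
    f"{a.get('given', '')} {a.get('family', '')}".strip()
    for a in work.get("author", [])
)
print(title)
print(authors)
print(f"{work.get('reference-count', 0)} references, DOI: {work['DOI']}")

For this record, the script would print the article title, the four Sophia University authors, and "37 references" together with the DOI that resolves to the J-STAGE landing page given under "resource".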