{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T03:23:38Z","timestamp":1740108218513,"version":"3.37.3"},"reference-count":40,"publisher":"Springer Science and Business Media LLC","issue":"4","license":[{"start":{"date-parts":[[2024,5,5]],"date-time":"2024-05-05T00:00:00Z","timestamp":1714867200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,5,5]],"date-time":"2024-05-05T00:00:00Z","timestamp":1714867200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2021YFB1714700"],"award-info":[{"award-number":["2021YFB1714700"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62088102 and 62106192"],"award-info":[{"award-number":["62088102 and 62106192"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100007128","name":"Natural Science Foundation of Shaanxi Province","doi-asserted-by":"publisher","award":["2022JC-41 and 2021JQ-054"],"award-info":[{"award-number":["2022JC-41 and 2021JQ-054"]}],"id":[{"id":"10.13039\/501100007128","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100002858","name":"China Postdoctoral Science Foundation","doi-asserted-by":"publisher","award":["2020M683490 and 2022T150518"],"award-info":[{"award-number":["2020M683490 and 2022T150518"]}],"id":[{"id":"10.13039\/501100002858","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Fundamental Research Funds for the Central 
Universities","award":["XTR042021005 and XTR072022001"],"award-info":[{"award-number":["XTR042021005 and XTR072022001"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Machine Vision and Applications"],"published-print":{"date-parts":[[2024,7]]},"DOI":"10.1007\/s00138-024-01545-z","type":"journal-article","created":{"date-parts":[[2024,5,5]],"date-time":"2024-05-05T04:01:21Z","timestamp":1714881681000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Residual feature learning with hierarchical calibration for gaze estimation"],"prefix":"10.1007","volume":"35","author":[{"given":"Zhengdan","family":"Yin","sequence":"first","affiliation":[]},{"given":"Sanping","family":"Zhou","sequence":"additional","affiliation":[]},{"given":"Le","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Tao","family":"Dai","sequence":"additional","affiliation":[]},{"given":"Gang","family":"Hua","sequence":"additional","affiliation":[]},{"given":"Nanning","family":"Zheng","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,5,5]]},"reference":[{"issue":"2","key":"1545_CR1","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3361330","volume":"39","author":"R Konrad","year":"2020","unstructured":"Konrad, R., Angelopoulos, A., Wetzstein, G.: Gaze-contingent ocular parallax rendering for virtual reality. ACM Trans. Graph. (TOG) 39(2), 1\u201312 (2020)","journal-title":"ACM Trans. Graph. (TOG)"},{"issue":"4","key":"1545_CR2","doi-asserted-by":"publisher","first-page":"1633","DOI":"10.1109\/TVCG.2018.2793599","volume":"24","author":"V Sitzmann","year":"2018","unstructured":"Sitzmann, V., Serrano, A., Pavel, A., Agrawala, M., Gutierrez, D., Masia, B., Wetzstein, G.: Saliency in vr: how do people explore virtual environments? IEEE Trans. Visual Comput. Graph. 
24(4), 1633\u20131642 (2018)","journal-title":"IEEE Trans. Visual Comput. Graph."},{"key":"1545_CR3","doi-asserted-by":"crossref","unstructured":"Zhang, X., Sugano, Y., Bulling, A.: Everyday eye contact detection using unsupervised gaze target discovery. In: Proceedings of the 30th Annual ACM Symposium on User Interface Software and Technology, pp. 193\u2013203 (2017)","DOI":"10.1145\/3126594.3126614"},{"key":"1545_CR4","doi-asserted-by":"crossref","unstructured":"Terzio\u011flu, Y., Mutlu, B., \u015eahin, E.: Designing social cues for collaborative robots: the role of gaze and breathing in human\u2013robot collaboration. In: Proceedings of the 2020 ACM\/IEEE International Conference on Human\u2013Robot Interaction, pp. 343\u2013357 (2020)","DOI":"10.1145\/3319502.3374829"},{"key":"1545_CR5","doi-asserted-by":"crossref","unstructured":"Gerber, M.A., Schroeter, R., Xiaomeng, L., Elhenawy, M.: Self-interruptions of non-driving related tasks in automated vehicles: mobile vs head-up display. In: Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems, pp. 1\u20139 (2020)","DOI":"10.1145\/3313831.3376751"},{"issue":"7","key":"1545_CR6","doi-asserted-by":"publisher","first-page":"1720","DOI":"10.1109\/TPAMI.2018.2845370","volume":"41","author":"A Palazzi","year":"2018","unstructured":"Palazzi, A., Abati, D., Solera, F., Cucchiara, R., et al.: Predicting the driver\u2019s focus of attention: the dr (eye) ve project. IEEE Trans. Pattern Anal. Mach. Intell. 41(7), 1720\u20131733 (2018)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"1545_CR7","doi-asserted-by":"crossref","unstructured":"Jin, S., Dai, J., Nguyen, T.: Kappa angle regression with ocular counter-rolling awareness for gaze estimation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
2658\u20132667 (2023)","DOI":"10.1109\/CVPRW59228.2023.00266"},{"key":"1545_CR8","doi-asserted-by":"crossref","unstructured":"Li, Y., Zhan, Y., Yang, Z.: Evaluation of appearance-based eye tracking calibration data selection. In: 2020 IEEE International Conference on Artificial Intelligence and Computer Applications (ICAICA), pp. 222\u2013224. IEEE (2020)","DOI":"10.1109\/ICAICA50127.2020.9181854"},{"key":"1545_CR9","doi-asserted-by":"crossref","unstructured":"Park, S., Aksan, E., Zhang, X., Hilliges, O.: Towards end-to-end video-based eye-tracking. In: Computer Vision\u2014ECCV 2020: 16th European Conference, Glasgow, UK, August 23\u201328, 2020, Proceedings, Part XII 16, pp. 747\u2013763. Springer (2020)","DOI":"10.1007\/978-3-030-58610-2_44"},{"key":"1545_CR10","doi-asserted-by":"publisher","first-page":"3322","DOI":"10.1109\/TIP.2022.3171416","volume":"31","author":"J Bao","year":"2022","unstructured":"Bao, J., Liu, B., Yu, J.: An individual-difference-aware model for cross-person gaze estimation. IEEE Trans. Image Process. 31, 3322\u20133333 (2022)","journal-title":"IEEE Trans. Image Process."},{"key":"1545_CR11","doi-asserted-by":"crossref","unstructured":"Miao, Q., Hoai, M., Samaras, D.: Patch-level gaze distribution prediction for gaze following. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 880\u2013889 (2023)","DOI":"10.1109\/WACV56688.2023.00094"},{"key":"1545_CR12","unstructured":"Ghosh, S., Dhall, A., Hayat, M., Knibbe, J., Ji, Q.: Automatic gaze analysis: a survey of deep learning based approaches. arXiv preprint arXiv:2108.05479 (2021)"},{"issue":"1","key":"1545_CR13","doi-asserted-by":"publisher","first-page":"162","DOI":"10.1109\/TPAMI.2017.2778103","volume":"41","author":"X Zhang","year":"2017","unstructured":"Zhang, X., Sugano, Y., Fritz, M., Bulling, A.: Mpiigaze: real-world dataset and deep appearance-based gaze estimation. IEEE Trans. Pattern Anal. Mach. Intell. 
41(1), 162\u2013175 (2017)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"1545_CR14","doi-asserted-by":"crossref","unstructured":"Model, D., Eizenman, M.: User-calibration-free remote eye-gaze tracking system with extended tracking range. In: 2011 24th Canadian Conference on Electrical and Computer Engineering (CCECE), pp. 001268\u2013001271. IEEE (2011)","DOI":"10.1109\/CCECE.2011.6030667"},{"key":"1545_CR15","doi-asserted-by":"crossref","unstructured":"Wang, K., Ji, Q.: Real time eye gaze tracking with kinect. In: 2016 23rd International Conference on Pattern Recognition (ICPR), pp. 2752\u20132757. IEEE (2016)","DOI":"10.1109\/ICPR.2016.7900052"},{"issue":"2","key":"1545_CR16","doi-asserted-by":"publisher","first-page":"75","DOI":"10.1109\/THMS.2020.3035176","volume":"51","author":"J Liu","year":"2020","unstructured":"Liu, J., Chi, J., Hu, W., Wang, Z.: 3d model-based gaze tracking via iris features with a single camera and a single light source. IEEE Trans. Hum. Mach. Syst. 51(2), 75\u201386 (2020)","journal-title":"IEEE Trans. Hum. Mach. Syst."},{"key":"1545_CR17","doi-asserted-by":"crossref","unstructured":"Chen, J., Ji, Q.: 3d gaze estimation with a single camera without ir illumination. In: 2008 19th International Conference on Pattern Recognition, pp. 1\u20134. IEEE (2008)","DOI":"10.1109\/ICPR.2008.4761343"},{"key":"1545_CR18","doi-asserted-by":"crossref","unstructured":"Wu, Y., Ji, Q.: Learning the deep features for eye detection in uncontrolled conditions. In: 2014 22nd International Conference on Pattern Recognition, pp. 455\u2013459. IEEE (2014)","DOI":"10.1109\/ICPR.2014.87"},{"key":"1545_CR19","doi-asserted-by":"crossref","unstructured":"Wood, E., Baltru\u0161aitis, T., Morency, L.-P., Robinson, P., Bulling, A.: A 3d morphable eye region model for gaze estimation. In: Computer Vision\u2014ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11\u201314, 2016, Proceedings, Part I 14, pp. 297\u2013313. 
Springer (2016)","DOI":"10.1007\/978-3-319-46448-0_18"},{"key":"1545_CR20","doi-asserted-by":"crossref","unstructured":"Fischer, T., Chang, H.J., Demiris, Y.: Rt-gene: real-time eye gaze estimation in natural environments. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 334\u2013352 (2018)","DOI":"10.1007\/978-3-030-01249-6_21"},{"key":"1545_CR21","doi-asserted-by":"crossref","unstructured":"Cheng, Y., Lu, F., Zhang, X.: Appearance-based gaze estimation via evaluation-guided asymmetric regression. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 100\u2013115 (2018)","DOI":"10.1007\/978-3-030-01264-9_7"},{"key":"1545_CR22","doi-asserted-by":"crossref","unstructured":"Cheng, Y., Huang, S., Wang, F., Qian, C., Lu, F.: A coarse-to-fine adaptive network for appearance-based gaze estimation. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 34, pp. 10623\u201310630 (2020)","DOI":"10.1609\/aaai.v34i07.6636"},{"key":"1545_CR23","doi-asserted-by":"crossref","unstructured":"Bao, Y., Cheng, Y., Liu, Y., Lu, F.: Adaptive feature fusion network for gaze tracking in mobile tablets. In: 2020 25th International Conference on Pattern Recognition (ICPR), pp. 9936\u20139943. IEEE (2021)","DOI":"10.1109\/ICPR48806.2021.9412205"},{"key":"1545_CR24","doi-asserted-by":"crossref","unstructured":"Zhang, X., Sugano, Y., Fritz, M., Bulling, A.: It\u2019s written all over your face: full-face appearance-based gaze estimation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 51\u201360 (2017)","DOI":"10.1109\/CVPRW.2017.284"},{"key":"1545_CR25","doi-asserted-by":"crossref","unstructured":"Krafka, K., Khosla, A., Kellnhofer, P., Kannan, H., Bhandarkar, S., Matusik, W., Torralba, A.: Eye tracking for everyone. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 
2176\u20132184 (2016)","DOI":"10.1109\/CVPR.2016.239"},{"key":"1545_CR26","doi-asserted-by":"crossref","unstructured":"Cheng, Y., Lu, F.: Gaze estimation using transformer. In: 2022 26th International Conference on Pattern Recognition (ICPR), pp. 3341\u20133347. IEEE (2022)","DOI":"10.1109\/ICPR56361.2022.9956687"},{"key":"1545_CR27","doi-asserted-by":"crossref","unstructured":"Cheng, Y., Bao, Y., Lu, F.: Puregaze: purifying gaze feature for generalizable gaze estimation. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 36, pp. 436\u2013443 (2022)","DOI":"10.1609\/aaai.v36i1.19921"},{"key":"1545_CR28","doi-asserted-by":"crossref","unstructured":"Park, S., Mello, S.D., Molchanov, P., Iqbal, U., Hilliges, O., Kautz, J.: Few-shot adaptive gaze estimation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 9368\u20139377 (2019)","DOI":"10.1109\/ICCV.2019.00946"},{"key":"1545_CR29","doi-asserted-by":"crossref","unstructured":"Xiong, Y., Kim, H.J., Singh, V.: Mixed effects neural networks (menets) with applications to gaze estimation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 7743\u20137752 (2019)","DOI":"10.1109\/CVPR.2019.00793"},{"key":"1545_CR30","doi-asserted-by":"crossref","unstructured":"Lind\u00e9n, E., Sjostrand, J., Proutiere, A.: Learning to personalize in appearance-based gaze tracking. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision Workshops, pp. 0\u20130 (2019)","DOI":"10.1109\/ICCVW.2019.00145"},{"key":"1545_CR31","doi-asserted-by":"crossref","unstructured":"He, J., Pham, K., Valliappan, N., Xu, P., Roberts, C., Lagun, D., Navalpakkam, V.: On-device few-shot personalization for real-time gaze estimation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision Workshops, pp. 
0\u20130 (2019)","DOI":"10.1109\/ICCVW.2019.00146"},{"key":"1545_CR32","doi-asserted-by":"crossref","unstructured":"Guo, Z., Yuan, Z., Zhang, C., Chi, W., Ling, Y., Zhang, S.: Domain adaptation gaze estimation by embedding with prediction consistency. In: Proceedings of the Asian Conference on Computer Vision (2020)","DOI":"10.1007\/978-3-030-69541-5_18"},{"issue":"8","key":"1545_CR33","doi-asserted-by":"publisher","first-page":"1913","DOI":"10.1109\/TPAMI.2019.2905607","volume":"42","author":"W Wang","year":"2019","unstructured":"Wang, W., Shen, J., Dong, X., Borji, A., Yang, R.: Inferring salient objects from human fixations. IEEE Trans. Pattern Anal. Mach. Intell. 42(8), 1913\u20131927 (2019)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"1545_CR34","doi-asserted-by":"crossref","unstructured":"Kruthiventi, S.S., Gudisa, V., Dholakiya, J.H., Babu, R.V.: Saliency unified: a deep architecture for simultaneous eye fixation prediction and salient object segmentation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 5781\u20135790 (2016)","DOI":"10.1109\/CVPR.2016.623"},{"key":"1545_CR35","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, \u0141., Polosukhin, I.: Attention is all you need. Adv. Neural Inf. Process. Syst. 30 (2017)"},{"key":"1545_CR36","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"1545_CR37","doi-asserted-by":"crossref","unstructured":"Zhang, X., Park, S., Beeler, T., Bradley, D., Tang, S., Hilliges, O.: Eth-xgaze: A large scale dataset for gaze estimation under extreme head pose and gaze variation. 
In: Computer Vision\u2014ECCV 2020: 16th European Conference, Glasgow, UK, August 23\u201328, 2020, Proceedings, Part V 16, pp. 365\u2013381. Springer (2020)","DOI":"10.1007\/978-3-030-58558-7_22"},{"key":"1545_CR38","doi-asserted-by":"crossref","unstructured":"Cheng, Y., Zhang, X., Lu, F., Sato, Y.: Gaze estimation by exploring two-eye asymmetry. IEEE Trans. Image Process. 29, 5259\u20135272 (2020)","DOI":"10.1109\/TIP.2020.2982828"},{"key":"1545_CR39","unstructured":"Biswas, P., et al.: Appearance-based gaze estimation using attention and difference mechanism. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3143\u20133152 (2021)"},{"key":"1545_CR40","doi-asserted-by":"crossref","unstructured":"Abdelrahman, A.A., Hempel, T., Khalifa, A., Al-Hamadi, A.: L2cs-net: fine-grained gaze estimation in unconstrained environments. arXiv preprint arXiv:2203.03339 (2022)","DOI":"10.1109\/ICFSP59764.2023.10372944"}],"container-title":["Machine Vision and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00138-024-01545-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00138-024-01545-z\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00138-024-01545-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,18]],"date-time":"2024-11-18T01:47:04Z","timestamp":1731894424000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00138-024-01545-z"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,5,5]]},"references-count":40,"journal-issue":{"issue":"4","published-print":{"date-parts":[[2024,7]]}},"alternative-id":["1545"
],"URL":"https:\/\/doi.org\/10.1007\/s00138-024-01545-z","relation":{},"ISSN":["0932-8092","1432-1769"],"issn-type":[{"type":"print","value":"0932-8092"},{"type":"electronic","value":"1432-1769"}],"subject":[],"published":{"date-parts":[[2024,5,5]]},"assertion":[{"value":"26 December 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"16 April 2024","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"17 April 2024","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"5 May 2024","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"61"}}