{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T15:19:46Z","timestamp":1759331986662,"version":"3.40.3"},"publisher-location":"Cham","reference-count":27,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783031189067"},{"type":"electronic","value":"9783031189074"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-18907-4_43","type":"book-chapter","created":{"date-parts":[[2022,10,26]],"date-time":"2022-10-26T23:03:53Z","timestamp":1666825433000},"page":"552-565","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":3,"title":["EFG-Net: A Unified Framework for\u00a0Estimating Eye Gaze and\u00a0Face Gaze Simultaneously"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-9243-7936","authenticated-orcid":false,"given":"Hekuangyi","family":"Che","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1579-3942","authenticated-orcid":false,"given":"Dongchen","family":"Zhu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6868-7051","authenticated-orcid":false,"given":"Minjing","family":"Lin","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2882-2981","authenticated-orcid":false,"given":"Wenjun","family":"Shi","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0432-7329","authenticated-orcid":false,"given":"Guanghui","family":"Zhang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9875-696X","authenticated-orcid":false,"given":"Hang","family":"Li","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3307-9838","authenticated-orcid":false,"given":"Xiaolin","family":"Zhang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7478-4544","authenticated-orcid":false,"given":"Jiamao","family":"Li","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,10,27]]},"reference":[{"key":"43_CR1","unstructured":"Ba, J.L., Kiros, J.R., Hinton, G.E.: Layer normalization. arXiv preprint arXiv:1607.06450 (2016)"},{"key":"43_CR2","doi-asserted-by":"crossref","unstructured":"Bao, Y., Cheng, Y., Liu, Y., Lu, F.: Adaptive feature fusion network for gaze tracking in mobile tablets. In: 2020 25th International Conference on Pattern Recognition (ICPR), pp. 9936\u20139943. IEEE (2021)","DOI":"10.1109\/ICPR48806.2021.9412205"},{"key":"43_CR3","unstructured":"Cai, X., et al.: Gaze estimation with an ensemble of four architectures. 
arXiv preprint arXiv:2107.01980 (2021)"},{"key":"43_CR4","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"309","DOI":"10.1007\/978-3-030-20876-9_20","volume-title":"Computer Vision \u2013 ACCV 2018","author":"Z Chen","year":"2019","unstructured":"Chen, Z., Shi, B.E.: Appearance-based gaze estimation using dilated-convolutions. In: Jawahar, C.V., Li, H., Mori, G., Schindler, K. (eds.) ACCV 2018. LNCS, vol. 11366, pp. 309\u2013324. Springer, Cham (2019). https:\/\/doi.org\/10.1007\/978-3-030-20876-9_20"},{"key":"43_CR5","doi-asserted-by":"crossref","unstructured":"Cheng, Y., Huang, S., Wang, F., Qian, C., Lu, F.: A coarse-to-fine adaptive network for appearance-based gaze estimation. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 34, pp. 10623\u201310630 (2020)","DOI":"10.1609\/aaai.v34i07.6636"},{"key":"43_CR6","doi-asserted-by":"crossref","unstructured":"Cheng, Y., Lu, F.: Gaze estimation using transformer. arXiv preprint arXiv:2105.14424 (2021)","DOI":"10.1109\/ICPR56361.2022.9956687"},{"key":"43_CR7","doi-asserted-by":"publisher","first-page":"5259","DOI":"10.1109\/TIP.2020.2982828","volume":"29","author":"Y Cheng","year":"2020","unstructured":"Cheng, Y., Zhang, X., Lu, F., Sato, Y.: Gaze estimation by exploring two-eye asymmetry. IEEE Trans. Image Process. 29, 5259\u20135272 (2020)","journal-title":"IEEE Trans. Image Process."},{"key":"43_CR8","unstructured":"Dosovitskiy, A., et al.: An image is worth 16x16 words: transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)"},{"key":"43_CR9","doi-asserted-by":"crossref","unstructured":"Fischer, T., Chang, H.J., Demiris, Y.: RT-GENE: real-time eye gaze estimation in natural environments. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 334\u2013352 (2018)","DOI":"10.1007\/978-3-030-01249-6_21"},{"key":"43_CR10","doi-asserted-by":"crossref","unstructured":"Ghosh, S., Hayat, M., Dhall, A., Knibbe, J.: MTGLS: multi-task gaze estimation with limited supervision. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 3223\u20133234 (2022)","DOI":"10.1109\/WACV51458.2022.00123"},{"key":"43_CR11","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"43_CR12","doi-asserted-by":"crossref","unstructured":"Kellnhofer, P., Recasens, A., Stent, S., Matusik, W., Torralba, A.: Gaze360: physically unconstrained gaze estimation in the wild. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 6912\u20136921 (2019)","DOI":"10.1109\/ICCV.2019.00701"},{"key":"43_CR13","unstructured":"Kingma, D.P., Ba, J.: Adam: a method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014)"},{"key":"43_CR14","doi-asserted-by":"crossref","unstructured":"Krafka, K., et al.: Eye tracking for everyone. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2176\u20132184 (2016)","DOI":"10.1109\/CVPR.2016.239"},{"issue":"11","key":"43_CR15","doi-asserted-by":"publisher","first-page":"2278","DOI":"10.1109\/5.726791","volume":"86","author":"Y LeCun","year":"1998","unstructured":"LeCun, Y., Bottou, L., Bengio, Y., Haffner, P.: Gradient-based learning applied to document recognition. Proc. 
IEEE 86(11), 2278\u20132324 (1998)","journal-title":"Proc. IEEE"},{"key":"43_CR16","doi-asserted-by":"crossref","unstructured":"Park, S., Mello, S.D., Molchanov, P., Iqbal, U., Hilliges, O., Kautz, J.: Few-shot adaptive gaze estimation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 9368\u20139377 (2019)","DOI":"10.1109\/ICCV.2019.00946"},{"key":"43_CR17","doi-asserted-by":"crossref","unstructured":"Park, S., Spurr, A., Hilliges, O.: Deep pictorial gaze estimation. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 721\u2013738 (2018)","DOI":"10.1007\/978-3-030-01261-8_44"},{"key":"43_CR18","unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556 (2014)"},{"key":"43_CR19","unstructured":"Tolstikhin, I.O., et al.: MLP-mixer: an all-MLP architecture for vision. In: Advances in Neural Information Processing Systems, vol. 34 (2021)"},{"key":"43_CR20","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Advances in Neural Information Processing Systems, vol. 30 (2017)"},{"key":"43_CR21","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"365","DOI":"10.1007\/978-3-030-58558-7_22","volume-title":"Computer Vision \u2013 ECCV 2020","author":"X Zhang","year":"2020","unstructured":"Zhang, X., Park, S., Beeler, T., Bradley, D., Tang, S., Hilliges, O.: ETH-XGaze: a large scale dataset for gaze estimation under extreme head pose and gaze variation. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12350, pp. 365\u2013381. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58558-7_22"},{"key":"43_CR22","doi-asserted-by":"crossref","unstructured":"Zhang, X., Sugano, Y., Bulling, A.: Revisiting data normalization for appearance-based gaze estimation. In: Proceedings of the 2018 ACM Symposium on Eye Tracking Research & Applications, pp. 1\u20139 (2018)","DOI":"10.1145\/3204493.3204548"},{"key":"43_CR23","unstructured":"Zhang, X., Sugano, Y., Bulling, A., Hilliges, O.: Learning-based region selection for end-to-end gaze estimation. In: BMVC (2020)"},{"key":"43_CR24","doi-asserted-by":"crossref","unstructured":"Zhang, X., Sugano, Y., Fritz, M., Bulling, A.: Appearance-based gaze estimation in the wild. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4511\u20134520 (2015)","DOI":"10.1109\/CVPR.2015.7299081"},{"key":"43_CR25","doi-asserted-by":"crossref","unstructured":"Zhang, X., Sugano, Y., Fritz, M., Bulling, A.: It\u2019s written all over your face: full-face appearance-based gaze estimation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 51\u201360 (2017)","DOI":"10.1109\/CVPRW.2017.284"},{"issue":"1","key":"43_CR26","doi-asserted-by":"publisher","first-page":"162","DOI":"10.1109\/TPAMI.2017.2778103","volume":"41","author":"X Zhang","year":"2017","unstructured":"Zhang, X., Sugano, Y., Fritz, M., Bulling, A.: Mpiigaze: real-world dataset and deep appearance-based gaze estimation. IEEE Trans. Pattern Anal. Mach. Intell. 41(1), 162\u2013175 (2017)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"43_CR27","first-page":"13127","volume":"33","author":"Y Zheng","year":"2020","unstructured":"Zheng, Y., Park, S., Zhang, X., De Mello, S., Hilliges, O.: Self-learning transformations for improving gaze and head redirection. Adv. Neural. Inf. Process. Syst. 
33, 13127\u201313138 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."}],"container-title":["Lecture Notes in Computer Science","Pattern Recognition and Computer Vision"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-18907-4_43","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,3,10]],"date-time":"2023-03-10T04:14:23Z","timestamp":1678421663000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-18907-4_43"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031189067","9783031189074"],"references-count":27,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-18907-4_43","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"27 October 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"PRCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Chinese Conference on Pattern Recognition and Computer Vision (PRCV)","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Shenzhen","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"14 October 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"5","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"ccprcv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/en.prcv.cn\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"microsoft","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"564","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"233","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers 
Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"41% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.03","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.35","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}