{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,9]],"date-time":"2026-04-09T14:26:05Z","timestamp":1775744765617,"version":"3.50.1"},"publisher-location":"Cham","reference-count":42,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031783401","type":"print"},{"value":"9783031783418","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,12,2]],"date-time":"2024-12-02T00:00:00Z","timestamp":1733097600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,12,2]],"date-time":"2024-12-02T00:00:00Z","timestamp":1733097600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-78341-8_6","type":"book-chapter","created":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T15:14:36Z","timestamp":1733066076000},"page":"77-90","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":3,"title":["Merging Multiple Datasets for\u00a0Improved Appearance-Based Gaze Estimation"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-5214-7715","authenticated-orcid":false,"given":"Liang","family":"Wu","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9167-7495","authenticated-orcid":false,"given":"Bertram E.","family":"Shi","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,12,2]]},"reference":[{"key":"6_CR1","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/2980179.2980246","volume":"35","author":"A Patney","year":"2016","unstructured":"Patney, A., et al.: Towards foveated rendering for 
gaze-tracked virtual reality. ACM Trans. Graph. (TOG) 35, 1\u201312 (2016)","journal-title":"ACM Trans. Graph. (TOG)"},{"key":"6_CR2","doi-asserted-by":"publisher","first-page":"240","DOI":"10.1080\/10447318.2018.1452351","volume":"35","author":"Z Chen","year":"2019","unstructured":"Chen, Z., Shi, B.: Using variable dwell time to accelerate gaze-based web browsing with two-step selection. Int. J. Hum.-Comput. Interact. 35, 240\u2013255 (2019)","journal-title":"Int. J. Hum.-Comput. Interact."},{"key":"6_CR3","doi-asserted-by":"crossref","unstructured":"Pi, J., Shi, B.: Probabilistic adjustment of dwell time for eye typing. In: 2017 10th International Conference on Human System Interactions (HSI), pp. 251\u2013257 (2017)","DOI":"10.1109\/HSI.2017.8005041"},{"key":"6_CR4","unstructured":"Recasens, A., Khosla, A., Vondrick, C., Torralba, A.: Where are they looking? In: Advances in Neural Information Processing Systems, vol. 28 (2015)"},{"key":"6_CR5","doi-asserted-by":"crossref","unstructured":"Chong, E., Wang, Y., Ruiz, N., Rehg, J.: Detecting attended visual targets in video. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5396\u20135406 (2020)","DOI":"10.1109\/CVPR42600.2020.00544"},{"key":"6_CR6","doi-asserted-by":"crossref","unstructured":"Gehrer, N., Sch\u00f6nenberg, M., Duchowski, A., Krejtz, K.: Implementing innovative gaze analytic methods in clinical psychology: a study on eye movements in antisocial violent offenders. In: Proceedings of the 2018 ACM Symposium on Eye Tracking Research & Applications, pp. 1\u20139 (2018)","DOI":"10.1145\/3204493.3204543"},{"key":"6_CR7","doi-asserted-by":"crossref","unstructured":"Zhang, X., Sugano, Y., Fritz, M., Bulling, A.: Appearance-based gaze estimation in the wild. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 
4511\u20134520 (2015)","DOI":"10.1109\/CVPR.2015.7299081"},{"key":"6_CR8","doi-asserted-by":"crossref","unstructured":"Zhang, X., Sugano, Y., Fritz, M., Bulling, A.: It\u2019s written all over your face: full-face appearance-based gaze estimation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 51\u201360 (2017)","DOI":"10.1109\/CVPRW.2017.284"},{"key":"6_CR9","doi-asserted-by":"crossref","unstructured":"Chen, Z., Shi, B.: Appearance-based gaze estimation using dilated-convolutions. In: Asian Conference on Computer Vision, pp. 309\u2013324 (2018)","DOI":"10.1007\/978-3-030-20876-9_20"},{"key":"6_CR10","doi-asserted-by":"crossref","unstructured":"Fischer, T., Chang, H., Demiris, Y.: RT-gene: real-time eye gaze estimation in natural environments. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 334\u2013352 (2018)","DOI":"10.1007\/978-3-030-01249-6_21"},{"key":"6_CR11","doi-asserted-by":"publisher","first-page":"1124","DOI":"10.1109\/TBME.2005.863952","volume":"53","author":"E Guestrin","year":"2006","unstructured":"Guestrin, E., Eizenman, M.: General theory of remote gaze estimation using the pupil center and corneal reflections. IEEE Trans. Biomed. Eng. 53, 1124\u20131133 (2006)","journal-title":"IEEE Trans. Biomed. Eng."},{"key":"6_CR12","doi-asserted-by":"crossref","unstructured":"Krafka, K., et al.: Eye tracking for everyone. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2176\u20132184 (2016)","DOI":"10.1109\/CVPR.2016.239"},{"key":"6_CR13","doi-asserted-by":"crossref","unstructured":"Chen, J., Ji, Q.: 3D gaze estimation with a single camera without IR illumination. In: 2008 19th International Conference on Pattern Recognition, pp. 
1\u20134 (2008)","DOI":"10.1109\/ICPR.2008.4761343"},{"key":"6_CR14","doi-asserted-by":"publisher","first-page":"802","DOI":"10.1109\/TIP.2011.2162740","volume":"21","author":"R Valenti","year":"2011","unstructured":"Valenti, R., Sebe, N., Gevers, T.: Combining head pose and eye location information for gaze estimation. IEEE Trans. Image Process. 21, 802\u2013815 (2011)","journal-title":"IEEE Trans. Image Process."},{"key":"6_CR15","doi-asserted-by":"crossref","unstructured":"Wood, E., Bulling, A.: EyeTab: model-based gaze estimation on unmodified tablet computers. In: Proceedings of the Symposium on Eye Tracking Research and Applications, pp. 207\u2013210 (2014)","DOI":"10.1145\/2578153.2578185"},{"key":"6_CR16","unstructured":"Tan, K., Kriegman, D., Ahuja, N.: Appearance-based eye gaze estimation. In: Proceedings of the Sixth IEEE Workshop on Applications of Computer Vision, WACV 2002, pp. 191\u2013195 (2002)"},{"key":"6_CR17","doi-asserted-by":"publisher","first-page":"2033","DOI":"10.1109\/TPAMI.2014.2313123","volume":"36","author":"F Lu","year":"2014","unstructured":"Lu, F., Sugano, Y., Okabe, T., Sato, Y.: Adaptive linear regression for appearance-based gaze estimation. IEEE Trans. Pattern Anal. Mach. Intell. 36, 2033\u20132046 (2014)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"6_CR18","doi-asserted-by":"crossref","unstructured":"Williams, O., Blake, A., Cipolla, R.: Sparse and semi-supervised visual mapping with the S\u00b3GP. In: 2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR), vol. 1, pp. 230\u2013237 (2006)","DOI":"10.1109\/CVPR.2006.285"},{"key":"6_CR19","doi-asserted-by":"crossref","unstructured":"Cheng, Y., Lu, F.: Gaze estimation using transformer. In: 2022 26th International Conference on Pattern Recognition (ICPR), pp. 
3341\u20133347 (2022)","DOI":"10.1109\/ICPR56361.2022.9956687"},{"key":"6_CR20","doi-asserted-by":"crossref","unstructured":"Tu, D., Min, X., Duan, H., Guo, G., Zhai, G., Shen, W.: End-to-end human-gaze-target detection with transformers. arXiv Preprint arXiv:2203.10433 (2022)","DOI":"10.1109\/CVPR52688.2022.00224"},{"key":"6_CR21","unstructured":"Cai, X., et al.: Gaze estimation with an ensemble of four architectures. arXiv Preprint arXiv:2107.01980 (2021)"},{"key":"6_CR22","doi-asserted-by":"crossref","unstructured":"Lv, J., Chen, W., Li, Q., Yang, C.: Unsupervised cross-dataset person re-identification by transfer learning of spatial-temporal patterns. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 7948\u20137956 (2018)","DOI":"10.1109\/CVPR.2018.00829"},{"key":"6_CR23","doi-asserted-by":"crossref","unstructured":"Li, Y., Lin, C., Lin, Y., Wang, Y.: Cross-dataset person re-identification via unsupervised pose disentanglement and adaptation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 7919\u20137929 (2019)","DOI":"10.1109\/ICCV.2019.00801"},{"key":"6_CR24","doi-asserted-by":"publisher","first-page":"1623","DOI":"10.1109\/TPAMI.2020.3019967","volume":"44","author":"R Ranftl","year":"2020","unstructured":"Ranftl, R., Lasinger, K., Hafner, D., Schindler, K., Koltun, V.: Towards robust monocular depth estimation: mixing datasets for zero-shot cross-dataset transfer. IEEE Trans. Pattern Anal. Mach. Intell. 44, 1623\u20131637 (2020)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"6_CR25","doi-asserted-by":"crossref","unstructured":"He, H., Zhang, J., Zhang, Q., Tao, D.: Grapy-ML: graph pyramid mutual learning for cross-dataset human parsing. 
In: The Thirty-Fourth AAAI Conference on Artificial Intelligence, AAAI 2020, The Thirty-Second Innovative Applications of Artificial Intelligence Conference, IAAI 2020, The Tenth AAAI Symposium on Educational Advances in Artificial Intelligence, EAAI 2020, New York, NY, USA, 7\u201312 February 2020, pp. 10949\u201310956 (2020). https:\/\/ojs.aaai.org\/index.php\/AAAI\/article\/view\/6728","DOI":"10.1609\/aaai.v34i07.6728"},{"key":"6_CR26","doi-asserted-by":"crossref","unstructured":"Lambert, J., Liu, Z., Sener, O., Hays, J., Koltun, V.: MSeg: a composite dataset for multi-domain semantic segmentation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 2879\u20132888 (2020)","DOI":"10.1109\/CVPR42600.2020.00295"},{"key":"6_CR27","doi-asserted-by":"publisher","first-page":"1238","DOI":"10.1007\/s11263-020-01408-w","volume":"129","author":"D Li","year":"2021","unstructured":"Li, D., Jiang, T., Jiang, M.: Unified quality assessment of in-the-wild videos with mixed datasets training. Int. J. Comput. Vis. 129, 1238\u20131257 (2021)","journal-title":"Int. J. Comput. Vis."},{"key":"6_CR28","doi-asserted-by":"crossref","unstructured":"Zhang, W., Li, W., Xu, D.: SRDAN: scale-aware and range-aware domain adaptation network for cross-dataset 3D object detection. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6769\u20136779 (2021)","DOI":"10.1109\/CVPR46437.2021.00670"},{"key":"6_CR29","doi-asserted-by":"crossref","unstructured":"Smith, B., Yin, Q., Feiner, S., Nayar, S.: Gaze locking: passive eye contact detection for human-object interaction. In: Proceedings of the 26th Annual ACM Symposium on User Interface Software and Technology, pp. 
271\u2013280 (2013)","DOI":"10.1145\/2501988.2501994"},{"key":"6_CR30","doi-asserted-by":"publisher","first-page":"5923","DOI":"10.1109\/TIP.2019.2923051","volume":"28","author":"J Korhonen","year":"2019","unstructured":"Korhonen, J.: Two-level approach for no-reference consumer video quality assessment. IEEE Trans. Image Process. 28, 5923\u20135938 (2019)","journal-title":"IEEE Trans. Image Process."},{"key":"6_CR31","doi-asserted-by":"crossref","unstructured":"Zhang, X., Sugano, Y., Bulling, A.: Revisiting data normalization for appearance-based gaze estimation. In: Proceedings of the 2018 ACM Symposium on Eye Tracking Research & Applications, pp. 1\u20139 (2018)","DOI":"10.1145\/3204493.3204548"},{"key":"6_CR32","doi-asserted-by":"crossref","unstructured":"Rodrigues, R., Barreto, J., Nunes, U.: Camera pose estimation using images of planar mirror reflections. In: European Conference on Computer Vision, pp. 382\u2013395 (2010)","DOI":"10.1007\/978-3-642-15561-1_28"},{"key":"6_CR33","doi-asserted-by":"crossref","unstructured":"Zhang, X., Park, S., Beeler, T., Bradley, D., Tang, S., Hilliges, O.: ETH-XGaze: a large scale dataset for gaze estimation under extreme head pose and gaze variation. In: European Conference on Computer Vision, pp. 365\u2013381 (2020)","DOI":"10.1007\/978-3-030-58558-7_22"},{"key":"6_CR34","doi-asserted-by":"crossref","unstructured":"Funes Mora, K., Monay, F., Odobez, J.: Eyediap: a database for the development and evaluation of gaze estimation algorithms from RGB and RGB-D cameras. In: Proceedings of the Symposium on Eye Tracking Research and Applications, pp. 255\u2013258 (2014)","DOI":"10.1145\/2578153.2578190"},{"key":"6_CR35","doi-asserted-by":"crossref","unstructured":"Park, S., Spurr, A., Hilliges, O.: Deep pictorial gaze estimation. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 
721\u2013738 (2018)","DOI":"10.1007\/978-3-030-01261-8_44"},{"key":"6_CR36","doi-asserted-by":"publisher","first-page":"1092","DOI":"10.1109\/TPAMI.2019.2957373","volume":"43","author":"G Liu","year":"2019","unstructured":"Liu, G., Yu, Y., Mora, K., Odobez, J.: A differential approach for gaze estimation. IEEE Trans. Pattern Anal. Mach. Intell. 43, 1092\u20131099 (2019)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"6_CR37","unstructured":"Dosovitskiy, A., et al.: An image is worth $$16 \\times 16$$ words: transformers for image recognition at scale. arXiv Preprint arXiv:2010.11929 (2020)"},{"key":"6_CR38","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Advances in Neural Information Processing Systems, vol. 30 (2017)"},{"key":"6_CR39","unstructured":"Xie, E., Wang, W., Yu, Z., Anandkumar, A., Alvarez, J., Luo, P.: SegFormer: simple and efficient design for semantic segmentation with transformers. In: Advances in Neural Information Processing Systems, vol. 34, pp. 12077\u201312090 (2021)"},{"key":"6_CR40","doi-asserted-by":"crossref","unstructured":"Kim, B., Lee, J., Kang, J., Kim, E., Kim, H.: HOTR: end-to-end human-object interaction detection with transformers. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 74\u201383 (2021)","DOI":"10.1109\/CVPR46437.2021.00014"},{"key":"6_CR41","doi-asserted-by":"crossref","unstructured":"Zhang, W., et al.: Transformer-based multimodal information fusion for facial expression analysis. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 2428\u20132437 (2022)","DOI":"10.1109\/CVPRW56347.2022.00271"},{"key":"6_CR42","doi-asserted-by":"crossref","unstructured":"Oh, J.O., Chang, H., Choi, S.: Self-attention with convolution and deconvolution for efficient eye gaze estimation from a full face image. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
4992\u20135000 (2022)","DOI":"10.1109\/CVPRW56347.2022.00547"}],"container-title":["Lecture Notes in Computer Science","Pattern Recognition"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-78341-8_6","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T16:05:20Z","timestamp":1733069120000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-78341-8_6"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,2]]},"ISBN":["9783031783401","9783031783418"],"references-count":42,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-78341-8_6","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,12,2]]},"assertion":[{"value":"2 December 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICPR","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Pattern Recognition","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Kolkata","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"India","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"1 December 2024","order":7,"name":"conference_start_date","label":"Conference Start 
Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"5 December 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icpr2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/icpr2024.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}