{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,5,13]],"date-time":"2025-05-13T16:15:32Z","timestamp":1747152932929,"version":"3.40.5"},"publisher-location":"Cham","reference-count":39,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783030628062"},{"type":"electronic","value":"9783030628079"}],"license":[{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020]]},"DOI":"10.1007\/978-3-030-62807-9_4","type":"book-chapter","created":{"date-parts":[[2020,11,19]],"date-time":"2020-11-19T12:07:18Z","timestamp":1605787638000},"page":"36-49","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["A Method to Gaze Following Detection by Computer Vision Applied to Production Environments"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-4023-4728","authenticated-orcid":false,"given":"Emannuell Dartora","family":"Cenzi","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6170-3370","authenticated-orcid":false,"given":"Marcelo","family":"Rudek","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2020,11,10]]},"reference":[{"key":"4_CR1","doi-asserted-by":"crossref","unstructured":"Xiong, X. et al.: Eye gaze tracking using an RGBD camera: a comparison with a RGB solution. In: Proceedings of the 2014 ACM International Joint Conference on Pervasive and Ubiquitous Computing: Adjunct Publication, pp. 1113\u20131121. ACM (2014)","DOI":"10.1145\/2638728.2641694"},{"key":"4_CR2","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"35","DOI":"10.1007\/978-3-030-20893-6_3","volume-title":"Computer Vision \u2013 ACCV 2018","author":"D Lian","year":"2019","unstructured":"Lian, D., Yu, Z., Gao, S.: Believe it or not, we know what you are looking at! In: Jawahar, C.V., Li, H., Mori, G., Schindler, K. (eds.) ACCV 2018. LNCS, vol. 11363, pp. 35\u201350. Springer, Cham (2019). https:\/\/doi.org\/10.1007\/978-3-030-20893-6_3"},{"issue":"3","key":"4_CR3","doi-asserted-by":"publisher","first-page":"282","DOI":"10.1007\/s11263-013-0655-7","volume":"106","author":"MJ Mar\u00edn-Jim\u00e9nez","year":"2014","unstructured":"Mar\u00edn-Jim\u00e9nez, M.J., et al.: Detecting people looking at each other in videos. Int. J. Comput. Vis. 106(3), 282\u2013296 (2014)","journal-title":"Int. J. Comput. Vis."},{"key":"4_CR4","unstructured":"Recasens, A. et al.: Where are they looking? In: Advances in Neural Information Processing Systems, pp. 199\u2013207 (2015)"},{"key":"4_CR5","doi-asserted-by":"publisher","first-page":"119429","DOI":"10.1016\/j.jclepro.2019.119429","volume":"249","author":"AYU Reche","year":"2019","unstructured":"Reche, A.Y.U., Canciglieri Jr., O., Rudek, M., Estorilio, C.C.A.: Integrated product development process and green supply chain management: contributions, limitations and applications. J. Cleaner Prod. 249, 119429\u2013119459 (2019)","journal-title":"J. Cleaner Prod."},{"key":"4_CR6","doi-asserted-by":"crossref","unstructured":"Krafka, K., et al.: Eye tracking for everyone. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2176\u20132184 (2016)","DOI":"10.1109\/CVPR.2016.239"},{"key":"4_CR7","unstructured":"Aung, A.M., Ramakrishnan, A., Whitehill, J.R.: Who Are They Looking At? Automatic Eye Gaze Following for Classroom Observation Video Analysis. International Educational Data Mining Society (2018)"},{"key":"4_CR8","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"314","DOI":"10.1007\/978-3-642-33718-5_23","volume-title":"Computer Vision \u2013 ECCV 2012","author":"A Fathi","year":"2012","unstructured":"Fathi, A., Li, Y., Rehg, James M.: Learning to recognize daily actions using gaze. In: Fitzgibbon, A., Lazebnik, S., Perona, P., Sato, Y., Schmid, C. (eds.) ECCV 2012. LNCS, vol. 7572, pp. 314\u2013327. Springer, Heidelberg (2012). https:\/\/doi.org\/10.1007\/978-3-642-33718-5_23"},{"key":"4_CR9","doi-asserted-by":"crossref","unstructured":"Pfister, T., Charles, J., Zisserman, A.: Flowing ConvNet for human pose estimation in videos. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 1913\u20131921 (2015)","DOI":"10.1109\/ICCV.2015.222"},{"key":"4_CR10","doi-asserted-by":"crossref","unstructured":"Recasens, A., et al.: Following gaze in video. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 1435\u20131443 (2017)","DOI":"10.1109\/ICCV.2017.160"},{"issue":"11","key":"4_CR11","doi-asserted-by":"publisher","first-page":"2094","DOI":"10.1109\/TMM.2015.2482819","volume":"17","author":"SS Mukherjee","year":"2015","unstructured":"Mukherjee, S.S., Robertson, N.M.: Deep head pose: gaze-direction estimation in multimodal video. IEEE Trans. Multimed. 17(11), 2094\u20132107 (2015)","journal-title":"IEEE Trans. Multimed."},{"key":"4_CR12","doi-asserted-by":"crossref","unstructured":"Zhu, W., Deng, H.: Monocular free-head 3D gaze tracking with deep learning and geometry constraints. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 3143\u20133152 (2017)","DOI":"10.1109\/ICCV.2017.341"},{"key":"4_CR13","doi-asserted-by":"publisher","first-page":"113","DOI":"10.1016\/j.visres.2014.10.027","volume":"116","author":"D Parks","year":"2015","unstructured":"Parks, D., Borji, A., Itti, L.: Augmented saliency model using automatic 3D head pose detection and learned gaze following in natural scenes. Vis. Res. 116, 113\u2013126 (2015)","journal-title":"Vis. Res."},{"key":"4_CR14","unstructured":"Mora, K.A.F., Odobez, J.-M.: Person independent 3D gaze estimation from remote RGB-D cameras. In: 2013 IEEE International Conference on Image Processing, pp. 2787\u20132791. IEEE (2013)"},{"key":"4_CR15","doi-asserted-by":"crossref","unstructured":"Fan, L., et al.: Inferring shared attention in social scene videos. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6460\u20136468 (2018)","DOI":"10.1109\/CVPR.2018.00676"},{"issue":"4","key":"4_CR16","doi-asserted-by":"publisher","first-page":"2014","DOI":"10.1109\/TITS.2015.2396031","volume":"16","author":"F Vicente","year":"2015","unstructured":"Vicente, F., et al.: Driver gaze tracking and eyes off the road detection system. IEEE Trans. Intell. Transp. Syst. 16(4), 2014\u20132027 (2015)","journal-title":"IEEE Trans. Intell. Transp. Syst."},{"key":"4_CR17","doi-asserted-by":"crossref","unstructured":"Wang, K., Wang, S., JI, Q.: Deep eye fixation map learning for calibration-free eye gaze tracking. In: Proceedings of the 9th Biennial ACM Symposium on Eye Tracking Research & Applications, pp. 47\u201355. ACM (2016)","DOI":"10.1145\/2857491.2857515"},{"key":"4_CR18","doi-asserted-by":"publisher","first-page":"2941","DOI":"10.1109\/TCSVT.2018.2870832","volume":"29","author":"R Cong","year":"2018","unstructured":"Cong, R., et al.: Review of visual saliency detection with comprehensive information. IEEE Trans. Circ. Syst. Video Technol. 29, 2941\u20132959 (2018)","journal-title":"IEEE Trans. Circ. Syst. Video Technol."},{"key":"4_CR19","doi-asserted-by":"crossref","unstructured":"Ruiz, N., Chong, E., Rehg, J.M.: Fine-grained head pose estimation without keypoints. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 2074\u20132083 (2018)","DOI":"10.1109\/CVPRW.2018.00281"},{"key":"4_CR20","doi-asserted-by":"crossref","unstructured":"Yang, T.-Y., et al.: FSA-Net: learning fine-grained structure aggregation for head pose estimation from a single image. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1087\u20131096 (2019)","DOI":"10.1109\/CVPR.2019.00118"},{"key":"4_CR21","doi-asserted-by":"crossref","unstructured":"Yang, S., et al.: Wider face: a face detection benchmark. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (2016)","DOI":"10.1109\/CVPR.2016.596"},{"key":"4_CR22","unstructured":"Jain, V., Learned-Miller, E.: FDDB: A benchmark for face detection in unconstrained settings. Technical report, Technical Report UM-CS-2010-009, University of Massachusetts, Amherst (2010)"},{"key":"4_CR23","doi-asserted-by":"crossref","unstructured":"Chen, Y., Tai, Y., Liu, X., Shen, C., Yang, J.: FSRNet: end-to-end learning face super-resolution with facial priors. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (2018)","DOI":"10.1109\/CVPR.2018.00264"},{"key":"4_CR24","doi-asserted-by":"crossref","unstructured":"Chi, C., Zhang, S., Xing, J., Lei, Z., Li, S.Z., Zou, X.: Selective refinement network for high performance face detection. In: Proceedings of Association for the Advancement of Artificial Intelligence (AAAI) (2019)","DOI":"10.1609\/aaai.v33i01.33018231"},{"key":"4_CR25","doi-asserted-by":"crossref","unstructured":"Li, H., Lin, Z., Shen, X., Brandt, J., Hua, G.: A convolutional neural network cascade for face detection. In: Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2015)","DOI":"10.1109\/CVPR.2015.7299170"},{"key":"4_CR26","unstructured":"Ren, S., He, K., Girshick, R., Sun, J.: Faster R-CNN: towards real-time object detection with region proposal networks. In: Proceedings of Advances in Neural Information Processing Systems (NIPS) (2015)"},{"issue":"1","key":"4_CR27","doi-asserted-by":"publisher","first-page":"156","DOI":"10.1109\/TPAMI.2016.2535218","volume":"39","author":"J Yang","year":"2017","unstructured":"Yang, J., Luo, L., Qian, J., Tai, Y., Zhang, F., Yong, X.: Nuclear norm based matrix regression with applications to face recognition with occlusion and illumination changes. IEEE Trans. Pattern Anal. Mach. Intell. (TPAMI) 39(1), 156\u2013171 (2017)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell. (TPAMI)"},{"key":"4_CR28","doi-asserted-by":"crossref","unstructured":"Xiao, J., Hays, J., Ehinger, K.A., Oliva, A., Torralba, A.: Sun database: large-scale scene recognition from abbey to zoo. In: 2010 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 3485\u20133492. IEEE (2010)","DOI":"10.1109\/CVPR.2010.5539970"},{"key":"4_CR29","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"740","DOI":"10.1007\/978-3-319-10602-1_48","volume-title":"Computer Vision \u2013 ECCV 2014","author":"T-Y Lin","year":"2014","unstructured":"Lin, T.-Y., et al.: Microsoft COCO: common objects in context. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8693, pp. 740\u2013755. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10602-1_48"},{"key":"4_CR30","doi-asserted-by":"crossref","unstructured":"Yao, B., Jiang, X., Khosla, A., Lin, A.L., Guibas, L., Fei-Fei, L.: Human action recognition by learning bases of action attributes and parts. In: 2011 IEEE International Conference on Computer Vision (ICCV), pp. 1331\u20131338. IEEE (2011)","DOI":"10.1109\/ICCV.2011.6126386"},{"issue":"2","key":"4_CR31","doi-asserted-by":"publisher","first-page":"303","DOI":"10.1007\/s11263-009-0275-4","volume":"88","author":"M Everingham","year":"2010","unstructured":"Everingham, M., Van Gool, L., Williams, C.K., Winn, J., Zisserman, A.: The pascal visual object classes (voc) challenge. Int. J. Comput. Vis. 88(2), 303\u2013338 (2010)","journal-title":"Int. J. Comput. Vis."},{"issue":"3","key":"4_CR32","doi-asserted-by":"publisher","first-page":"211","DOI":"10.1007\/s11263-015-0816-y","volume":"115","author":"O Russakovsky","year":"2015","unstructured":"Russakovsky, O., et al.: Imagenet large scale visual recognition challenge. Int. J. Comput. Vis. 115(3), 211\u2013252 (2015)","journal-title":"Int. J. Comput. Vis."},{"key":"4_CR33","unstructured":"Zhou, B., Lapedriza, A., Xiao, J., Torralba, A., Oliva, A.: Learning deep features for scene recognition using places database. In: Advances in Neural Information Processing Systems, pp. 487\u2013495 (2014)"},{"key":"4_CR34","doi-asserted-by":"crossref","unstructured":"Li, J., et al.: DSFD: dual shot face detector. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (2019)","DOI":"10.1109\/CVPR.2019.00520"},{"key":"4_CR35","unstructured":"GazeFollowing Repository \u2013 Github. https:\/\/github.com\/svip-lab\/GazeFollowing. Accessed 2 Mar 2020"},{"key":"4_CR36","unstructured":"Deep head pose Hopenet \u2013 Github. https:\/\/github.com\/natanielruiz\/deep-head-pose. Accessed 2 Mar 2020"},{"key":"4_CR37","volume-title":"Deep Learning","author":"I Goodfellow","year":"2016","unstructured":"Goodfellow, I., Bengio, Y., Courville, A.: Deep Learning. MIT Press, Cambridge (2016)"},{"issue":"4","key":"4_CR38","doi-asserted-by":"publisher","first-page":"427","DOI":"10.1016\/j.ipm.2009.03.002","volume":"45","author":"M Sokolova","year":"2009","unstructured":"Sokolova, M., Lapalme, G.: A systematic analysis of performance measures for classification tasks. Inf. Process. Manage. 45(4), 427\u2013437 (2009)","journal-title":"Inf. Process. Manage."},{"key":"4_CR39","doi-asserted-by":"publisher","first-page":"631","DOI":"10.1007\/978-3-030-01614-2_58","volume":"540","author":"RL Silva","year":"2018","unstructured":"Silva, R.L., Rudek, M., Sjeika, A., Canciglieri Jr., O.: Machine vision systems for industrial quality control inspections. IFIP Adv. Inf. Commun. Technol. 540, 631\u2013641 (2018)","journal-title":"IFIP Adv. Inf. Commun. Technol."}],"container-title":["IFIP Advances in Information and Communication Technology","Product Lifecycle Management Enabling Smart X"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-62807-9_4","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,19]],"date-time":"2024-11-19T00:03:18Z","timestamp":1731974598000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-030-62807-9_4"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020]]},"ISBN":["9783030628062","9783030628079"],"references-count":39,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-62807-9_4","relation":{},"ISSN":["1868-4238","1868-422X"],"issn-type":[{"type":"print","value":"1868-4238"},{"type":"electronic","value":"1868-422X"}],"subject":[],"published":{"date-parts":[[2020]]},"assertion":[{"value":"10 November 2020","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"PLM","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"IFIP International Conference on Product Lifecycle Management","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Rapperswil","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Switzerland","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2020","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"5 July 2020","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"8 July 2020","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"plm2020","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/www.plm-conference.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"conftool.net","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"80","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"60","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"75% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2.6","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.3","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2 technical industrial contributions are also included. The conference was held virtually.","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}