{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T15:32:03Z","timestamp":1775143923407,"version":"3.50.1"},"publisher-location":"Cham","reference-count":47,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031915772","type":"print"},{"value":"9783031915789","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-91578-9_20","type":"book-chapter","created":{"date-parts":[[2025,6,6]],"date-time":"2025-06-06T09:23:36Z","timestamp":1749201816000},"page":"266-277","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["AHMF: Adaptive Hybrid-Memory-Fusion Model for\u00a0Driver Attention 
Prediction"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0009-0008-9927-8649","authenticated-orcid":false,"given":"Dongyang","family":"Xu","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8556-1760","authenticated-orcid":false,"given":"Qingfan","family":"Wang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0000-4110-113X","authenticated-orcid":false,"given":"Ji","family":"Ma","sequence":"additional","affiliation":[]},{"given":"Xiangyun","family":"Zeng","sequence":"additional","affiliation":[]},{"given":"Lei","family":"Chen","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,5,12]]},"reference":[{"key":"20_CR1","doi-asserted-by":"crossref","unstructured":"Hu, F., Venkatesh, G.M., O\u2019Connor, N.E., Smeaton, A.F., Little, S.: Utilising visual attention cues for vehicle detection and tracking. In: 2020 25th International Conference on Pattern Recognition (ICPR), pp. 5535\u20135542. IEEE (2021)","DOI":"10.1109\/ICPR48806.2021.9412931"},{"issue":"7","key":"20_CR2","doi-asserted-by":"publisher","first-page":"990","DOI":"10.1109\/TITS.2022.3155613","volume":"23","author":"MM Karim","year":"2022","unstructured":"Karim, M.M., Li, Y., Qin, R., Yin, Z.: A dynamic spatial-temporal attention network for early anticipation of traffic accidents. IEEE Trans. Intell. Transp. Syst. 23(7), 990\u20139600 (2022)","journal-title":"IEEE Trans. Intell. Transp. Syst."},{"key":"20_CR3","volume-title":"Driving visual saliency prediction of dynamic night scenes via a spatio-temporal dual-encoder network","author":"T Deng","year":"2023","unstructured":"Deng, T., et al.: Driving visual saliency prediction of dynamic night scenes via a spatio-temporal dual-encoder network. IEEE Trans. Intell. 
Transp. Syst. (2023)"},{"key":"20_CR4","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2022.119157","volume":"214","author":"F Rui","year":"2023","unstructured":"Rui, F., Huang, T., Li, M., Sun, Q., Chen, Y.: A multimodal deep neural network for prediction of the driver\u2019s focus of attention based on anthropomorphic attention mechanism and prior knowledge. Expert Syst. Appl. 214, 119157 (2023)","journal-title":"Expert Syst. Appl."},{"key":"20_CR5","first-page":"1","volume":"71","author":"A Lin","year":"2022","unstructured":"Lin, A., Chen, B., Jiayu, X., Zhang, Z., Guangming, L., Zhang, D.: Ds-transunet: dual swin transformer u-net for medical image segmentation. IEEE Trans. Instrum. Meas. 71, 1\u201315 (2022)","journal-title":"IEEE Trans. Instrum. Meas."},{"issue":"7","key":"20_CR6","doi-asserted-by":"publisher","first-page":"1335","DOI":"10.1109\/JAS.2022.105716","volume":"9","author":"H Tian","year":"2022","unstructured":"Tian, H., Deng, T., Yan, H.: Driving as well as on a sunny day? predicting driver\u2019s fixation in rainy weather conditions via a dual-branch visual model. IEEE\/CAA J. Automatica Sinica 9(7), 1335\u20131338 (2022)","journal-title":"IEEE\/CAA J. Automatica Sinica"},{"issue":"4","key":"20_CR7","doi-asserted-by":"publisher","first-page":"454","DOI":"10.1016\/j.jarmac.2016.04.009","volume":"5","author":"G Wood","year":"2016","unstructured":"Wood, G., Hartley, G., Furley, P.A., Wilson, M.R.: Working memory capacity, visual attention and hazard perception in driving. J. Appl. Res. Mem. Cogn. 5(4), 454\u2013462 (2016)","journal-title":"J. Appl. Res. Mem. Cogn."},{"key":"20_CR8","doi-asserted-by":"crossref","unstructured":"Broadbent, D.P., D\u2019Innocenzo, G., Ellmers, T.J., Parsler, J., Szameitat, A.J., Bishop, D.T.: Cognitive load, working memory capacity and driving performance: A preliminary fnirs and eye tracking study. Transp. Res. Part F: Traffic Psychol. Behav. 
92, 121\u2013132 (2023)","DOI":"10.1016\/j.trf.2022.11.013"},{"key":"20_CR9","doi-asserted-by":"publisher","DOI":"10.1016\/j.aap.2023.107071","volume":"187","author":"H Zhang","year":"2023","unstructured":"Zhang, H., Guo, Y., Yuan, W., Li, K.: On the importance of working memory in the driving safety field: a systematic review. Accident Analysis & Prevention 187, 107071 (2023)","journal-title":"Accident Analysis & Prevention"},{"key":"20_CR10","doi-asserted-by":"publisher","unstructured":"Krems, J.F., Baumann, M.: Driving and situation awareness: a cognitive model of memory-update processes. In: Kurosu, M. (ed.) HCD 2009. LNCS, vol. 5619, pp. 986\u2013994. Springer, Heidelberg (2009). https:\/\/doi.org\/10.1007\/978-3-642-02806-9-113","DOI":"10.1007\/978-3-642-02806-9-113"},{"issue":"1","key":"20_CR11","doi-asserted-by":"publisher","first-page":"99","DOI":"10.1007\/s10111-018-0527-6","volume":"21","author":"J De Winter","year":"2019","unstructured":"De Winter, J., Eisma, Y.B., Cabrall, C., Hancock, P.A., Stanton, N.A.: Situation awareness based on eye movements in relation to the task environment. Cogn. Technol. Work 21(1), 99\u2013111 (2019)","journal-title":"Cogn. Technol. Work"},{"key":"20_CR12","doi-asserted-by":"crossref","unstructured":"Gan, S., Li, Q., Wang, Q., Chen, W., Qin, D., Nie, B.: Constructing personalized situation awareness dataset for hazard perception, comprehension, projection, and action of drivers. In: 2021 IEEE International Intelligent Transportation Systems Conference (ITSC), pp. 1697\u20131704. IEEE (2021)","DOI":"10.1109\/ITSC48978.2021.9564543"},{"key":"20_CR13","doi-asserted-by":"crossref","unstructured":"Pang, D., Kimura, A., Takeuchi, T., Yamato, J., Kashino, K.: A stochastic model of selective visual attention with a dynamic bayesian network. In: 2008 IEEE International Conference on Multimedia and Expo, pp. 1073\u20131076. 
IEEE (2008)","DOI":"10.1109\/ICME.2008.4607624"},{"key":"20_CR14","doi-asserted-by":"crossref","unstructured":"Heracles, M., Sagerer, G., K\u00f6rner, U., Michalke, T., Fritsch, J., Goerick, C.: A dynamic attention system that reorients to unexpected motion in real-world traffic environments. In: 2009 IEEE\/RSJ International Conference on Intelligent Robots and Systems, pp. 1735\u20131742. IEEE (2009)","DOI":"10.1109\/IROS.2009.5354387"},{"key":"20_CR15","doi-asserted-by":"crossref","unstructured":"Ban, S.-W., Kim, B., Lee, M.: Top-down visual selective attention model combined with bottom-up saliency map for incremental object perception. In: The 2010 International Joint Conference on Neural Networks (IJCNN), pp. 1\u20138. IEEE (2010)","DOI":"10.1109\/IJCNN.2010.5596376"},{"key":"20_CR16","doi-asserted-by":"crossref","unstructured":"Tawari, A., Kang, B.: A computational framework for driver\u2019s visual attention using a fully convolutional architecture. In: 2017 IEEE Intelligent Vehicles Symposium (IV), pp. 887\u2013894. IEEE (2017)","DOI":"10.1109\/IVS.2017.7995828"},{"key":"20_CR17","doi-asserted-by":"crossref","unstructured":"Xia, Y., Zhang, D., Kim, J., Nakayama, K., Zipser, K., Whitney, D.: Predicting driver attention in critical situations. In: Computer Vision\u2013ACCV 2018: 14th Asian Conference on Computer Vision, Perth, Australia, December 2\u20136, 2018, Revised Selected Papers, Part V 14, pp. 658\u2013674. Springer (2019)","DOI":"10.1007\/978-3-030-20873-8_42"},{"issue":"7","key":"20_CR18","doi-asserted-by":"publisher","first-page":"1720","DOI":"10.1109\/TPAMI.2018.2845370","volume":"41","author":"A Palazzi","year":"2018","unstructured":"Palazzi, A., Abati, D., Solera, F., Cucchiara, R., et al.: Predicting the driver\u2019s focus of attention: the dr (eye) ve project. IEEE Trans. Pattern Anal. Mach. Intell. 41(7), 1720\u20131733 (2018)","journal-title":"IEEE Trans. Pattern Anal. Mach. 
Intell."},{"issue":"5","key":"20_CR19","doi-asserted-by":"publisher","first-page":"2146","DOI":"10.1109\/TITS.2019.2915540","volume":"21","author":"T Deng","year":"2019","unstructured":"Deng, T., Yan, H., Qin, L., Ngo, T., Manjunath, B.S.: How do drivers allocate their potential attention? driving fixation prediction via convolutional neural networks. IEEE Trans. Intell. Transp. Syst. 21(5), 2146\u20132154 (2019)","journal-title":"IEEE Trans. Intell. Transp. Syst."},{"key":"20_CR20","doi-asserted-by":"crossref","unstructured":"Deng, T., Yan, F., Yan, H.: Driving video fixation prediction model via spatio-temporal networks and attention gates. In: 2021 IEEE International Conference on Multimedia and Expo (ICME), pp. 1\u20136. IEEE (2021)","DOI":"10.1109\/ICME51207.2021.9428151"},{"key":"20_CR21","doi-asserted-by":"crossref","unstructured":"Xie, C., Xia, C., Ma, M., Zhao, Z., Chen, X., Li, J.: Pyramid grafting network for one-stage high resolution saliency detection. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 11717\u201311726 (2022)","DOI":"10.1109\/CVPR52688.2022.01142"},{"key":"20_CR22","doi-asserted-by":"crossref","unstructured":"Huang, P.-J., Lu, C.-A., Chen, K.-W.: Temporally-aggregating multiple-discontinuous-image saliency prediction with transformer-based attention. In: 2022 International Conference on Robotics and Automation (ICRA), pp. 6571\u20136577. IEEE (2022)","DOI":"10.1109\/ICRA46639.2022.9811544"},{"issue":"10","key":"20_CR23","doi-asserted-by":"publisher","first-page":"6850","DOI":"10.1109\/TCSVT.2022.3172971","volume":"32","author":"C Ma","year":"2022","unstructured":"Ma, C., Sun, H., Rao, Y., Zhou, J., Jiwen, L.: Video saliency forecasting transformer. IEEE Trans. Circuits Syst. Video Technol. 32(10), 6850\u20136862 (2022)","journal-title":"IEEE Trans. Circuits Syst. 
Video Technol."},{"key":"20_CR24","doi-asserted-by":"crossref","unstructured":"Chen, Y., Nan, Z., Xiang, T.: Fblnet: feedback loop network for driver attention prediction. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 13371\u201313380 (2023)","DOI":"10.1109\/ICCV51070.2023.01230"},{"key":"20_CR25","unstructured":"Le, H.: Memory and attention in deep learning. arXiv preprint arXiv:2107.01390 (2021)"},{"issue":"8","key":"20_CR26","doi-asserted-by":"publisher","first-page":"1735","DOI":"10.1162\/neco.1997.9.8.1735","volume":"9","author":"S Hochreiter","year":"1997","unstructured":"Hochreiter, S., Schmidhuber, J.: Long short-term memory. Neural Comput. 9(8), 1735\u20131780 (1997)","journal-title":"Neural Comput."},{"key":"20_CR27","unstructured":"Lopez-Paz, D., Ranzato, M.A.: Gradient episodic memory for continual learning. Advances in neural information processing systems, 30, 2017"},{"key":"20_CR28","doi-asserted-by":"crossref","unstructured":"Prakash, A., et al.: Condensed memory networks for clinical diagnostic inferencing. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a031 (2017)","DOI":"10.1609\/aaai.v31i1.10964"},{"key":"20_CR29","doi-asserted-by":"crossref","unstructured":"Gao, J., Ge, R., Chen, K., Nevatia, R.: Motion-appearance co-memory networks for video question answering. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6576\u20136585 (2018)","DOI":"10.1109\/CVPR.2018.00688"},{"key":"20_CR30","unstructured":"Le, H., Tran, T., Venkatesh, S.: Learning to remember more with less memorization. arXiv preprint arXiv:1901.01347 (2019)"},{"key":"20_CR31","doi-asserted-by":"publisher","first-page":"334","DOI":"10.1016\/j.neunet.2021.08.030","volume":"144","author":"F Landi","year":"2021","unstructured":"Landi, F., Baraldi, L., Cornia, M., Cucchiara, R.: Working memory connections for lstm. Neural Netw. 
144, 334\u2013341 (2021)","journal-title":"Neural Netw."},{"key":"20_CR32","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer: hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 10012\u201310022 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"20_CR33","doi-asserted-by":"crossref","unstructured":"Woo, S., Park, J., Lee, J.-Y., Kweon, I.S.: Cbam: convolutional block attention module. In: Proceedings of the European conference on computer vision (ECCV), pages 3\u201319, 2018","DOI":"10.1007\/978-3-030-01234-2_1"},{"key":"20_CR34","unstructured":"Shi, X., Chen, Z., Wang, H., Yeung, D.-Y., Wong, W.-K., Woo, W.: Convolutional lstm network: A machine learning approach for precipitation nowcasting. Advances in neural information processing systems 28 (2015)"},{"key":"20_CR35","doi-asserted-by":"publisher","unstructured":"Zhang, Z., Robinson, D., Tepper, J.: Detecting hate speech on twitter using a convolution-GRU based deep neural network. In: Gangemi, A., Navigli, R., Vidal, M.-E., Hitzler, P., Troncy, R., Hollink, L., Tordai, A., Alam, M. (eds.) ESWC 2018. LNCS, vol. 10843, pp. 745\u2013760. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-319-93417-4-48","DOI":"10.1007\/978-3-319-93417-4-48"},{"key":"20_CR36","doi-asserted-by":"crossref","unstructured":"Sandler, M., Howard, A., Zhu, M., Zhmoginov, A., Chen, L.-C.: Mobilenetv2: inverted residuals and linear bottlenecks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4510\u20134520 (2018)","DOI":"10.1109\/CVPR.2018.00474"},{"key":"20_CR37","doi-asserted-by":"publisher","unstructured":"Droste, R., Jiao, J., Noble, J.A.: Unified image and video saliency modeling. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12350, pp. 419\u2013435. Springer, Cham (2020). 
https:\/\/doi.org\/10.1007\/978-3-030-58558-7_25","DOI":"10.1007\/978-3-030-58558-7_25"},{"issue":"11","key":"20_CR38","doi-asserted-by":"publisher","first-page":"20912","DOI":"10.1109\/TITS.2022.3177640","volume":"23","author":"S Gan","year":"2022","unstructured":"Gan, S., et al.: Multisource adaption for driver attention prediction in arbitrary driving scenes. IEEE Trans. Intell. Transp. Syst. 23(11), 20912\u201320925 (2022)","journal-title":"IEEE Trans. Intell. Transp. Syst."},{"key":"20_CR39","doi-asserted-by":"crossref","unstructured":"Chang, W.-G., You, T., Seo, S., Kwak, S., Han, B.: Domain-specific batch normalization for unsupervised domain adaptation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 7354\u20137362 (2019)","DOI":"10.1109\/CVPR.2019.00753"},{"issue":"7","key":"20_CR40","doi-asserted-by":"publisher","first-page":"2051","DOI":"10.1109\/TITS.2016.2535402","volume":"17","author":"T Deng","year":"2016","unstructured":"Deng, T., Yang, K., Li, Y., Yan, H.: Where does the driver look? top-down-based saliency detection in a traffic driving environment. IEEE Trans. Intell. Transp. Syst. 17(7), 2051\u20132062 (2016)","journal-title":"IEEE Trans. Intell. Transp. Syst."},{"key":"20_CR41","doi-asserted-by":"crossref","unstructured":"Fang, J., Yan, D., Qiao, J., Xue, J., Wang, H., Li, S.: Dada-2000: can driving accident be predicted by driver attention? analyzed by a benchmark. In: 2019 IEEE Intelligent Transportation Systems Conference (ITSC), pp. 4303\u20134309. IEEE (2019)","DOI":"10.1109\/ITSC.2019.8917218"},{"key":"20_CR42","doi-asserted-by":"crossref","unstructured":"Alletto, S., Palazzi, A., Solera, F., Calderara, S., Cucchiara, R.: Dr (eye) ve: a dataset for attention-based tasks with applications to autonomous and assisted driving. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 
54\u201360 (2016)","DOI":"10.1109\/CVPRW.2016.14"},{"key":"20_CR43","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2020.107404","volume":"106","author":"X Qin","year":"2020","unstructured":"Qin, X., Zhang, Z., Huang, C., Dehghan, M., Zaiane, O.R., Jagersand, M.: U2-net: going deeper with nested u-structure for salient object detection. Pattern Recogn. 106, 107404 (2020)","journal-title":"Pattern Recogn."},{"key":"20_CR44","doi-asserted-by":"crossref","unstructured":"Pang, Y., Zhao, X., Zhang, L., Lu, H.: Multi-scale interactive network for salient object detection. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 9413\u20139422 (2020)","DOI":"10.1109\/CVPR42600.2020.00943"},{"key":"20_CR45","doi-asserted-by":"crossref","unstructured":"Bao, W., Yu, Q., Kong, Y.: Drive: deep reinforced accident anticipation with visual explanation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 7619\u20137628 (2021)","DOI":"10.1109\/ICCV48922.2021.00752"},{"key":"20_CR46","doi-asserted-by":"crossref","unstructured":"Cornia, M., Baraldi, L., Serra, G., Cucchiara, R.: A deep multi-level network for saliency prediction. In: 2016 23rd International Conference on Pattern Recognition (ICPR), pp. 3488\u20133493. IEEE (2016)","DOI":"10.1109\/ICPR.2016.7900174"},{"key":"20_CR47","doi-asserted-by":"crossref","unstructured":"Wang, P., et al.: Pgnet: real-time arbitrarily-shaped text spotting with point gathering network. In: Proceedings of the AAAI Conference on Artificial Intelligence 35, pp. 
2782\u20132790 (2021)","DOI":"10.1609\/aaai.v35i4.16383"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024 Workshops"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-91578-9_20","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,6]],"date-time":"2025-06-06T09:23:50Z","timestamp":1749201830000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-91578-9_20"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"ISBN":["9783031915772","9783031915789"],"references-count":47,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-91578-9_20","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"12 May 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start 
Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}