{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,11]],"date-time":"2026-01-11T08:29:33Z","timestamp":1768120173900,"version":"3.49.0"},"publisher-location":"Cham","reference-count":41,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031784552","type":"print"},{"value":"9783031784569","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,12,3]],"date-time":"2024-12-03T00:00:00Z","timestamp":1733184000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,12,3]],"date-time":"2024-12-03T00:00:00Z","timestamp":1733184000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-78456-9_23","type":"book-chapter","created":{"date-parts":[[2024,12,2]],"date-time":"2024-12-02T11:23:53Z","timestamp":1733138633000},"page":"358-372","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Audio-Visual Navigation with Anti-Backtracking"],"prefix":"10.1007","author":[{"given":"Zhenghao","family":"Zhao","sequence":"first","affiliation":[]},{"given":"Hao","family":"Tang","sequence":"additional","affiliation":[]},{"given":"Yan","family":"Yan","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,12,3]]},"reference":[{"key":"23_CR1","doi-asserted-by":"crossref","unstructured":"P.\u00a0Anderson, Q.\u00a0Wu, D.\u00a0Teney, J.\u00a0Bruce, M.\u00a0Johnson, N.\u00a0S\u00fcnderhauf, I.\u00a0Reid, S.\u00a0Gould, and A.\u00a0Van Den\u00a0Hengel, \u201cVision-and-language navigation: Interpreting visually-grounded navigation instructions in real environments,\u201d in Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2018","DOI":"10.1109\/CVPR.2018.00387"},{"key":"23_CR2","doi-asserted-by":"crossref","unstructured":"S.\u00a0Gupta, J.\u00a0Davidson, S.\u00a0Levine, R.\u00a0Sukthankar, and J.\u00a0Malik, \u201cCognitive mapping and planning for visual navigation,\u201d in Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2017","DOI":"10.1109\/CVPR.2017.769"},{"key":"23_CR3","doi-asserted-by":"crossref","unstructured":"J.\u00a0Truong, S.\u00a0Chernova, and D.\u00a0Batra, \u201cBi-directional domain adaptation for sim2real transfer of embodied navigation agents,\u201d IEEE Robotics and Automation Letters (RA-L), vol.\u00a06, no.\u00a02, 2021","DOI":"10.1109\/LRA.2021.3062303"},{"key":"23_CR4","doi-asserted-by":"crossref","unstructured":"Z.\u00a0Zhao, H.\u00a0Tang, J.\u00a0Wan, and Y.\u00a0Yan, \u201cMonocular expressive 3d human reconstruction of multiple people,\u201d in Proceedings of the 2024 International Conference on Multimedia Retrieval, 2024, pp. 423\u2013432","DOI":"10.1145\/3652583.3658092"},{"key":"23_CR5","doi-asserted-by":"crossref","unstructured":"H.\u00a0Wang, Z.\u00a0Yu, Y.\u00a0Yue, A.\u00a0Anandkumar, A.\u00a0Liu, and J.\u00a0Yan, \u201cLearning calibrated uncertainties for domain shift: A distributionally robust learning approach.\u201d in IJCAI, 2023, pp. 
1460\u20131469","DOI":"10.24963\/ijcai.2023\/162"},{"key":"23_CR6","doi-asserted-by":"crossref","unstructured":"J.\u00a0Liang, W.\u00a0Huang, F.\u00a0Xia, P.\u00a0Xu, K.\u00a0Hausman, B.\u00a0Ichter, P.\u00a0Florence, and A.\u00a0Zeng, \u201cCode as policies: Language model programs for embodied control,\u201d in 2023 IEEE International Conference on Robotics and Automation (ICRA), IEEE, 2023","DOI":"10.1109\/ICRA48891.2023.10160591"},{"key":"23_CR7","doi-asserted-by":"crossref","unstructured":"J.\u00a0Duan, S.\u00a0Yu, H.\u00a0L. Tan, H.\u00a0Zhu, and C.\u00a0Tan, \u201cA survey of embodied ai: From simulators to research tasks,\u201d IEEE Transactions on Emerging Topics in Computational Intelligence (TETCI), vol.\u00a06, no.\u00a02, 2022","DOI":"10.1109\/TETCI.2022.3141105"},{"key":"23_CR8","unstructured":"G.\u00a0Zhang, H.\u00a0Tang, and Y.\u00a0Yan, \u201cVersatile navigation under partial observability via value-guided diffusion policy,\u201d in Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2024, pp. 17\u00a0943\u201317\u00a0951"},{"key":"23_CR9","unstructured":"Y.\u00a0Shang, D.\u00a0Xu, G.\u00a0Liu, R.\u00a0R. Kompella, and Y.\u00a0Yan, \u201cEfficient multitask dense predictor via binarization,\u201d in Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2024, pp. 15\u00a0899\u201315\u00a0908"},{"key":"23_CR10","doi-asserted-by":"crossref","unstructured":"C.\u00a0Chen, U.\u00a0Jain, C.\u00a0Schissler, S.\u00a0V.\u00a0A. Gari, Z.\u00a0Al-Halah, V.\u00a0K. Ithapu, P.\u00a0Robinson, and K.\u00a0Grauman, \u201cSoundspaces: Audio-visual navigation in 3d environments,\u201d in Computer Vision\u2013ECCV 2020: 16th European Conference, Glasgow, UK, August 23\u201328, 2020, Proceedings, Part VI 16,Springer, 2020","DOI":"10.1007\/978-3-030-58539-6_2"},{"key":"23_CR11","unstructured":"C.\u00a0Chen, C.\u00a0Schissler, S.\u00a0Garg, P.\u00a0Kobernik, A.\u00a0Clegg, P.\u00a0Calamia, D.\u00a0Batra, P.\u00a0Robinson, and K.\u00a0Grauman, \u201cSoundspaces 2.0: A simulation platform for visual-acoustic learning,\u201d Advances in Neural Information Processing Systems (NeurIPS), vol.\u00a035, 2022"},{"key":"23_CR12","doi-asserted-by":"crossref","unstructured":"C.\u00a0Gan, Y.\u00a0Zhang, J.\u00a0Wu, B.\u00a0Gong, and J.\u00a0B. Tenenbaum, \u201cLook, listen, and act: Towards audio-visual embodied navigation,\u201d in 2020 IEEE International Conference on Robotics and Automation (ICRA), IEEE, 2020","DOI":"10.1109\/ICRA40945.2020.9197008"},{"key":"23_CR13","unstructured":"Y.\u00a0Yu, W.\u00a0Huang, F.\u00a0Sun, C.\u00a0Chen, Y.\u00a0Wang, and X.\u00a0Liu, \u201cSound adversarial audio-visual navigation,\u201d arXiv preprint arXiv:2202.10910, 2022"},{"key":"23_CR14","doi-asserted-by":"crossref","unstructured":"C.\u00a0Chen, Z.\u00a0Al-Halah, and K.\u00a0Grauman, \u201cSemantic audio-visual navigation,\u201d in Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2021","DOI":"10.1109\/CVPR46437.2021.01526"},{"key":"23_CR15","doi-asserted-by":"crossref","unstructured":"C.\u00a0Chen, S.\u00a0Majumder, Z.\u00a0Al-Halah, R.\u00a0Gao, S.\u00a0K. 
Ramakrishnan, and K.\u00a0Grauman, \u201cLearning to set waypoints for audio-visual navigation,\u201d arXiv preprint arXiv:2008.09622, 2020","DOI":"10.1109\/CVPR46437.2021.01526"},{"key":"23_CR16","unstructured":"A.\u00a0Dosovitskiy, L.\u00a0Beyer, A.\u00a0Kolesnikov, D.\u00a0Weissenborn, X.\u00a0Zhai, T.\u00a0Unterthiner, M.\u00a0Dehghani, M.\u00a0Minderer, G.\u00a0Heigold, S.\u00a0Gelly et\u00a0al., \u201cAn image is worth 16x16 words: Transformers for image recognition at scale,\u201d arXiv preprint arXiv:2010.11929, 2020"},{"key":"23_CR17","doi-asserted-by":"crossref","unstructured":"Z.\u00a0Liu, Y.\u00a0Lin, Y.\u00a0Cao, H.\u00a0Hu, Y.\u00a0Wei, Z.\u00a0Zhang, S.\u00a0Lin, and B.\u00a0Guo, \u201cSwin transformer: Hierarchical vision transformer using shifted windows,\u201d in Proceedings of the IEEE\/CVF international conference on computer vision (ICCV), 2021","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"23_CR18","doi-asserted-by":"crossref","unstructured":"J.\u00a0Wu, B.\u00a0Duan, W.\u00a0Kang, H.\u00a0Tang, and Y.\u00a0Yan, \u201cToken transformation matters: Towards faithful post-hoc explanation for vision transformer,\u201d in Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2024, pp. 10\u00a0926\u201310\u00a0935","DOI":"10.1109\/CVPR52733.2024.01039"},{"key":"23_CR19","doi-asserted-by":"crossref","unstructured":"A.\u00a0D. Ekstrom, \u201cWhy vision is important to how we navigate,\u201d Hippocampus, vol.\u00a025, no.\u00a06, 2015","DOI":"10.1002\/hipo.22449"},{"key":"23_CR20","doi-asserted-by":"crossref","unstructured":"E.\u00a0C. Tolman, \u201cCognitive maps in rats and men.\u201d Psychological review, vol.\u00a055, no.\u00a04, 1948","DOI":"10.1037\/h0061626"},{"key":"23_CR21","unstructured":"D.\u00a0S. Chaplot, R.\u00a0Salakhutdinov, A.\u00a0Gupta, and S.\u00a0Gupta, \u201cNeural topological slam for visual navigation,\u201d in Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020"},{"key":"23_CR22","unstructured":"S.\u00a0Bansal, V.\u00a0Tolani, S.\u00a0Gupta, J.\u00a0Malik, and C.\u00a0Tomlin, \u201cCombining optimal control and learning for visual navigation in novel environments,\u201d in Conference on Robot Learning, PMLR, 2020"},{"key":"23_CR23","doi-asserted-by":"crossref","unstructured":"Z.\u00a0Al-Halah, S.\u00a0K. Ramakrishnan, and K.\u00a0Grauman, \u201cZero experience required: Plug & play modular transfer learning for semantic visual navigation,\u201d in Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022","DOI":"10.1109\/CVPR52688.2022.01652"},{"key":"23_CR24","doi-asserted-by":"crossref","unstructured":"A.\u00a0Das, S.\u00a0Datta, G.\u00a0Gkioxari, S.\u00a0Lee, D.\u00a0Parikh, and D.\u00a0Batra, \u201cEmbodied question answering,\u201d in Proceedings of the IEEE conference on computer vision and pattern recognition, 2018, pp. 1\u201310","DOI":"10.1109\/CVPR.2018.00008"},{"key":"23_CR25","doi-asserted-by":"crossref","unstructured":"U.\u00a0Jain, L.\u00a0Weihs, E.\u00a0Kolve, M.\u00a0Rastegari, S.\u00a0Lazebnik, A.\u00a0Farhadi, A.\u00a0G. Schwing, and A.\u00a0Kembhavi, \u201cTwo body problem: Collaborative visual task completion,\u201d in Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2019, pp. 
6689\u20136699","DOI":"10.1109\/CVPR.2019.00685"},{"key":"23_CR26","doi-asserted-by":"crossref","unstructured":"M.\u00a0Wortsman, K.\u00a0Ehsani, M.\u00a0Rastegari, A.\u00a0Farhadi, and R.\u00a0Mottaghi, \u201cLearning to learn how to learn: Self-adaptive visual navigation using meta-learning,\u201d in Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, 2019, pp. 6750\u20136759","DOI":"10.1109\/CVPR.2019.00691"},{"key":"23_CR27","unstructured":"J.\u00a0Thomason, M.\u00a0Murray, M.\u00a0Cakmak, and L.\u00a0Zettlemoyer, \u201cVision-and-dialog navigation,\u201d in Conference on Robot Learning (CoRL).\u00a0\u00a0\u00a0PMLR, 2020"},{"key":"23_CR28","doi-asserted-by":"crossref","unstructured":"Y.\u00a0Hong, Q.\u00a0Wu, Y.\u00a0Qi, C.\u00a0Rodriguez-Opazo, and S.\u00a0Gould, \u201cVln bert: A recurrent vision-and-language bert for navigation,\u201d in Proceedings of the IEEE\/CVF conference on Computer Vision and Pattern Recognition (CVPR), 2021","DOI":"10.1109\/CVPR46437.2021.00169"},{"key":"23_CR29","doi-asserted-by":"crossref","unstructured":"W.\u00a0Hao, C.\u00a0Li, X.\u00a0Li, L.\u00a0Carin, and J.\u00a0Gao, \u201cTowards learning a generic agent for vision-and-language navigation via pre-training,\u201d in Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2020","DOI":"10.1109\/CVPR42600.2020.01315"},{"key":"23_CR30","unstructured":"J.\u00a0Devlin, M.-W. Chang, K.\u00a0Lee, and K.\u00a0Toutanova, \u201cBert: Pre-training of deep bidirectional transformers for language understanding,\u201d arXiv preprint arXiv:1810.04805, 2018"},{"key":"23_CR31","doi-asserted-by":"crossref","unstructured":"M.\u00a0Savva, A.\u00a0Kadian, O.\u00a0Maksymets, Y.\u00a0Zhao, E.\u00a0Wijmans, B.\u00a0Jain, J.\u00a0Straub, J.\u00a0Liu, V.\u00a0Koltun, J.\u00a0Malik et\u00a0al., \u201cHabitat: A platform for embodied ai research,\u201d in Proceedings of the IEEE\/CVF international conference on computer vision, 2019, pp. 9339\u20139347","DOI":"10.1109\/ICCV.2019.00943"},{"key":"23_CR32","unstructured":"D.\u00a0S. Chaplot, S.\u00a0Gupta, D.\u00a0Gandhi, A.\u00a0K. Gupta, and R.\u00a0Salakhutdinov, \u201cLearning to explore using active neural mapping,\u201d in International Conference on Learning Representations, 2020. [Online]. Available: https:\/\/api.semanticscholar.org\/CorpusID:204770375"},{"key":"23_CR33","doi-asserted-by":"crossref","unstructured":"D.\u00a0Gordon, A.\u00a0Kadian, D.\u00a0Parikh, J.\u00a0Hoffman, and D.\u00a0Batra, \u201cSplitnet: Sim2sim and task2task transfer for embodied visual navigation,\u201d in Proceedings of the IEEE\/CVF International Conference on Computer Vision, 2019, pp. 1022\u20131031","DOI":"10.1109\/ICCV.2019.00111"},{"key":"23_CR34","doi-asserted-by":"crossref","unstructured":"H.\u00a0Kuttruff, Room acoustics.Crc Press, 2016","DOI":"10.1201\/9781315372150"},{"key":"23_CR35","unstructured":"J.\u00a0Chung, K.\u00a0Kastner, L.\u00a0Dinh, K.\u00a0Goel, A.\u00a0C. 
Courville, and Y.\u00a0Bengio, \u201cA recurrent latent variable model for sequential data,\u201d Advances in neural information processing systems, vol.\u00a028, 2015"},{"key":"23_CR36","unstructured":"J.\u00a0Schulman, F.\u00a0Wolski, P.\u00a0Dhariwal, A.\u00a0Radford, and O.\u00a0Klimov, \u201cProximal policy optimization algorithms,\u201d arXiv preprint arXiv:1707.06347, 2017"},{"key":"23_CR37","doi-asserted-by":"crossref","unstructured":"M.\u00a0Savva, A.\u00a0Kadian, O.\u00a0Maksymets, Y.\u00a0Zhao, E.\u00a0Wijmans, B.\u00a0Jain, J.\u00a0Straub, J.\u00a0Liu, V.\u00a0Koltun, J.\u00a0Malik et\u00a0al., \u201cHabitat: A platform for embodied ai research,\u201d in Proceedings of the IEEE\/CVF international conference on computer vision (ICCV), 2019","DOI":"10.1109\/ICCV.2019.00943"},{"key":"23_CR38","unstructured":"A.\u00a0Szot, A.\u00a0Clegg, E.\u00a0Undersander, E.\u00a0Wijmans, Y.\u00a0Zhao, J.\u00a0Turner, N.\u00a0Maestre, M.\u00a0Mukadam, D.\u00a0S. Chaplot, O.\u00a0Maksymets et\u00a0al., \u201cHabitat 2.0: Training home assistants to rearrange their habitat,\u201d Advances in Neural Information Processing Systems (NeurIPS), vol.\u00a034, 2021"},{"key":"23_CR39","doi-asserted-by":"crossref","unstructured":"A.\u00a0Chang, A.\u00a0Dai, T.\u00a0Funkhouser, M.\u00a0Halber, M.\u00a0Niessner, M.\u00a0Savva, S.\u00a0Song, A.\u00a0Zeng, and Y.\u00a0Zhang, \u201cMatterport3d: Learning from rgb-d data in indoor environments,\u201d arXiv preprint arXiv:1709.06158, 2017","DOI":"10.1109\/3DV.2017.00081"},{"key":"23_CR40","unstructured":"J.\u00a0Straub, T.\u00a0Whelan, L.\u00a0Ma, Y.\u00a0Chen, E.\u00a0Wijmans, S.\u00a0Green, J.\u00a0J. Engel, R.\u00a0Mur-Artal, C.\u00a0Ren, S.\u00a0Verma et\u00a0al., \u201cThe replica dataset: A digital replica of indoor spaces,\u201d arXiv preprint arXiv:1906.05797, 2019"},{"key":"23_CR41","unstructured":"P.\u00a0Anderson, A.\u00a0Chang, D.\u00a0S. 
Chaplot, A.\u00a0Dosovitskiy, S.\u00a0Gupta, V.\u00a0Koltun, J.\u00a0Kosecka, J.\u00a0Malik, R.\u00a0Mottaghi, M.\u00a0Savva et\u00a0al., \u201cOn evaluation of embodied navigation agents,\u201d arXiv preprint arXiv:1807.06757, 2018"}],"container-title":["Lecture Notes in Computer Science","Pattern Recognition"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-78456-9_23","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,2]],"date-time":"2024-12-02T12:13:17Z","timestamp":1733141597000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-78456-9_23"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,3]]},"ISBN":["9783031784552","9783031784569"],"references-count":41,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-78456-9_23","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,12,3]]},"assertion":[{"value":"3 December 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICPR","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Pattern Recognition","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Kolkata","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"India","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"1 December 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"5 December 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icpr2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/icpr2024.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
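The record above has the shape of a Crossref REST API "work" message (top-level "status"/"message-type"/"message" keys). As a minimal, hedged sketch of how such a record could be retrieved and a few of its fields read, assuming only the Python standard library and network access (the endpoint https://api.crossref.org/works/{doi} is the public Crossref API; the field names used are the ones visible in the record above):

import json
import urllib.request

# DOI taken from the record above
DOI = "10.1007/978-3-031-78456-9_23"
url = f"https://api.crossref.org/works/{DOI}"

with urllib.request.urlopen(url) as resp:
    # Top level looks like {"status": "ok", "message-type": "work", "message": {...}}
    payload = json.load(resp)

work = payload["message"]                      # the chapter metadata lives under "message"
print(work["title"][0])                        # "Audio-Visual Navigation with Anti-Backtracking"
print(work["DOI"], work["type"])               # "10.1007/978-3-031-78456-9_23", "book-chapter"
print("references:", work["references-count"]) # 41 in this record
for author in work["author"]:                  # authors are given/family pairs
    print(author["given"], author["family"])

This only illustrates reading the JSON structure shown here; the exact set of keys returned by Crossref can vary between records.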