{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,14]],"date-time":"2026-01-14T23:55:25Z","timestamp":1768434925193,"version":"3.49.0"},"publisher-location":"Cham","reference-count":74,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031733963","type":"print"},{"value":"9783031733970","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,11,3]],"date-time":"2024-11-03T00:00:00Z","timestamp":1730592000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,3]],"date-time":"2024-11-03T00:00:00Z","timestamp":1730592000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-73397-0_3","type":"book-chapter","created":{"date-parts":[[2024,11,2]],"date-time":"2024-11-02T19:10:38Z","timestamp":1730574638000},"page":"37-54","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":5,"title":["Controllable Navigation Instruction Generation with\u00a0Chain of\u00a0Thought Prompting"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0009-0004-9865-4105","authenticated-orcid":false,"given":"Xianghao","family":"Kong","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0002-7106-8312","authenticated-orcid":false,"given":"Jinyu","family":"Chen","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0802-9567","authenticated-orcid":false,"given":"Wenguan","family":"Wang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8294-6315","authenticated-orcid":false,"given":"Hang","family":"Su","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4907-7354","authenticated-orcid":false,"given":"Xiaolin","family":"Hu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0512-880X","authenticated-orcid":false,"given":"Yi","family":"Yang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9180-2935","authenticated-orcid":false,"given":"Si","family":"Liu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,3]]},"reference":[{"key":"3_CR1","doi-asserted-by":"crossref","unstructured":"Allen, G.L.: From knowledge to words to wayfinding: issues in the production and comprehension of route directions. In: International Conference on Spatial Information Theory (1997)","DOI":"10.1007\/3-540-63623-4_61"},{"key":"3_CR2","unstructured":"An, D., et al.: Bevbert: multimodal map pre-training for language-guided navigation. In: ICCV (2023)"},{"key":"3_CR3","doi-asserted-by":"crossref","unstructured":"An, D., et al.: Etpnav: evolving topological planning for vision-language navigation in continuous environments. In: IEEE TPAMI (2024)","DOI":"10.1109\/TPAMI.2024.3386695"},{"key":"3_CR4","doi-asserted-by":"crossref","unstructured":"Anderson, P., Fernando, B., Johnson, M., Gould, S.: Spice: semantic propositional image caption evaluation. 
In: ECCV (2016)","DOI":"10.1007\/978-3-319-46454-1_24"},{"key":"3_CR5","doi-asserted-by":"crossref","unstructured":"Anderson, P., et al.: Vision-and-language navigation: interpreting visually-grounded navigation instructions in real environments. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00387"},{"key":"3_CR6","unstructured":"Ba, J.L., Kiros, J.R., Hinton, G.E.: Layer normalization. arXiv preprint arXiv:1607.06450 (2016)"},{"key":"3_CR7","unstructured":"Banerjee, S., Lavie, A.: Meteor: an automatic metric for MT evaluation with improved correlation with human judgments. In: ACL Workshop (2005)"},{"key":"3_CR8","unstructured":"Brown, T., et\u00a0al.: Language models are few-shot learners. In: NeurIPS (2020)"},{"key":"3_CR9","doi-asserted-by":"crossref","unstructured":"Chen, J., Gao, C., Meng, E., Zhang, Q., Liu, S.: Reinforced structured state-evolution for vision-language navigation. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01501"},{"key":"3_CR10","doi-asserted-by":"crossref","unstructured":"Chen, S., Guhur, P.L., Schmid, C., Laptev, I.: History aware multimodal transformer for vision-and-language navigation. In: NeurIPS (2021)","DOI":"10.1109\/ICCV48922.2021.00166"},{"key":"3_CR11","doi-asserted-by":"crossref","unstructured":"Chen, S., Guhur, P.L., Tapaswi, M., Schmid, C., Laptev, I.: Think global, act local: dual-scale graph transformer for vision-and-language navigation. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01604"},{"key":"3_CR12","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: Bert: pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)"},{"key":"3_CR13","unstructured":"Dosovitskiy, A., et\u00a0al.: An image is worth 16x16 words: transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)"},{"key":"3_CR14","unstructured":"Dosovitskiy, A., Ros, G., Codevilla, F., Lopez, A., Koltun, V.: Carla: an open urban driving simulator. In: CoRL (2017)"},{"key":"3_CR15","doi-asserted-by":"crossref","unstructured":"Dou, Z.Y., Peng, N.: Foam: a follower-aware speaker model for vision-and-language navigation. In: NAACL (2022)","DOI":"10.18653\/v1\/2022.naacl-main.322"},{"key":"3_CR16","unstructured":"Fried, D., et al.: Speaker-follower models for vision-and-language navigation. In: NeurIPS (2018)"},{"key":"3_CR17","doi-asserted-by":"crossref","unstructured":"Gao, C., Chen, J., Liu, S., Wang, L., Zhang, Q., Wu, Q.: Room-and-object aware knowledge reasoning for remote embodied referring expression. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.00308"},{"key":"3_CR18","unstructured":"Gao, C., et al.: Room-object entity prompting and reasoning for embodied referring expression. In: IEEE TPAMI (2023)"},{"key":"3_CR19","doi-asserted-by":"crossref","unstructured":"Gao, C., et al.: Adaptive zone-aware hierarchical planner for vision-language navigation. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.01432"},{"key":"3_CR20","unstructured":"Gao, P., et al.: Llama-adapter v2: parameter-efficient visual instruction model. arXiv preprint arXiv:2304.15010 (2023)"},{"key":"3_CR21","doi-asserted-by":"crossref","unstructured":"Hao, W., Li, C., Li, X., Carin, L., Gao, J.: Towards learning a generic agent for vision-and-language navigation via pre-training. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.01315"},{"key":"3_CR22","unstructured":"He, K., et al.: Landmark-RXR: solving vision-and-language navigation with fine-grained alignment supervision. 
In: NeurIPS (2021)"},{"key":"3_CR23","doi-asserted-by":"crossref","unstructured":"He, K., Si, C., Lu, Z., Huang, Y., Wang, L., Wang, X.: Frequency-enhanced data augmentation for vision-and-language navigation. In: NeurIPS (2024)","DOI":"10.2139\/ssrn.4539118"},{"key":"3_CR24","doi-asserted-by":"crossref","unstructured":"Hong, Y., Rodriguez, C., Wu, Q., Gould, S.: Sub-instruction aware vision-and-language navigation. In: EMNLP (2020)","DOI":"10.18653\/v1\/2020.emnlp-main.271"},{"key":"3_CR25","unstructured":"Hu, E.J., et\u00a0al.: Lora: low-rank adaptation of large language models. In: ICLR (2021)"},{"key":"3_CR26","doi-asserted-by":"publisher","unstructured":"Huang, Z., Shangguan, Z., Zhang, J., Bar, G., Boyd, M., Ohn-Bar, E.: ASSISTER: assistive navigation via\u00a0conditional instruction generation. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022, Part XXXVI, pp. 271\u2013289. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-20059-5_16","DOI":"10.1007\/978-3-031-20059-5_16"},{"key":"3_CR27","doi-asserted-by":"crossref","unstructured":"Hund, A.M., Minarik, J.L.: Getting from here to there: spatial anxiety, wayfinding strategies, direction type, and wayfinding efficiency. Spatial Cognit. Comput. 6(3), 179\u2013201 (2006)","DOI":"10.1207\/s15427633scc0603_1"},{"key":"3_CR28","doi-asserted-by":"crossref","unstructured":"Jain, V., Magalhaes, G., Ku, A., Vaswani, A., Ie, E., Baldridge, J.: Stay on the path: instruction fidelity in vision-and-language navigation. In: ACL (2019)","DOI":"10.18653\/v1\/P19-1181"},{"key":"3_CR29","doi-asserted-by":"crossref","unstructured":"Kamath, A., et al.: A new path: scaling vision-and-language navigation with synthetic instructions and imitation learning. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.01041"},{"key":"3_CR30","unstructured":"Karimi\u00a0Mahabadi, R., Henderson, J., Ruder, S.: Compacter: efficient low-rank hypercomplex adapter layers. In: NeurIPS (2021)"},{"key":"3_CR31","doi-asserted-by":"crossref","unstructured":"Ku, A., Anderson, P., Patel, R., Ie, E., Baldridge, J.: Room-across-room: multilingual vision-and-language navigation with dense spatiotemporal grounding. In: EMNLP (2020)","DOI":"10.18653\/v1\/2020.emnlp-main.356"},{"key":"3_CR32","doi-asserted-by":"crossref","unstructured":"Kuipers, B.: Modeling spatial knowledge. Cognit. Sci. 2(2), 129\u2013153 (1978)","DOI":"10.1016\/S0364-0213(78)80003-2"},{"key":"3_CR33","doi-asserted-by":"crossref","unstructured":"Lester, B., Al-Rfou, R., Constant, N.: The power of scale for parameter-efficient prompt tuning. In: EMNLP (2021)","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"3_CR34","unstructured":"Li, J., Bansal, M.: Panogen: text-conditioned panoramic environment generation for vision-and-language navigation. In: NeurIPS (2024)"},{"key":"3_CR35","unstructured":"Li, J., Li, D., Xiong, C., Hoi, S.: Blip: bootstrapping language-image pre-training for unified vision-language understanding and generation. In: ICML. PMLR (2022)"},{"key":"3_CR36","doi-asserted-by":"crossref","unstructured":"Li, X.L., Liang, P.: Prefix-tuning: optimizing continuous prompts for generation. In: ACL-IJCNLP (2021)","DOI":"10.18653\/v1\/2021.acl-long.353"},{"key":"3_CR37","unstructured":"Lin, C.Y.: Rouge: a package for automatic evaluation of summaries. 
In: Text Summarization Branches Out (2004)"},{"key":"3_CR38","doi-asserted-by":"crossref","unstructured":"Liu, C., Zhu, F., Chang, X., Liang, X., Ge, Z., Shen, Y.D.: Vision-language navigation with random environmental mixup. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00167"},{"key":"3_CR39","doi-asserted-by":"crossref","unstructured":"Liu, R., Wang, W., Yang, Y.: Volumetric environment representation for vision-language navigation. In: CVPR (2024)","DOI":"10.1109\/CVPR52733.2024.01544"},{"key":"3_CR40","doi-asserted-by":"crossref","unstructured":"Liu, R., Wang, X., Wang, W., Yang, Y.: Bird\u2019s-eye-view scene graph for vision-language navigation. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.01007"},{"key":"3_CR41","doi-asserted-by":"crossref","unstructured":"Liu, X., et al.: GPT understands, too. AI Open (2023)","DOI":"10.1016\/j.aiopen.2023.08.012"},{"key":"3_CR42","unstructured":"Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101 (2017)"},{"key":"3_CR43","unstructured":"Lynch, K.: The image of the city (1964)"},{"key":"3_CR44","doi-asserted-by":"crossref","unstructured":"Mehta, H., Artzi, Y., Baldridge, J., Ie, E., Mirowski, P.: Retouchdown: adding touchdown to streetlearn as a shareable resource for language grounding tasks in street view. arXiv preprint arXiv:2001.03671 (2020)","DOI":"10.18653\/v1\/2020.splu-1.7"},{"key":"3_CR45","doi-asserted-by":"crossref","unstructured":"Nguyen, K., Daum\u00e9\u00a0III, H.: Help, anna! visual navigation with natural multimodal assistance via retrospective curiosity-encouraging imitation learning. In: EMNLP-IJCNLP (2019)","DOI":"10.18653\/v1\/D19-1063"},{"key":"3_CR46","doi-asserted-by":"crossref","unstructured":"Nguyen, K., Dey, D., Brockett, C., Dolan, B.: Vision-based navigation with language-based assistance via imitation learning with indirect intervention. In: CVPR (2018)","DOI":"10.1109\/CVPR.2019.01281"},{"key":"3_CR47","doi-asserted-by":"crossref","unstructured":"Papineni, K., Roukos, S., Ward, T., Zhu, W.J.: Bleu: a method for automatic evaluation of machine translation. In: ACL (2002)","DOI":"10.3115\/1073083.1073135"},{"key":"3_CR48","doi-asserted-by":"crossref","unstructured":"Qi, Y., Wu, Q., Anderson, P., Wang, X., Wang, W.Y., Shen, C., Hengel, A.V.D.: Reverie: remote embodied visual referring expression in real indoor environments. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.01000"},{"key":"3_CR49","doi-asserted-by":"crossref","unstructured":"Qiao, Y., Qi, Y., Yu, Z., Liu, J., Wu, Q.: March in chat: Interactive prompting for remote embodied referring expression. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.01444"},{"key":"3_CR50","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: ICML (2021)"},{"key":"3_CR51","doi-asserted-by":"crossref","unstructured":"Shridhar, M., et al.: Alfred: a benchmark for interpreting grounded instructions for everyday tasks. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.01075"},{"key":"3_CR52","doi-asserted-by":"crossref","unstructured":"Tan, H., Yu, L., Bansal, M.: Learning to navigate unseen environments: Back translation with environmental dropout. In: NAACL (2019)","DOI":"10.18653\/v1\/N19-1268"},{"key":"3_CR53","doi-asserted-by":"crossref","unstructured":"Taylor, A., Marcus, M., Santorini, B.: The Penn treebank: an overview. Treebanks: Building and Using Parsed Corpora, pp. 
5\u201322 (2003)","DOI":"10.1007\/978-94-010-0201-1_1"},{"key":"3_CR54","unstructured":"Thomason, J., Murray, M., Cakmak, M., Zettlemoyer, L.: Vision-and-dialog navigation. In: CoRL (2020)"},{"key":"3_CR55","unstructured":"Touvron, H., et\u00a0al.: Llama: open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)"},{"key":"3_CR56","doi-asserted-by":"crossref","unstructured":"Vanetti, E.J., Allen, G.L.: Communicating environmental knowledge: the impact of verbal and spatial abilities on the production and comprehension of route directions. Environ. Behav. 20(6), 667\u2013682 (1988)","DOI":"10.1177\/0013916588206001"},{"key":"3_CR57","doi-asserted-by":"crossref","unstructured":"Vedantam, R., Lawrence\u00a0Zitnick, C., Parikh, D.: Cider: consensus-based image description evaluation. In: CVPR (2015)","DOI":"10.1109\/CVPR.2015.7299087"},{"key":"3_CR58","doi-asserted-by":"crossref","unstructured":"Wang, H., Liang, W., Shen, J., Van\u00a0Gool, L., Wang, W.: Counterfactual cycle-consistent learning for instruction following and generation in vision-language navigation. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01503"},{"key":"3_CR59","doi-asserted-by":"crossref","unstructured":"Wang, H., Liang, W., Van\u00a0Gool, L., Wang, W.: Dreamwalker: mental planning for continuous vision-language navigation. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00998"},{"key":"3_CR60","doi-asserted-by":"crossref","unstructured":"Wang, H., Wang, W., Liang, W., Xiong, C., Shen, J.: Structured scene memory for vision-language navigation. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.00835"},{"key":"3_CR61","doi-asserted-by":"crossref","unstructured":"Wang, H., Wang, W., Shu, T., Liang, W., Shen, J.: Active visual information gathering for vision-language navigation. In: ECCV (2020)","DOI":"10.1007\/978-3-030-58542-6_19"},{"key":"3_CR62","doi-asserted-by":"crossref","unstructured":"Wang, S., et al.: Less is more: generating grounded navigation instructions from landmarks. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01499"},{"key":"3_CR63","doi-asserted-by":"crossref","unstructured":"Wang, X., Wang, W., Shao, J., Yang, Y.: Lana: a language-capable navigator for instruction following and generation. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.01826"},{"key":"3_CR64","doi-asserted-by":"crossref","unstructured":"Wang, Z., Li, X., Yang, J., Liu, Y., Jiang, S.: Gridmm: grid memory map for vision-and-language navigation. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.01432"},{"key":"3_CR65","doi-asserted-by":"crossref","unstructured":"Ward, S.L., Newcombe, N., Overton, W.F.: Turn left at the church, or three miles north a study of direction giving and sex differences. Environ. Behav. (1986)","DOI":"10.1177\/0013916586182003"},{"key":"3_CR66","unstructured":"Wei, J., et\u00a0al.: Chain-of-thought prompting elicits reasoning in large language models. In: NeurIPS (2022)"},{"key":"3_CR67","unstructured":"Yang, Z., Chen, G., Li, X., Wang, W., Yang, Y.: Doraemongpt: toward understanding dynamic scenes with large language models (exemplified as a video agent). In: Forty-first International Conference on Machine Learning (2024)"},{"key":"3_CR68","unstructured":"Zeng, H., Wang, X., Wang, W., Yang, Y.: Kefa: a knowledge enhanced and fine-grained aligned speaker for navigation instruction generation. arXiv preprint arXiv:2307.13368 (2023)"},{"key":"3_CR69","unstructured":"Zhang, R., et al.: Llama-adapter: efficient fine-tuning of language models with zero-init attention. 
arXiv preprint arXiv:2303.16199 (2023)"},{"key":"3_CR70","doi-asserted-by":"crossref","unstructured":"Zhang, Y., Kordjamshidi, P.: VLN-trans, translator for the vision and language navigation agent. In: ACL (2023)","DOI":"10.18653\/v1\/2023.acl-long.737"},{"key":"3_CR71","doi-asserted-by":"crossref","unstructured":"Zhao, Y., et al.: Target-driven structured transformer planner for vision-language navigation. In: ACM MM (2022)","DOI":"10.1145\/3503161.3548281"},{"key":"3_CR72","doi-asserted-by":"crossref","unstructured":"Zhou, K., Yang, J., Loy, C.C., Liu, Z.: Learning to prompt for vision-language models. In: IJCV (2022)","DOI":"10.1007\/s11263-022-01653-1"},{"key":"3_CR73","doi-asserted-by":"crossref","unstructured":"Zhu, F., Liang, X., Zhu, Y., Yu, Q., Chang, X., Liang, X.: Soon: Scenario oriented object navigation with graph-based exploration. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.01250"},{"key":"3_CR74","doi-asserted-by":"crossref","unstructured":"Zhu, W., et al.: Babywalk: going farther in vision-and-language navigation by taking baby steps. In: ACL (2020)","DOI":"10.18653\/v1\/2020.acl-main.229"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-73397-0_3","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,2]],"date-time":"2024-11-02T19:17:43Z","timestamp":1730575063000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-73397-0_3"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,3]]},"ISBN":["9783031733963","9783031733970"],"references-count":74,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-73397-0_3","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,3]]},"assertion":[{"value":"3 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference 
ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}