{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,19]],"date-time":"2026-01-19T20:38:42Z","timestamp":1768855122500,"version":"3.49.0"},"publisher-location":"Cham","reference-count":27,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783032144911","type":"print"},{"value":"9783032144928","type":"electronic"}],"license":[{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026]]},"DOI":"10.1007\/978-3-032-14492-8_4","type":"book-chapter","created":{"date-parts":[[2026,1,19]],"date-time":"2026-01-19T07:07:43Z","timestamp":1768806463000},"page":"41-52","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["CATCH: A Modular Cross-Domain Adaptive Template with\u00a0Hook"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0009-0008-2467-4372","authenticated-orcid":false,"given":"Xinjin","family":"Li","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0007-5839-4371","authenticated-orcid":false,"given":"Yulie","family":"Lu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0005-5629-7901","authenticated-orcid":false,"given":"Jinghan","family":"Cao","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0008-6750-5210","authenticated-orcid":false,"given":"Yu","family":"Ma","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0008-6401-7094","authenticated-orcid":false,"given":"Zhenglin","family":"Li","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0001-3713-1042","authenticated-orcid":false,"given":"Yeyang","family":"Zhou","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,1,20]]},"reference":[{"key":"4_CR1","unstructured":"Banerjee, S., Lavie, A.: Meteor: an automatic metric for MT evaluation with improved correlation with human judgments. In: Proceedings of the ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and\/or Summarization, pp. 65\u201372 (2005)"},{"key":"4_CR2","unstructured":"Chen, J., Sun, X., Zhang, Y., Shao, S., Yang, Y.: Chartqa: a benchmark for question answering about charts with visual and logical reasoning. In: Proceedings of the Annual Meeting of the Association for Computational Linguistics (ACL) (2021)"},{"key":"4_CR3","unstructured":"Chen, Z., et al.: Vision transformer adapter for dense predictions. In: Proceedings of the International Conference on Learning Representations (ICLR) (2023)"},{"key":"4_CR4","doi-asserted-by":"crossref","unstructured":"Cho, K., Pfeiffer, J., Gurevych, I.: Adapterfusion: non-destructive task composition for transfer learning. In: Proceedings of EMNLP (2022)","DOI":"10.18653\/v1\/2021.eacl-main.39"},{"key":"4_CR5","unstructured":"Dai, W., et al.: Instructblip: towards general-purpose vision-language models with instruction tuning (2023). https:\/\/arxiv.org\/abs\/2305.06500"},{"key":"4_CR6","doi-asserted-by":"crossref","unstructured":"Du, Z., et\u00a0al.: Domain-agnostic mutual prompting for unsupervised domain adaptation. arXiv preprint arXiv:2403.02899 (2024)","DOI":"10.1109\/CVPR52733.2024.02206"},{"key":"4_CR7","unstructured":"Gebru, D.M., et\u00a0al.: Mathvqa: math-aware question answering with symbolic expressions. arXiv preprint arXiv:2006.05511 (2020)"},{"key":"4_CR8","unstructured":"Hu, E.J., et al.: Lora: low-rank adaptation of large language models. In: ICLR, vol. 1, no. 2, p. 3 (2022)"},{"key":"4_CR9","first-page":"28541","volume":"36","author":"C Li","year":"2023","unstructured":"Li, C., et al.: Llava-med: training a large language-and-vision assistant for biomedicine in one day. Adv. Neural. Inf. Process. Syst. 36, 28541\u201328564 (2023)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"4_CR10","unstructured":"Li, H., et\u00a0al.: Da-ada: learning domain-aware adapter for domain adaptive object detection. arXiv preprint arXiv:2410.09004 (2024)"},{"key":"4_CR11","unstructured":"Li, J., Li, D., Savarese, S., Hoi, S.: Blip-2: bootstrapping language-image pre-training with frozen image encoders and large language models. In: International Conference on Machine Learning, pp. 19730\u201319742. PMLR (2023)"},{"key":"4_CR12","first-page":"13448","volume":"36","author":"X Li","year":"2023","unstructured":"Li, X., Lian, D., Lu, Z., Bai, J., Chen, Z., Wang, X.: Graphadapter: tuning vision-language models with dual knowledge graph. Adv. Neural. Inf. Process. Syst. 36, 13448\u201313466 (2023)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"4_CR13","unstructured":"Lialin, V., Deshpande, V., Rumshisky, A.: Scaling down to scale up: a guide to parameter-efficient fine-tuning. arXiv preprint arXiv:2303.15647 (2023)"},{"key":"4_CR14","unstructured":"Lin, C.Y.: Rouge: a package for automatic evaluation of summaries. In: Text Summarization Branches Out: Proceedings of the ACL-04 Workshop, pp. 74\u201381 (2004)"},{"key":"4_CR15","first-page":"34892","volume":"36","author":"H Liu","year":"2023","unstructured":"Liu, H., Li, C., Wu, Q., Lee, Y.J.: Visual instruction tuning. Adv. Neural. Inf. Process. Syst. 36, 34892\u201334916 (2023)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"4_CR16","unstructured":"Lobry, S., Gaetano, R., Ienco, D., Ose, K.: Introducing rsvqa-high resolution: a benchmark dataset for remote sensing visual question answering. arXiv preprint arXiv:2108.04698 (2021)"},{"issue":"6","key":"4_CR17","first-page":"5152","volume":"59","author":"S Lobry","year":"2020","unstructured":"Lobry, S., Ienco, D., Gaetano, R., Marconcini, M., Ose, K.: Rsvqa: visual question answering for remote sensing images. IEEE Trans. Geosci. Remote Sens. 59(6), 5152\u20135165 (2020)","journal-title":"IEEE Trans. Geosci. Remote Sens."},{"key":"4_CR18","doi-asserted-by":"crossref","unstructured":"Long, Z., Killick, G., McCreadie, R., Camarasa, G.A.: Multiway-adapter: adapting multimodal large language models for scalable image-text retrieval. In: ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 6580\u20136584. IEEE (2024)","DOI":"10.1109\/ICASSP48485.2024.10446792"},{"key":"4_CR19","doi-asserted-by":"crossref","unstructured":"Papineni, K., Roukos, S., Ward, T., Zhu, W.J.: Bleu: a method for automatic evaluation of machine translation. In: Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pp. 311\u2013318 (2002)","DOI":"10.3115\/1073083.1073135"},{"key":"4_CR20","unstructured":"Rashidi, P., et\u00a0al.: Medvqa: a collection of medical visual question answering datasets. arXiv preprint arXiv:2104.00625 (2021)"},{"key":"4_CR21","unstructured":"Redko, I., Morvant, E., Habrard, A., Sebban, M., Bennani, Y.: A survey on domain adaptation theory. arXiv preprint arXiv:2004.11829, vol. 8, pp. 14\u201330 (2020)"},{"key":"4_CR22","unstructured":"Wang, J., et\u00a0al.: Modular adapter bank with dynamic routing for multimodal models. arXiv preprint arXiv:2404.05789 (2024)"},{"key":"4_CR23","unstructured":"Wang, X., Chen, J., Hu, X., et\u00a0al.: Univl: unified model for vision-language tasks using mixture-of-adapters. arXiv preprint arXiv:2305.07816 (2023)"},{"key":"4_CR24","doi-asserted-by":"crossref","unstructured":"Yang, L., Zhang, R.Y., Wang, Y., Xie, X.: MMA: multi-modal adapter for vision-language models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 23826\u201323837 (2024)","DOI":"10.1109\/CVPR52733.2024.02249"},{"key":"4_CR25","unstructured":"Yue, Z., et\u00a0al.: Domain adaptation for question answering via question classification. In: COLING (2022)"},{"key":"4_CR26","doi-asserted-by":"publisher","first-page":"558","DOI":"10.1007\/978-3-031-24340-0_42","volume-title":"Computational Linguistics and Intelligent Text Processing","author":"Y Zhou","year":"2023","unstructured":"Zhou, Y., et al.: Eagle: An enhanced attention-based strategy by generating answers from learning questions to a remote sensing image. In: Gelbukh, A. (ed.) Computational Linguistics and Intelligent Text Processing, pp. 558\u2013572. Springer, Cham (2023)"},{"key":"4_CR27","unstructured":"Zhu, D., Chen, J., Shen, X., Li, X., Elhoseiny, M.: Minigpt-4: enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592 (2023)"}],"container-title":["Lecture Notes in Computer Science","Advances in Visual Computing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-032-14492-8_4","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,19]],"date-time":"2026-01-19T07:07:47Z","timestamp":1768806467000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-032-14492-8_4"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026]]},"ISBN":["9783032144911","9783032144928"],"references-count":27,"URL":"https:\/\/doi.org\/10.1007\/978-3-032-14492-8_4","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026]]},"assertion":[{"value":"20 January 2026","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ISVC","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Symposium on Visual Computing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Las Vegas, NV","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"USA","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17 November 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"19 November 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"20","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"isvc2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/www.isvc.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}