{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,7]],"date-time":"2026-03-07T20:00:47Z","timestamp":1772913647422,"version":"3.50.1"},"publisher-location":"Singapore","reference-count":21,"publisher":"Springer Nature Singapore","isbn-type":[{"value":"9789819777068","type":"print"},{"value":"9789819777075","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-981-97-7707-5_30","type":"book-chapter","created":{"date-parts":[[2024,9,16]],"date-time":"2024-09-16T15:04:02Z","timestamp":1726499042000},"page":"364-376","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":4,"title":["Instruction Tuning Large Language Models for\u00a0Multimodal Relation Extraction Using LoRA"],"prefix":"10.1007","author":[{"given":"Zou","family":"Li","sequence":"first","affiliation":[]},{"given":"Ning","family":"Pang","sequence":"additional","affiliation":[]},{"given":"Xiang","family":"Zhao","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,9,11]]},"reference":[{"key":"30_CR1","doi-asserted-by":"crossref","unstructured":"Chen, X., et al.: Good visual guidance makes a better extractor: hierarchical visual prefix for multimodal entity and relation extraction. arXiv preprint arXiv:2205.03521 (2022)","DOI":"10.18653\/v1\/2022.findings-naacl.121"},{"issue":"3","key":"30_CR2","doi-asserted-by":"publisher","first-page":"103264","DOI":"10.1016\/j.ipm.2023.103264","volume":"60","author":"Q Zhao","year":"2023","unstructured":"Zhao, Q., Gao, T., Guo, N.: TSVFN: two-stage visual fusion network for multimodal relation extraction. Inf. Process. Manage. 60(3), 103264 (2023)","journal-title":"Inf. Process. Manage."},{"key":"30_CR3","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: International conference on machine learning. PMLR (2021)"},{"key":"30_CR4","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"319","DOI":"10.1007\/978-981-99-6222-8_27","volume-title":"Web Information Systems and Applications","author":"Z Kong","year":"2023","unstructured":"Kong, Z., Li, W., Zhang, H., Yuan, X.: FocusCap: object-focused image captioning with clip-guided language model. In: Yuan, L., Yang, S., Li, R., Kanoulas, E., Zhao, X. (eds.) WISA 2023. LNCS, vol. 14094, pp. 319\u2013330. Springer, Singapore (2023). https:\/\/doi.org\/10.1007\/978-981-99-6222-8_27"},{"key":"30_CR5","unstructured":"Li, J., Li, D., Xiong, C., Hoi, S.: BLIP: bootstrapping language-image pre-training for unified vision-language understanding and generation. In: International Conference on Machine Learning, pp. 12888\u201312900. PMLR (2022)"},{"key":"30_CR6","unstructured":"Zhang, S., et\u00a0al.: Instruction tuning for large language models: a survey. arXiv preprint arXiv:2308.10792 (2023)"},{"key":"30_CR7","doi-asserted-by":"crossref","unstructured":"Liu, H., Li, C., Li, Y., Lee, Y.J.: Improved baselines with visual instruction tuning. arXiv preprint arXiv:2310.03744 (2023)","DOI":"10.1109\/CVPR52733.2024.02484"},{"key":"30_CR8","first-page":"2507","volume":"35","author":"P Lu","year":"2022","unstructured":"Lu, P., et al.: Learn to explain: multimodal reasoning via thought chains for science question answering. Adv. Neural. Inf. Process. Syst. 35, 2507\u20132521 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"30_CR9","doi-asserted-by":"crossref","unstructured":"Zheng, C., Wu, Z., Feng, J., Fu, Z., Cai, Y.: MNRE: a challenge multimodal dataset for neural relation extraction with visual evidence in social media posts. In: 2021 IEEE International Conference on Multimedia and Expo (ICME). IEEE (2021)","DOI":"10.1109\/ICME51207.2021.9428274"},{"key":"30_CR10","doi-asserted-by":"crossref","unstructured":"Zheng, C., Feng, J., Fu, Z., Cai, Y., Li, Q., Wang, T.: Multimodal relation extraction with efficient graph alignment. In: Proceedings of the 29th ACM International Conference on Multimedia, pp. 5298\u20135306 (2021)","DOI":"10.1145\/3474085.3476968"},{"key":"30_CR11","unstructured":"Li, L., Chen, X., Qiao, S., Xiong, F., et\u00a0al.: On analyzing the role of image for visual-enhanced relation extraction. arXiv preprint arXiv:2211.07504 (2022)"},{"key":"30_CR12","unstructured":"Liu, H., Li, C., Wu, Q., Lee, Y.J.: Visual instruction tuning. Adv. Neural Inf. Process. Syst. 36 (2024)"},{"key":"30_CR13","unstructured":"Wang, W., Lv, Q., Yu, W., Hong, W., Qi, X., et\u00a0al.: CogVLM: visual expert for pretrained language models. arXiv preprint arXiv:2311.03079 (2023)"},{"key":"30_CR14","unstructured":"Bai, J., et al.: Qwen-VL: a versatile vision-language model for understanding, localization, text reading, and beyond. arXiv preprint arXiv:2308.12966 (2023)"},{"key":"30_CR15","doi-asserted-by":"crossref","unstructured":"Zhang, K., Guti\u00e9rrez, B.J.: Aligning instruction tasks unlocks large language models as zero-shot relation extractors. arXiv preprint arXiv:2305.11159 (2023)","DOI":"10.18653\/v1\/2023.findings-acl.50"},{"key":"30_CR16","unstructured":"Sun, Y., Zhang, K., Su, Y.: Multimodal question answering for unified information extraction. arXiv preprint arXiv:2310.03017 (2023)"},{"key":"30_CR17","doi-asserted-by":"crossref","unstructured":"Zeng, D., Liu, K., Chen, Y., Zhao, J.: Distant supervision for relation extraction via piecewise convolutional neural networks. In: Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, pp. 1753\u20131762 (2015)","DOI":"10.18653\/v1\/D15-1203"},{"key":"30_CR18","unstructured":"Soares, L.B., FitzGerald, N., Ling, J., Kwiatkowski, T.: Matching the blanks: distributional similarity for relation learning. arXiv preprint arXiv:1906.03158 (2019)"},{"key":"30_CR19","doi-asserted-by":"crossref","unstructured":"Yu, J., Jiang, J., Yang, L., Xia, R.: Improving multimodal named entity recognition via entity span detection with unified multimodal transformer. In: Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics (2020)","DOI":"10.18653\/v1\/2020.acl-main.306"},{"key":"30_CR20","doi-asserted-by":"crossref","unstructured":"Zhang, D., Wei, S., Li, S., Wu, H., Zhu, Q., Zhou, G.: Multi-modal graph fusion for named entity recognition with targeted visual guidance. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a035, pp. 14347\u201314355 (2021)","DOI":"10.1609\/aaai.v35i16.17687"},{"key":"30_CR21","doi-asserted-by":"crossref","unstructured":"Wang, X., Cai, J., Jiang, Y., Xie, P., Tu, K., Lu, W.: Named entity and relation extraction with multi-modal retrieval. arXiv preprint arXiv:2212.01612 (2022)","DOI":"10.18653\/v1\/2022.findings-emnlp.437"}],"container-title":["Lecture Notes in Computer Science","Web Information Systems and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-97-7707-5_30","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,28]],"date-time":"2024-11-28T06:31:56Z","timestamp":1732775516000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-97-7707-5_30"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"ISBN":["9789819777068","9789819777075"],"references-count":21,"URL":"https:\/\/doi.org\/10.1007\/978-981-97-7707-5_30","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]},"assertion":[{"value":"11 September 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"WISA","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Web Information Systems and Applications","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Yinchuan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"1 August 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"3 August 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"21","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"wisa22024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/conf.ccf.org.cn\/web\/html7\/index.html?globalId=m1216704987858604032171012667439&type=1","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}