{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,22]],"date-time":"2026-02-22T07:01:23Z","timestamp":1771743683558,"version":"3.50.1"},"publisher-location":"Singapore","reference-count":32,"publisher":"Springer Nature Singapore","isbn-type":[{"value":"9789819570805","type":"print"},{"value":"9789819570812","type":"electronic"}],"license":[{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026]]},"DOI":"10.1007\/978-981-95-7081-2_21","type":"book-chapter","created":{"date-parts":[[2026,2,22]],"date-time":"2026-02-22T06:46:40Z","timestamp":1771742800000},"page":"316-331","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Focus on\u00a0What Matters: Object-Level Semantic Alignment for\u00a0Multimodal Named Entity Recognition with\u00a0Multiple Images"],"prefix":"10.1007","author":[{"given":"Peng","family":"Fan","sequence":"first","affiliation":[]},{"given":"Yanli","family":"Jin","sequence":"additional","affiliation":[]},{"given":"Yunyu","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Peng","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Xianxian","family":"Li","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,2,23]]},"reference":[{"key":"21_CR1","doi-asserted-by":"crossref","unstructured":"Chen, X., et al.: Good visual 
guidance makes a better extractor: hierarchical visual prefix for multimodal entity and relation extraction. In: Findings of the Association for Computational Linguistics: NAACL 2022, pp. 1607\u20131618 (2022)","DOI":"10.18653\/v1\/2022.findings-naacl.121"},{"key":"21_CR2","doi-asserted-by":"crossref","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: Bert: pre-training of deep bidirectional transformers for language understanding. In: Proceedings of the 2019 conference of the North American chapter of the association for computational linguistics: human language technologies, volume 1 (long and short papers), pp. 4171\u20134186 (2019)","DOI":"10.18653\/v1\/N19-1423"},{"key":"21_CR3","unstructured":"Dosovitskiy, A., et\u00a0al.: An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)"},{"key":"21_CR4","unstructured":"Huang, S., Xu, B., Li, C., Ye, J., Lin, X.: Mner-mi: a multi-image dataset for multimodal named entity recognition in social media. In: Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024), pp. 11452\u201311462 (2024)"},{"key":"21_CR5","unstructured":"Huang, Z., Xu, W., Yu, K.: Bidirectional lstm-crf models for sequence tagging. arXiv preprint arXiv:1508.01991 (2015)"},{"key":"21_CR6","unstructured":"Lafferty, J.D., McCallum, A., Pereira, F.C.: Conditional random fields: probabilistic models for segmenting and labeling sequence data. In: Proceedings of the Eighteenth International Conference on Machine Learning (2001)"},{"key":"21_CR7","doi-asserted-by":"crossref","unstructured":"Lample, G., Ballesteros, M., Subramanian, S., Kawakami, K., Dyer, C.: Neural architectures for named entity recognition. In: Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 
260\u2013270 (2016)","DOI":"10.18653\/v1\/N16-1030"},{"key":"21_CR8","doi-asserted-by":"crossref","unstructured":"Lu, D., Neves, L., Carvalho, V., Zhang, N., Ji, H.: Visual attention model for name tagging in multimodal social media. In: Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 1990\u20131999 (2018)","DOI":"10.18653\/v1\/P18-1185"},{"key":"21_CR9","doi-asserted-by":"crossref","unstructured":"Ma, X., Hovy, E.: End-to-end sequence labeling via bi-directional LSTM-CNNs-CRF. arXiv preprint arXiv:1603.01354 (2016)","DOI":"10.18653\/v1\/P16-1101"},{"key":"21_CR10","doi-asserted-by":"crossref","unstructured":"Moon, S., Neves, L., Carvalho, V.: Multimodal named entity recognition for short social media posts. arXiv preprint arXiv:1802.07862 (2018)","DOI":"10.18653\/v1\/N18-1078"},{"key":"21_CR11","doi-asserted-by":"crossref","unstructured":"Ok, H., Kil, T., Seo, S., Lee, J.: Scanner: knowledge-enhanced approach for robust multi-modal named entity recognition of unseen entities. arXiv preprint arXiv:2404.01914 (2024)","DOI":"10.18653\/v1\/2024.naacl-long.427"},{"key":"21_CR12","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning, pp. 8748\u20138763. PMLR (2021)"},{"key":"21_CR13","unstructured":"Ren, S., He, K., Girshick, R., Sun, J.: Faster r-CNN: Towards real-time object detection with region proposal networks. In: Advances in Neural Information Processing Systems 28 (NIPS 2015), pp. 91\u201399 (2015)"},{"key":"21_CR14","doi-asserted-by":"crossref","unstructured":"Sang, E.F.T.K., Veenstra, J.: Representing text chunks. In: Proceedings of the Ninth Conference of the European Chapter of the Association for Computational Linguistics, pp. 173\u2013179. 
Bergen, Norway (1999)","DOI":"10.3115\/977035.977059"},{"key":"21_CR15","doi-asserted-by":"crossref","unstructured":"Sun, L., Wang, J., Zhang, K., Su, Y., Weng, F.: Rpbert: a text-image relation propagation-based BERT model for multimodal NER. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a035, pp. 13860\u201313868 (2021)","DOI":"10.1609\/aaai.v35i15.17633"},{"key":"21_CR16","doi-asserted-by":"crossref","unstructured":"Tsai, Y.H.H., Bai, S., Liang, P.P., Kolter, J.Z., Morency, L.P., Salakhutdinov, R.: Multimodal transformer for unaligned multimodal language sequences. In: Proceedings of the Conference Association for computational linguistics Meeting, vol.\u00a02019, p.\u00a06558 (2019)","DOI":"10.18653\/v1\/P19-1656"},{"key":"21_CR17","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Advances in Neural Information Processing Systems, vol. 30 (2017)"},{"key":"21_CR18","doi-asserted-by":"crossref","unstructured":"Wang, X., Cai, J., Jiang, Y., Xie, P., Tu, K., Lu, W.: Named entity and relation extraction with multi-modal retrieval. arXiv preprint arXiv:2212.01612 (2022)","DOI":"10.18653\/v1\/2022.findings-emnlp.437"},{"key":"21_CR19","doi-asserted-by":"crossref","unstructured":"Wang, X., et al.: Promptmner: prompt-based entity-related visual clue extraction and integration for multimodal named entity recognition. In: International Conference on Database Systems for Advanced Applications, pp. 297\u2013305. Springer (2022)","DOI":"10.1007\/978-3-031-00129-1_24"},{"key":"21_CR20","doi-asserted-by":"crossref","unstructured":"Wu, Z., Zheng, C., Cai, Y., Chen, J., Leung, H.f., Li, Q.: Multimodal representation with embedded visual guiding objects for named entity recognition in social media posts. In: Proceedings of the 28th ACM International Conference on Multimedia, pp. 1038\u20131046 (2020)","DOI":"10.1145\/3394171.3413650"},{"key":"21_CR21","unstructured":"Xu, B., et al.: Different data, different modalities! 
reinforced data splitting for effective multimodal information extraction from social media posts. In: Proceedings of the 29th International Conference on Computational Linguistics, pp. 1855\u20131864 (2022)"},{"key":"21_CR22","doi-asserted-by":"crossref","unstructured":"Xu, B., et al.: A unified visual prompt tuning framework with mixture-of-experts for multimodal information extraction. In: International Conference on Database Systems for Advanced Applications, pp. 544\u2013554. Springer (2023)","DOI":"10.1007\/978-3-031-30675-4_40"},{"key":"21_CR23","doi-asserted-by":"crossref","unstructured":"Xu, B., Huang, S., Sha, C., Wang, H.: MAF: a general matching and alignment framework for multimodal named entity recognition. In: Proceedings of the Fifteenth ACM International Conference on Web Search and Data Mining, pp. 1215\u20131223 (2022)","DOI":"10.1145\/3488560.3498475"},{"key":"21_CR24","unstructured":"Xu, B., et al.: Enhancing multimodal named entity recognition through adaptive mixup image augmentation. In: Proceedings of the 31st International Conference on Computational Linguistics, pp. 1802\u20131812 (2025)"},{"key":"21_CR25","doi-asserted-by":"crossref","unstructured":"Yu, J., Jiang, J., Yang, L., Xia, R.: Improving multimodal named entity recognition via entity span detection with unified multimodal transformer. In: Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 3342\u20133352 (2020)","DOI":"10.18653\/v1\/2020.acl-main.306"},{"key":"21_CR26","unstructured":"Zelenko, D., Aone, C., Richardella, A.: Kernel methods for relation extraction. J. Mach. Learn. Res. 3(Feb), 1083\u20131106 (2003)"},{"key":"21_CR27","doi-asserted-by":"crossref","unstructured":"Zhang, D., et al.: Multi-modal multi-label emotion recognition with heterogeneous hierarchical message passing. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a035, pp. 
14338\u201314346 (2021)","DOI":"10.1609\/aaai.v35i16.17686"},{"key":"21_CR28","doi-asserted-by":"crossref","unstructured":"Zhang, D., Wei, S., Li, S., Wu, H., Zhu, Q., Zhou, G.: Multi-modal graph fusion for named entity recognition with targeted visual guidance. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a035, pp. 14347\u201314355 (2021)","DOI":"10.1609\/aaai.v35i16.17687"},{"key":"21_CR29","doi-asserted-by":"crossref","unstructured":"Zhang, Q., Fu, J., Liu, X., Huang, X.: Adaptive co-attention network for named entity recognition in tweets. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a032 (2018)","DOI":"10.1609\/aaai.v32i1.11962"},{"key":"21_CR30","unstructured":"Zhao, J., Huang, S., Lin, X.: A graph interaction framework on relevance for multimodal named entity recognition with multiple images. In: Proceedings of the 31st International Conference on Computational Linguistics, pp. 1237\u20131246 (2025)"},{"key":"21_CR31","doi-asserted-by":"publisher","first-page":"2520","DOI":"10.1109\/TMM.2020.3013398","volume":"23","author":"C Zheng","year":"2020","unstructured":"Zheng, C., Wu, Z., Wang, T., Cai, Y., Li, Q.: Object-aware multimodal named entity recognition in social media posts with adversarial learning. IEEE Trans. Multimedia 23, 2520\u20132532 (2020)","journal-title":"IEEE Trans. Multimedia"},{"key":"21_CR32","unstructured":"Zhu, D., Chen, J., Shen, X., Li, X., Elhoseiny, M.: Minigpt-4: Enhancing vision-language understanding with advanced large language models. 
arXiv preprint arXiv:2304.10592 (2023)"}],"container-title":["Lecture Notes in Computer Science","PRICAI 2025: Trends in Artificial Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-95-7081-2_21","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,22]],"date-time":"2026-02-22T06:46:48Z","timestamp":1771742808000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-95-7081-2_21"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026]]},"ISBN":["9789819570805","9789819570812"],"references-count":32,"URL":"https:\/\/doi.org\/10.1007\/978-981-95-7081-2_21","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026]]},"assertion":[{"value":"23 February 2026","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"PRICAI","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Pacific Rim International Conference on Artificial Intelligence","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Wellington","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"New Zealand","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17 November 
2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"21 November 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"22","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"pricai2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/www.pricai.org\/2025\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}