{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,14]],"date-time":"2026-04-14T03:48:46Z","timestamp":1776138526688,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":64,"publisher":"ACM","license":[{"start":{"date-parts":[[2023,10,26]],"date-time":"2023-10-26T00:00:00Z","timestamp":1698278400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"funder":[{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2019YFB1704003"],"award-info":[{"award-number":["2019YFB1704003"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"name":"nsf","award":["III-1763325, III-1909323, III-2106758, SaTC-1930941"],"award-info":[{"award-number":["III-1763325, III-1909323, III-2106758, SaTC-1930941"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2023,10,26]]},"DOI":"10.1145\/3581783.3611899","type":"proceedings-article","created":{"date-parts":[[2023,10,27]],"date-time":"2023-10-27T07:27:12Z","timestamp":1698391632000},"page":"5185-5194","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":24,"title":["Prompt Me Up: Unleashing the Power of Alignments for Multimodal Entity and Relation Extraction"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-6075-4224","authenticated-orcid":false,"given":"Xuming","family":"Hu","sequence":"first","affiliation":[{"name":"Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-7573-0707","authenticated-orcid":false,"given":"Junzhe","family":"Chen","sequence":"additional","affiliation":[{"name":"Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4965-8263","authenticated-orcid":false,"given":"Aiwei","family":"Liu","sequence":"additional","affiliation":[{"name":"Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-1685-1874","authenticated-orcid":false,"given":"Shiao","family":"Meng","sequence":"additional","affiliation":[{"name":"Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0358-3160","authenticated-orcid":false,"given":"Lijie","family":"Wen","sequence":"additional","affiliation":[{"name":"Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3491-5968","authenticated-orcid":false,"given":"Philip S.","family":"Yu","sequence":"additional","affiliation":[{"name":"University of Illinois at Chicago, Chicago, IL, USA"}]}],"member":"320","published-online":{"date-parts":[[2023,10,27]]},"reference":[{"key":"e_1_3_2_1_1_1","first-page":"24206","article-title":"Vatt: Transformers for multimodal self-supervised learning from raw video, audio and text","volume":"34","author":"Akbari Hassan","year":"2021","unstructured":"Hassan Akbari, Liangzhe Yuan, Rui Qian, Wei-Hong Chuang, Shih-Fu Chang, Yin Cui, and Boqing Gong. 2021. Vatt: Transformers for multimodal self-supervised learning from raw video, audio and text. Advances in Neural Information Processing Systems, Vol. 
34 (2021), 24206--24221.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_2_1","first-page":"25","article-title":"Self-supervised multimodal versatile networks","volume":"33","author":"Alayrac Jean-Baptiste","year":"2020","unstructured":"Jean-Baptiste Alayrac, Adria Recasens, Rosalia Schneider, Relja Arandjelovi\u0107, Jason Ramapuram, Jeffrey De Fauw, Lucas Smaira, Sander Dieleman, and Andrew Zisserman. 2020. Self-supervised multimodal versatile networks. Advances in Neural Information Processing Systems, Vol. 33 (2020), 25--37.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i8.16822"},{"key":"e_1_3_2_1_4_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICDAR.2019.00061"},{"key":"e_1_3_2_1_5_1","volume-title":"Multimodal machine learning: A survey and taxonomy","author":"Baltru\u0161aitis Tadas","year":"2018","unstructured":"Tadas Baltru\u0161aitis, Chaitanya Ahuja, and Louis-Philippe Morency. 2018. Multimodal machine learning: A survey and taxonomy. IEEE transactions on pattern analysis and machine intelligence, Vol. 41, 2 (2018), 423--443."},{"key":"e_1_3_2_1_6_1","volume-title":"A survey on deep multimodal learning for computer vision: advances, trends, applications, and datasets. The Visual Computer","author":"Bayoudh Khaled","year":"2021","unstructured":"Khaled Bayoudh, Raja Knani, Fay\u00e7al Hamdaoui, and Abdellatif Mtibaa. 2021. A survey on deep multimodal learning for computer vision: advances, trends, applications, and datasets. The Visual Computer (2021), 1--32."},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-naacl.121"},{"key":"e_1_3_2_1_8_1","volume-title":"UK","author":"Chen Yen-Chun","year":"2020","unstructured":"Yen-Chun Chen, Linjie Li, Licheng Yu, Ahmed El Kholy, Faisal Ahmed, Zhe Gan, Yu Cheng, and Jingjing Liu. 2020. Uniter: Universal image-text representation learning. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXX. Springer, 104--120."},{"key":"e_1_3_2_1_9_1","volume-title":"Enhancing Multimodal Entity and Relation Extraction with Variational Information Bottleneck. arXiv preprint arXiv:2304.02328","author":"Cui Shiyao","year":"2023","unstructured":"Shiyao Cui, Jiangxia Cao, Xin Cong, Jiawei Sheng, Quangang Li, Tingwen Liu, and Jinqiao Shi. 2023. Enhancing Multimodal Entity and Relation Extraction with Variational Information Bottleneck. arXiv preprint arXiv:2304.02328 (2023)."},{"key":"e_1_3_2_1_10_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"e_1_3_2_1_11_1","volume-title":"Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies","volume":"1","author":"Devlin Jacob","year":"2019","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers). 
4171--4186."},{"key":"e_1_3_2_1_12_1","volume-title":"9th International Conference on Learning Representations, ICLR 2021","author":"Dosovitskiy Alexey","year":"2021","unstructured":"Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. 2021. An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net."},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.1109\/CBMI.2010.5529882"},{"key":"e_1_3_2_1_14_1","volume-title":"Stochastic neighbor embedding. Advances in neural information processing systems","author":"Hinton Geoffrey E","year":"2002","unstructured":"Geoffrey E Hinton and Sam Roweis. 2002. Stochastic neighbor embedding. Advances in neural information processing systems, Vol. 15 (2002)."},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00947"},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1145\/3539618.3592058"},{"key":"e_1_3_2_1_17_1","volume-title":"2023 b. Multimodal Relation Extraction with Cross-Modal Retrieval and Synthesis. arXiv preprint arXiv:2305.16166","author":"Hu Xuming","year":"2023","unstructured":"Xuming Hu, Zhijiang Guo, Zhiyang Teng, Irwin King, and Philip S Yu. 2023 b. Multimodal Relation Extraction with Cross-Modal Retrieval and Synthesis. arXiv preprint arXiv:2305.16166 (2023)."},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.1145\/3539618.3592072"},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-acl.578"},{"key":"e_1_3_2_1_20_1","volume-title":"2023 e. GDA: Generative Data Augmentation Techniques for Relation Extraction Tasks. arXiv preprint arXiv:2305.16663","author":"Hu Xuming","year":"2023","unstructured":"Xuming Hu, Aiwei Liu, Zeqi Tan, Xin Zhang, Chenwei Zhang, Irwin King, and Philip S Yu. 2023 e. GDA: Generative Data Augmentation Techniques for Relation Extraction Tasks. arXiv preprint arXiv:2305.16663 (2023)."},{"key":"e_1_3_2_1_21_1","volume-title":"Yu","author":"Hu Xuming","year":"2020","unstructured":"Xuming Hu, Lijie Wen, Yusong Xu, Chenwei Zhang, and Philip S. Yu. 2020. SelfORE: Self-supervised Relational Feature Learning for Open Relation Extraction. In Proc. of EMNLP. 3673--3682."},{"key":"e_1_3_2_1_22_1","volume-title":"Yu","author":"Hu Xuming","year":"2021","unstructured":"Xuming Hu, Chenwei Zhang, Fukun Ma, Chenyao Liu, Lijie Wen, and Philip S. Yu. 2021a. Semi-supervised Relation Extraction via Incremental Meta Self-Training. In Findings of EMNLP. 487--496."},{"key":"e_1_3_2_1_23_1","volume-title":"Yu","author":"Hu Xuming","year":"2021","unstructured":"Xuming Hu, Chenwei Zhang, Yawen Yang, Xiaohe Li, Li Lin, Lijie Wen, and Philip S. Yu. 2021b. Gradient Imitation Reinforcement Learning for Low Resource Relation Extraction. In Proc. of EMNLP. 2737--2746."},{"key":"e_1_3_2_1_24_1","volume-title":"International Conference on Machine Learning. PMLR, 5583--5594","author":"Kim Wonjae","year":"2021","unstructured":"Wonjae Kim, Bokyung Son, and Ildoo Kim. 2021. Vilt: Vision-and-language transformer without convolution or region supervision. In International Conference on Machine Learning. 
PMLR, 5583--5594."},{"key":"e_1_3_2_1_25_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00490"},{"key":"e_1_3_2_1_26_1","volume-title":"International Conference on Machine Learning. PMLR, 12888--12900","author":"Li Junnan","year":"2022","unstructured":"Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. 2022c. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In International Conference on Machine Learning. PMLR, 12888--12900."},{"key":"e_1_3_2_1_27_1","volume-title":"2023 a. On Analyzing the Role of Image for Visual-enhanced Relation Extraction. AAAI (Student Abstract)","author":"Li Lei","year":"2023","unstructured":"Lei Li, Xiang Chen, Shuofei Qiao, Feiyu Xiong, Huajun Chen, and Ningyu Zhang. 2023 a. On Analyzing the Role of Image for Visual-enhanced Relation Extraction. AAAI (Student Abstract) (2023)."},{"key":"e_1_3_2_1_28_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.naacl-main.420"},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2023.3270771"},{"key":"e_1_3_2_1_30_1","volume-title":"Pair-Level Supervised Contrastive Learning for Natural Language Inference. arXiv preprint arXiv:2201.10927","author":"Li Shu'ang","year":"2022","unstructured":"Shu'ang Li, Xuming Hu, Li Lin, and Lijie Wen. 2022a. Pair-Level Supervised Contrastive Learning for Natural Language Inference. arXiv preprint arXiv:2201.10927 (2022)."},{"key":"e_1_3_2_1_31_1","volume-title":"Oscar: Object-semantics aligned pre-training for vision-language tasks. In Computer Vision-ECCV 2020: 16th European Conference","author":"Li Xiujun","year":"2020","unstructured":"Xiujun Li, Xi Yin, Chunyuan Li, Pengchuan Zhang, Xiaowei Hu, Lei Zhang, Lijuan Wang, Houdong Hu, Li Dong, Furu Wei, et al. 2020. Oscar: Object-semantics aligned pre-training for vision-language tasks. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXX 16. Springer, 121--137."},{"key":"e_1_3_2_1_32_1","volume-title":"Multimodal Learning on Graphs for Disease Relation Extraction. arXiv preprint arXiv:2203.08893","author":"Lin Yucong","year":"2022","unstructured":"Yucong Lin, Keming Lu, Sheng Yu, Tianxi Cai, and Marinka Zitnik. 2022. Multimodal Learning on Graphs for Disease Relation Extraction. arXiv preprint arXiv:2203.08893 (2022)."},{"key":"e_1_3_2_1_33_1","doi-asserted-by":"publisher","DOI":"10.1145\/3534678.3539294"},{"key":"e_1_3_2_1_34_1","volume-title":"2023 a. A comprehensive evaluation of ChatGPT's zero-shot Text-to-SQL capability. arXiv preprint arXiv:2303.13547","author":"Liu Aiwei","year":"2023","unstructured":"Aiwei Liu, Xuming Hu, Lijie Wen, and Philip S Yu. 2023 a. A comprehensive evaluation of ChatGPT's zero-shot Text-to-SQL capability. arXiv preprint arXiv:2303.13547 (2023)."},{"key":"e_1_3_2_1_35_1","volume-title":"2023 b. Exploring the Compositional Generalization in Context Dependent Text-to-SQL Parsing. arXiv preprint arXiv:2306.04480","author":"Liu Aiwei","year":"2023","unstructured":"Aiwei Liu, Wei Liu, Xuming Hu, Shuang Li, Fukun Ma, Yawen Yang, and Lijie Wen. 2023 b. Exploring the Compositional Generalization in Context Dependent Text-to-SQL Parsing. arXiv preprint arXiv:2306.04480 (2023)."},{"key":"e_1_3_2_1_36_1","volume-title":"Yu","author":"Liu Shuliang","year":"2022","unstructured":"Shuliang Liu, Xuming Hu, Chenwei Zhang, Shu'ang Li, Lijie Wen, and Philip S. Yu. 2022b. HiURE: Hierarchical Exemplar Contrastive Learning for Unsupervised Relation Extraction. In Proc. 
of NAACL-HLT. 5970--5980."},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"publisher","DOI":"10.1145\/3343031.3351046"},{"key":"e_1_3_2_1_38_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P18-1185"},{"key":"e_1_3_2_1_39_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P16-1101"},{"key":"e_1_3_2_1_40_1","volume-title":"Corpus, Guidelines, Methods and Results. IberLEF@ SEPLN","author":"Miranda-Escalada Antonio","year":"2020","unstructured":"Antonio Miranda-Escalada, Eul\u00e0lia Farr\u00e9, and Martin Krallinger. 2020. Named Entity Recognition, Concept Normalization and Clinical Coding: Overview of the Cantemist Track for Cancer Text Mining in Spanish, Corpus, Guidelines, Methods and Results. IberLEF@ SEPLN (2020), 303--323."},{"key":"e_1_3_2_1_41_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N18-1078"},{"key":"e_1_3_2_1_42_1","volume-title":"When does label smoothing help? Advances in neural information processing systems","author":"M\u00fcller Rafael","year":"2019","unstructured":"Rafael M\u00fcller, Simon Kornblith, and Geoffrey E Hinton. 2019. When does label smoothing help? Advances in neural information processing systems, Vol. 32 (2019)."},{"key":"e_1_3_2_1_43_1","doi-asserted-by":"publisher","DOI":"10.1007\/s10462-022-10174-9"},{"key":"e_1_3_2_1_44_1","volume-title":"International conference on machine learning. PMLR, 8748--8763","author":"Radford Alec","year":"2021","unstructured":"Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. 2021. Learning transferable visual models from natural language supervision. In International conference on machine learning. PMLR, 8748--8763."},{"key":"e_1_3_2_1_45_1","volume-title":"Deep multimodal learning: A survey on recent advances and trends","author":"Ramachandram Dhanesh","year":"2017","unstructured":"Dhanesh Ramachandram and Graham W Taylor. 2017. Deep multimodal learning: A survey on recent advances and trends. IEEE signal processing magazine, Vol. 34, 6 (2017), 96--108."},{"key":"e_1_3_2_1_46_1","volume-title":"Self-supervised learning for videos: A survey. Comput. Surveys","author":"Schiappa Madeline C","year":"2022","unstructured":"Madeline C Schiappa, Yogesh S Rawat, and Mubarak Shah. 2022. Self-supervised learning for videos: A survey. Comput. Surveys (2022)."},{"key":"e_1_3_2_1_47_1","volume-title":"Video transformers: A survey","author":"Selva Javier","year":"2023","unstructured":"Javier Selva, Anders S Johansen, Sergio Escalera, Kamal Nasrollahi, Thomas B Moeslund, and Albert Clap\u00e9s. 2023. Video transformers: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence (2023)."},{"key":"e_1_3_2_1_48_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1279"},{"key":"e_1_3_2_1_49_1","doi-asserted-by":"publisher","DOI":"10.1145\/3240508.3240563"},{"key":"e_1_3_2_1_50_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i15.17633"},{"key":"e_1_3_2_1_51_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00377"},{"key":"e_1_3_2_1_52_1","volume-title":"Survey on Self-Supervised Multimodal Representation Learning and Foundation Models. arXiv preprint arXiv:2211.15837","author":"Thapa Sushil","year":"2022","unstructured":"Sushil Thapa. 2022. Survey on Self-Supervised Multimodal Representation Learning and Foundation Models. arXiv preprint arXiv:2211.15837 (2022)."},{"key":"e_1_3_2_1_53_1","volume-title":"Attention is all you need. 
Advances in neural information processing systems","author":"Vaswani Ashish","year":"2017","unstructured":"Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. Advances in neural information processing systems, Vol. 30 (2017)."},{"key":"e_1_3_2_1_54_1","volume-title":"YOLOv7: Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors. arXiv preprint arXiv:2207.02696","author":"Wang Chien-Yao","year":"2022","unstructured":"Chien-Yao Wang, Alexey Bochkovskiy, and Hong-Yuan Mark Liao. 2022a. YOLOv7: Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors. arXiv preprint arXiv:2207.02696 (2022)."},{"key":"e_1_3_2_1_55_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-emnlp.437"},{"key":"e_1_3_2_1_56_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICPR56361.2022.9956697"},{"key":"e_1_3_2_1_57_1","volume-title":"Visual chatgpt: Talking, drawing and editing with visual foundation models. arXiv preprint arXiv:2303.04671","author":"Wu Chenfei","year":"2023","unstructured":"Chenfei Wu, Shengming Yin, Weizhen Qi, Xiaodong Wang, Zecheng Tang, and Nan Duan. 2023. Visual chatgpt: Talking, drawing and editing with visual foundation models. arXiv preprint arXiv:2303.04671 (2023)."},{"key":"e_1_3_2_1_58_1","doi-asserted-by":"publisher","DOI":"10.1145\/3394171.3413650"},{"key":"e_1_3_2_1_59_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.306"},{"key":"e_1_3_2_1_60_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i16.17687"},{"key":"e_1_3_2_1_61_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11962"},{"key":"e_1_3_2_1_62_1","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3476968"},{"key":"e_1_3_2_1_63_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICME51207.2021.9428274"},{"key":"e_1_3_2_1_64_1","volume-title":"Oisin Mac Aodha, and Timothy Hospedales","author":"Zong Yongshuo","year":"2023","unstructured":"Yongshuo Zong, Oisin Mac Aodha, and Timothy Hospedales. 2023. Self-Supervised Multimodal Learning: A Survey. 
arXiv preprint arXiv:2304.01008 (2023)."}],"event":{"name":"MM '23: The 31st ACM International Conference on Multimedia","location":"Ottawa ON Canada","acronym":"MM '23","sponsor":["SIGMM ACM Special Interest Group on Multimedia"]},"container-title":["Proceedings of the 31st ACM International Conference on Multimedia"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3581783.3611899","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3581783.3611899","content-type":"application\/pdf","content-version":"vor","intended-application":"syndication"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3581783.3611899","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T00:02:32Z","timestamp":1755820952000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3581783.3611899"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,10,26]]},"references-count":64,"alternative-id":["10.1145\/3581783.3611899","10.1145\/3581783"],"URL":"https:\/\/doi.org\/10.1145\/3581783.3611899","relation":{},"subject":[],"published":{"date-parts":[[2023,10,26]]},"assertion":[{"value":"2023-10-27","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
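A record of this shape is what Crossref's documented works route returns for a DOI. Below is a minimal sketch of fetching and parsing it, assuming network access and the third-party `requests` package; only fields present in the record above (title, author, container-title, issued, page) are used.

```python
# Sketch: retrieve this work record from the public Crossref REST API
# (https://api.crossref.org/works/<DOI>) and print a short citation.
import requests

DOI = "10.1145/3581783.3611899"  # DOI taken from the record above

resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
msg = resp.json()["message"]  # payload mirrors the "message" object above

authors = ", ".join(f"{a['given']} {a['family']}" for a in msg.get("author", []))
title = msg["title"][0]
venue = msg["container-title"][0]
year = msg["issued"]["date-parts"][0][0]  # e.g. [[2023, 10, 26]] -> 2023

print(f"{authors}. {year}. {title}. In {venue}, pp. {msg['page']}.")
```

Run as-is, this would print an ACM-style one-liner ending in "Proceedings of the 31st ACM International Conference on Multimedia, pp. 5185-5194." Note that `title` and `container-title` are arrays in the Crossref schema, hence the `[0]` indexing.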