{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T00:10:06Z","timestamp":1755821406973,"version":"3.44.0"},"publisher-location":"New York, NY, USA","reference-count":55,"publisher":"ACM","license":[{"start":{"date-parts":[[2023,10,26]],"date-time":"2023-10-26T00:00:00Z","timestamp":1698278400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2023,10,26]]},"DOI":"10.1145\/3581783.3612248","type":"proceedings-article","created":{"date-parts":[[2023,10,27]],"date-time":"2023-10-27T07:27:30Z","timestamp":1698391650000},"page":"5665-5673","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":2,"title":["Expand BERT Representation with Visual Information via Grounded Language Learning with Multimodal Partial Alignment"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-0931-460X","authenticated-orcid":false,"given":"Cong-Duy","family":"Nguyen","sequence":"first","affiliation":[{"name":"Nanyang Technological University, Singapore, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4480-5535","authenticated-orcid":false,"given":"The-Anh","family":"Vu-Le","sequence":"additional","affiliation":[{"name":"University of Illinois Urbana-Champaign Illinois, Urbana-Champaign, IL, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7447-7416","authenticated-orcid":false,"given":"Thong","family":"Nguyen","sequence":"additional","affiliation":[{"name":"National University of Singapore, Singapore, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0467-6254","authenticated-orcid":false,"given":"Tho","family":"Quan","sequence":"additional","affiliation":[{"name":"Ho Chi Minh City University of Technology (HCMUT), VNU-HCM, Ho Chi Minh city, Vietnam"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6062-207X","authenticated-orcid":false,"given":"Anh-Tuan","family":"Luu","sequence":"additional","affiliation":[{"name":"Nanyang Technological University, Singapore, Singapore"}]}],"member":"320","published-online":{"date-parts":[[2023,10,27]]},"reference":[{"key":"e_1_3_2_2_1_1","volume-title":"Turian","author":"Bisk Yonatan","year":"2020","unstructured":"Yonatan Bisk, Ari Holtzman, Jesse Thomason, Jacob Andreas, Yoshua Bengio, Joyce Chai, Mirella Lapata, Angeliki Lazaridou, Jonathan May, Aleksandr Nisnevich, Nicolas Pinto, and Joseph P. Turian. 2020. Experience Grounds Language. CoRR, Vol. abs\/2004.10151 (2020). arxiv: 2004.10151 https:\/\/arxiv.org\/abs\/2004.10151"},{"key":"e_1_3_2_2_2_1","doi-asserted-by":"crossref","unstructured":"Patrick Bordes \u00c9loi Zablocki Laure Soulier Benjamin Piwowarski and Patrick Gallinari. 2019. Incorporating Visual Semantics into Sentence Representations within a Grounded Space. In EMNLP.","DOI":"10.18653\/v1\/D19-1064"},{"key":"e_1_3_2_2_3_1","volume-title":"Language Models are Few-Shot Learners. CoRR","author":"Brown Tom B.","year":"2020","unstructured":"Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. 
Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020. Language Models are Few-Shot Learners. CoRR, Vol. abs\/2005.14165 (2020). arxiv: 2005.14165 https:\/\/arxiv.org\/abs\/2005.14165"},{"key":"e_1_3_2_2_4_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/S17-2001"},{"key":"e_1_3_2_2_5_1","volume-title":"Graph Optimal Transport for Cross-Domain Alignment. CoRR","author":"Chen Liqun","year":"2020","unstructured":"Liqun Chen, Zhe Gan, Yu Cheng, Linjie Li, Lawrence Carin, and Jingjing Liu. 2020. Graph Optimal Transport for Cross-Domain Alignment. CoRR, Vol. abs\/2006.14744 (2020). arxiv: 2006.14744 https:\/\/arxiv.org\/abs\/2006.14744"},{"key":"e_1_3_2_2_6_1","volume-title":"Improving sequence-to-sequence learning via optimal transport. arXiv preprint arXiv:1901.06283","author":"Chen Liqun","year":"2019","unstructured":"Liqun Chen, Yizhe Zhang, Ruiyi Zhang, Chenyang Tao, Zhe Gan, Haichao Zhang, Bai Li, Dinghan Shen, Changyou Chen, and Lawrence Carin. 2019b. Improving sequence-to-sequence learning via optimal transport. arXiv preprint arXiv:1901.06283 (2019)."},{"key":"e_1_3_2_2_7_1","volume-title":"UNITER: Learning UNiversal Image-TExt Representations. CoRR","author":"Chen Yen-Chun","year":"2019","unstructured":"Yen-Chun Chen, Linjie Li, Licheng Yu, Ahmed El Kholy, Faisal Ahmed, Zhe Gan, Yu Cheng, and Jingjing Liu. 2019a. UNITER: Learning UNiversal Image-TExt Representations. CoRR, Vol. abs\/1909.11740 (2019). arxiv: 1909.11740 http:\/\/arxiv.org\/abs\/1909.11740"},{"key":"e_1_3_2_2_8_1","volume-title":"International Conference on Learning Representations.","author":"Clark Kevin","year":"2019","unstructured":"Kevin Clark, Minh-Thang Luong, Quoc V Le, and Christopher D Manning. 2019. ELECTRA: Pre-training Text Encoders as Discriminators Rather Than Generators. In International Conference on Learning Representations."},{"key":"e_1_3_2_2_9_1","doi-asserted-by":"publisher","DOI":"10.5555\/3298023.3298203"},{"key":"e_1_3_2_2_10_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D17-1070"},{"key":"e_1_3_2_2_11_1","volume-title":"Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies","volume":"1","author":"Devlin Jacob","year":"2019","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers). 4171--4186."},{"key":"e_1_3_2_2_12_1","first-page":"05","volume-title":"Proceedings of the Third International Workshop on Paraphrasing (IWP2005)","author":"Dolan William B.","unstructured":"William B. Dolan and Chris Brockett. 2005. Automatically Constructing a Corpus of Sentential Paraphrases. In Proceedings of the Third International Workshop on Paraphrasing (IWP2005). https:\/\/www.aclweb.org\/anthology\/I05-5002"},{"key":"e_1_3_2_2_13_1","volume-title":"An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale. 
CoRR","author":"Dosovitskiy Alexey","year":"1929","unstructured":"Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. 2020. An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale. CoRR, Vol. abs\/2010.11929 (2020). showeprint[arXiv]2010.11929 https:\/\/arxiv.org\/abs\/2010.11929"},{"key":"e_1_3_2_2_14_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N16-1162"},{"key":"e_1_3_2_2_15_1","volume-title":"Matthew A. Kelly, and David Reitter.","author":"Alexander G.","year":"2018","unstructured":"Alexander G. Ororbia II, Ankur Arjun Mali, Matthew A. Kelly, and David Reitter. 2018. Visually Grounded, Situated Learning in Neural Models. CoRR, Vol. abs\/1805.11546 (2018). arxiv: 1805.11546 http:\/\/arxiv.org\/abs\/1805.11546"},{"key":"e_1_3_2_2_16_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N18-1038"},{"key":"e_1_3_2_2_17_1","volume-title":"International Conference on Machine Learning. PMLR, 5583--5594","author":"Kim Wonjae","year":"2021","unstructured":"Wonjae Kim, Bokyung Son, and Ildoo Kim. 2021. Vilt: Vision-and-language transformer without convolution or region supervision. In International Conference on Machine Learning. PMLR, 5583--5594."},{"key":"e_1_3_2_2_18_1","volume-title":"Skip-Thought Vectors. In Advances in Neural Information Processing Systems 28: Annual Conference on Neural Information Processing Systems 2015","author":"Kiros Ryan","year":"2015","unstructured":"Ryan Kiros, Yukun Zhu, Ruslan Salakhutdinov, Richard S. Zemel, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. 2015. Skip-Thought Vectors. In Advances in Neural Information Processing Systems 28: Annual Conference on Neural Information Processing Systems 2015, December 7-12, 2015, Montreal, Quebec, Canada. 3294--3302. http:\/\/papers.nips.cc\/paper\/5950-skip-thought-vectors"},{"key":"e_1_3_2_2_19_1","volume-title":"Visual Genome: Connecting Language and Vision Using Crowdsourced Dense Image Annotations. https:\/\/arxiv.org\/abs\/1602.07332","author":"Krishna Ranjay","year":"2016","unstructured":"Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, Michael Bernstein, and Li Fei-Fei. 2016. Visual Genome: Connecting Language and Vision Using Crowdsourced Dense Image Annotations. https:\/\/arxiv.org\/abs\/1602.07332"},{"key":"e_1_3_2_2_20_1","volume-title":"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations. In International Conference on Learning Representations.","author":"Lan Zhenzhong","year":"2019","unstructured":"Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, and Radu Soricut. 2019. ALBERT: A Lite BERT for Self-supervised Learning of Language Representations. In International Conference on Learning Representations."},{"key":"e_1_3_2_2_21_1","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/N15-1016"},{"key":"e_1_3_2_2_22_1","volume-title":"International conference on machine learning. 1188--1196","author":"Le Quoc","year":"2014","unstructured":"Quoc Le and Tomas Mikolov. 2014. Distributed representations of sentences and documents. In International conference on machine learning. 1188--1196."},{"key":"e_1_3_2_2_23_1","volume-title":"Visualbert: A simple and performant baseline for vision and language. 
arXiv preprint arXiv:1908.03557","author":"Li Liunian Harold","year":"2019","unstructured":"Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, and Kai-Wei Chang. 2019. Visualbert: A simple and performant baseline for vision and language. arXiv preprint arXiv:1908.03557 (2019)."},{"key":"e_1_3_2_2_24_1","volume-title":"Oscar: Object-Semantics Aligned Pre-training for Vision-Language Tasks. arXiv preprint arXiv:2004.06165","author":"Li Xiujun","year":"2020","unstructured":"Xiujun Li, Xi Yin, Chunyuan Li, Xiaowei Hu, Pengchuan Zhang, Lei Zhang, Lijuan Wang, Houdong Hu, Li Dong, Furu Wei, et al. 2020. Oscar: Object-Semantics Aligned Pre-training for Vision-Language Tasks. arXiv preprint arXiv:2004.06165 (2020)."},{"key":"e_1_3_2_2_25_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"e_1_3_2_2_26_1","volume-title":"RoBERTa: A Robustly Optimized BERT Pretraining Approach. arXiv preprint arXiv:1907.11692","author":"Liu Yinhan","year":"2019","unstructured":"Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. RoBERTa: A Robustly Optimized BERT Pretraining Approach. arXiv preprint arXiv:1907.11692 (2019)."},{"key":"e_1_3_2_2_27_1","volume-title":"6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings. https:\/\/openreview.net\/forum?id=rJvJXZb0W","author":"Logeswaran Lajanugen","year":"2018","unstructured":"Lajanugen Logeswaran and Honglak Lee. 2018. An efficient framework for learning sentence representations. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings. https:\/\/openreview.net\/forum?id=rJvJXZb0W"},{"key":"e_1_3_2_2_28_1","unstructured":"Ilya Loshchilov and Frank Hutter. 2018. Fixing weight decay regularization in adam. (2018)."},{"key":"e_1_3_2_2_29_1","volume-title":"Vilbert: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. In Advances in Neural Information Processing Systems. 13--23.","author":"Lu Jiasen","year":"2019","unstructured":"Jiasen Lu, Dhruv Batra, Devi Parikh, and Stefan Lee. 2019. Vilbert: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. In Advances in Neural Information Processing Systems. 13--23."},{"key":"e_1_3_2_2_30_1","unstructured":"Tomas Mikolov Ilya Sutskever Kai Chen Greg S Corrado and Jeff Dean. 2013. Distributed representations of words and phrases and their compositionality. In Advances in neural information processing systems. 3111--3119."},{"key":"e_1_3_2_2_31_1","volume-title":"Contrastive Learning for Neural Topic Model. CoRR","author":"Nguyen Thong","year":"2021","unstructured":"Thong Nguyen and Anh Tuan Luu. 2021. Contrastive Learning for Neural Topic Model. CoRR, Vol. abs\/2110.12764 (2021). arxiv: 2110.12764 https:\/\/arxiv.org\/abs\/2110.12764"},{"key":"e_1_3_2_2_32_1","volume-title":"Improving Neural Cross-Lingual Summarization via Employing Optimal Transport Distance for Knowledge Distillation. CoRR","author":"Nguyen Thong","year":"2021","unstructured":"Thong Nguyen and Luu Anh Tuan. 2021. Improving Neural Cross-Lingual Summarization via Employing Optimal Transport Distance for Knowledge Distillation. CoRR, Vol. abs\/2112.03473 (2021). 
arxiv: 2112.03473 https:\/\/arxiv.org\/abs\/2112.03473"},{"key":"e_1_3_2_2_33_1","volume-title":"Gradient-Boosted Decision Tree for Listwise Context Model in Multimodal Review Helpfulness Prediction. arXiv preprint arXiv:2305.12678","author":"Nguyen Thong","year":"2023","unstructured":"Thong Nguyen, Xiaobao Wu, Xinshuai Dong, Anh Tuan Luu, Cong-Duy Nguyen, Zhen Hai, and Lidong Bing. 2023. Gradient-Boosted Decision Tree for Listwise Context Model in Multimodal Review Helpfulness Prediction. arXiv preprint arXiv:2305.12678 (2023)."},{"key":"e_1_3_2_2_34_1","volume-title":"Adaptive Contrastive Learning on Multimodal Transformer for Review Helpfulness Predictions. arXiv preprint arXiv:2211.03524","author":"Nguyen Thong","year":"2022","unstructured":"Thong Nguyen, Xiaobao Wu, Anh-Tuan Luu, Cong-Duy Nguyen, Zhen Hai, and Lidong Bing. 2022. Adaptive Contrastive Learning on Multimodal Transformer for Review Helpfulness Predictions. arXiv preprint arXiv:2211.03524 (2022)."},{"volume-title":"How Children Learn Language","author":"O'Grady William","key":"e_1_3_2_2_35_1","unstructured":"William O'Grady. 2005. How Children Learn Language. Cambridge University Press. https:\/\/www.cambridge.org\/core\/books\/how-children-learn-language\/04C336554C93315A5F78F4E03777A4E6"},{"key":"e_1_3_2_2_36_1","volume-title":"Improved Text Classification via Contrastive Adversarial Training. arXiv preprint arXiv:2107.10137","author":"Pan Lin","year":"2021","unstructured":"Lin Pan, Chung-Wei Hang, Avirup Sil, Saloni Potdar, and Mo Yu. 2021a. Improved Text Classification via Contrastive Adversarial Training. arXiv preprint arXiv:2107.10137 (2021)."},{"key":"e_1_3_2_2_37_1","volume-title":"Contrastive learning for many-to-many multilingual neural machine translation. arXiv preprint arXiv:2105.09501","author":"Pan Xiao","year":"2021","unstructured":"Xiao Pan, Mingxuan Wang, Liwei Wu, and Lei Li. 2021b. Contrastive learning for many-to-many multilingual neural machine translation. arXiv preprint arXiv:2105.09501 (2021)."},{"key":"e_1_3_2_2_38_1","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/D14-1162"},{"key":"e_1_3_2_2_39_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N18-1202"},{"key":"e_1_3_2_2_40_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P18-2124"},{"key":"e_1_3_2_2_41_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D16-1264"},{"key":"e_1_3_2_2_42_1","volume-title":"Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks. CoRR","author":"Reimers Nils","year":"2019","unstructured":"Nils Reimers and Iryna Gurevych. 2019. Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks. CoRR, Vol. abs\/1908.10084 (2019). arxiv: 1908.10084 http:\/\/arxiv.org\/abs\/1908.10084"},{"key":"e_1_3_2_2_43_1","volume-title":"Learning as the Unsupervised Alignment of Conceptual Systems. CoRR","author":"Roads Brett D.","year":"2019","unstructured":"Brett D. Roads and Bradley C. Love. 2019. Learning as the Unsupervised Alignment of Conceptual Systems. CoRR, Vol. abs\/1906.09012 (2019). arxiv: 1906.09012 http:\/\/arxiv.org\/abs\/1906.09012"},{"key":"e_1_3_2_2_44_1","doi-asserted-by":"publisher","DOI":"10.1017\/S0142716400000643"},{"key":"e_1_3_2_2_45_1","volume-title":"Contrastive Visual-Linguistic Pretraining. CoRR","author":"Shi Lei","year":"2020","unstructured":"Lei Shi, Kai Shuang, Shijie Geng, Peng Su, Zhengkai Jiang, Peng Gao, Zuohui Fu, Gerard de Melo, and Sen Su. 2020. Contrastive Visual-Linguistic Pretraining. CoRR, Vol. abs\/2007.13135 (2020). 
arxiv: 2007.13135 https:\/\/arxiv.org\/abs\/2007.13135"},{"key":"e_1_3_2_2_46_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D13-1170"},{"key":"e_1_3_2_2_47_1","volume-title":"Vl-bert: Pre-training of generic visual-linguistic representations. In ICLR.","author":"Su Weijie","year":"2020","unstructured":"Weijie Su, Xizhou Zhu, Yue Cao, Bin Li, Lewei Lu, Furu Wei, and Jifeng Dai. 2020. Vl-bert: Pre-training of generic visual-linguistic representations. In ICLR."},{"key":"e_1_3_2_2_48_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1514"},{"key":"e_1_3_2_2_49_1","volume-title":"Vokenization: Improving Language Understanding with Contextualized, Visual-Grounded Supervision. CoRR","author":"Tan Hao","year":"2020","unstructured":"Hao Tan and Mohit Bansal. 2020. Vokenization: Improving Language Understanding with Contextualized, Visual-Grounded Supervision. CoRR, Vol. abs\/2010.06775 (2020). arxiv: 2010.06775 https:\/\/arxiv.org\/abs\/2010.06775"},{"key":"e_1_3_2_2_50_1","doi-asserted-by":"crossref","unstructured":"Gabriella Vigliocco Pamela Perniss and David Vinson. 2014. Language as a multimodal phenomenon: implications for language learning processing and evolution. https:\/\/www.ncbi.nlm.nih.gov\/pmc\/articles\/PMC4123671\/","DOI":"10.1098\/rstb.2013.0292"},{"key":"e_1_3_2_2_51_1","volume-title":"GLUE: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding. CoRR","author":"Wang Alex","year":"2018","unstructured":"Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. 2018. GLUE: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding. CoRR, Vol. abs\/1804.07461 (2018). arxiv: 1804.07461 http:\/\/arxiv.org\/abs\/1804.07461"},{"key":"e_1_3_2_2_52_1","volume-title":"Neural Network Acceptability Judgments. CoRR","author":"Warstadt Alex","year":"2018","unstructured":"Alex Warstadt, Amanpreet Singh, and Samuel R. Bowman. 2018. Neural Network Acceptability Judgments. CoRR, Vol. abs\/1805.12471 (2018). arxiv: 1805.12471 http:\/\/arxiv.org\/abs\/1805.12471"},{"key":"e_1_3_2_2_53_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N18-1101"},{"key":"e_1_3_2_2_54_1","volume-title":"Xlnet: Generalized autoregressive pretraining for language understanding. In Advances in neural information processing systems. 5754--5764.","author":"Yang Zhilin","year":"2019","unstructured":"Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Russ R Salakhutdinov, and Quoc V Le. 2019. Xlnet: Generalized autoregressive pretraining for language understanding. In Advances in neural information processing systems. 5754--5764."},{"key":"e_1_3_2_2_55_1","doi-asserted-by":"crossref","unstructured":"Luowei Zhou Hamid Palangi Lei Zhang Houdong Hu Jason J Corso and Jianfeng Gao. 2020. Unified vision-language pre-training for image captioning and vqa. 
In AAAI","DOI":"10.1609\/aaai.v34i07.7005"}],"event":{"name":"MM '23: The 31st ACM International Conference on Multimedia","sponsor":["SIGMM ACM Special Interest Group on Multimedia"],"location":"Ottawa ON Canada","acronym":"MM '23"},"container-title":["Proceedings of the 31st ACM International Conference on Multimedia"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3581783.3612248","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3581783.3612248","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,21]],"date-time":"2025-08-21T23:57:32Z","timestamp":1755820652000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3581783.3612248"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,10,26]]},"references-count":55,"alternative-id":["10.1145\/3581783.3612248","10.1145\/3581783"],"URL":"https:\/\/doi.org\/10.1145\/3581783.3612248","relation":{},"subject":[],"published":{"date-parts":[[2023,10,26]]},"assertion":[{"value":"2023-10-27","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}