{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T14:38:55Z","timestamp":1774449535827,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":56,"publisher":"ACM","license":[{"start":{"date-parts":[[2025,4,22]],"date-time":"2025-04-22T00:00:00Z","timestamp":1745280000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"funder":[{"name":"Ministry of Education, Singapore, under the Academic Research Fund Tier 2 (FY2025)","award":["MOE-T2EP20124-0009"],"award-info":[{"award-number":["MOE-T2EP20124-0009"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,4,22]]},"DOI":"10.1145\/3696410.3714818","type":"proceedings-article","created":{"date-parts":[[2025,4,22]],"date-time":"2025-04-22T22:52:18Z","timestamp":1745362338000},"page":"1759-1770","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":7,"title":["UniGraph2: Learning a Unified Embedding Space to Bind Multimodal Graphs"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-8918-6734","authenticated-orcid":false,"given":"Yufei","family":"He","sequence":"first","affiliation":[{"name":"National University of Singapore, Singapore, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8559-831X","authenticated-orcid":false,"given":"Yuan","family":"Sui","sequence":"additional","affiliation":[{"name":"National University of Singapore, Singapore, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8281-8070","authenticated-orcid":false,"given":"Xiaoxin","family":"He","sequence":"additional","affiliation":[{"name":"National University of Singapore, Singapore, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9894-0062","authenticated-orcid":false,"given":"Yue","family":"Liu","sequence":"additional","affiliation":[{"name":"National University of Singapore, Singapore, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6814-5527","authenticated-orcid":false,"given":"Yifei","family":"Sun","sequence":"additional","affiliation":[{"name":"Zhejiang University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5645-1754","authenticated-orcid":false,"given":"Bryan","family":"Hooi","sequence":"additional","affiliation":[{"name":"National University of Singapore, Singapore, Singapore"}]}],"member":"320","published-online":{"date-parts":[[2025,4,22]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al.","author":"Achiam Josh","year":"2023","unstructured":"Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. 2023. Gpt-4 technical report. arXiv preprint arXiv:2303.08774 (2023)."},{"key":"e_1_3_2_1_2_1","volume-title":"Multimodal machine learning: A survey and taxonomy","author":"Baltru\u0161aitis Tadas","year":"2018","unstructured":"Tadas Baltru\u0161aitis, Chaitanya Ahuja, and Louis-Philippe Morency. 2018. Multimodal machine learning: A survey and taxonomy. IEEE transactions on pattern analysis and machine intelligence, Vol. 41, 2 (2018), 423--443."},{"key":"e_1_3_2_1_3_1","volume-title":"BEiT: BERT Pre-Training of Image Transformers. In International Conference on Learning Representations.","author":"Bao Hangbo","year":"2021","unstructured":"Hangbo Bao, Li Dong, Songhao Piao, and Furu Wei. 2021. BEiT: BERT Pre-Training of Image Transformers. In International Conference on Learning Representations."},{"key":"e_1_3_2_1_4_1","doi-asserted-by":"publisher","DOI":"10.1145\/1052934.1052938"},{"key":"e_1_3_2_1_5_1","volume-title":"A suite of generative tasks for multi-level multimodal webpage understanding. arXiv preprint arXiv:2305.03668","author":"Burns Andrea","year":"2023","unstructured":"Andrea Burns, Krishna Srinivasan, Joshua Ainslie, Geoff Brown, Bryan A Plummer, Kate Saenko, Jianmo Ni, and Mandy Guo. 2023. A suite of generative tasks for multi-level multimodal webpage understanding. arXiv preprint arXiv:2305.03668 (2023)."},{"key":"e_1_3_2_1_6_1","doi-asserted-by":"publisher","DOI":"10.1145\/3477495.3531992"},{"key":"e_1_3_2_1_7_1","volume-title":"International Conference on Learning Representations.","author":"Chien Eli","year":"2022","unstructured":"Eli Chien, Wei-Cheng Chang, Cho-Jui Hsieh, Hsiang-Fu Yu, Jiong Zhang, Olgica Milenkovic, and Inderjit S Dhillon. 2022. Node Feature Extraction by Self-Supervised Multi-scale Neighborhood Prediction. In International Conference on Learning Representations."},{"key":"e_1_3_2_1_8_1","volume-title":"International Conference on Learning Representations.","author":"Dosovitskiy Alexey","year":"2020","unstructured":"Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. 2020. An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale. In International Conference on Learning Representations."},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","DOI":"10.1038\/s42256-023-00624-6"},{"key":"e_1_3_2_1_10_1","volume-title":"Towards foundation models for knowledge graph reasoning. ICLR","author":"Galkin Mikhail","year":"2024","unstructured":"Mikhail Galkin, Xinyu Yuan, Hesham Mostafa, Jian Tang, and Zhaocheng Zhu. 2024. Towards foundation models for knowledge graph reasoning. ICLR (2024)."},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"publisher","DOI":"10.1145\/3488560.3501396"},{"key":"e_1_3_2_1_12_1","volume-title":"International Conference on Learning Representations","author":"Gasteiger Johannes","year":"2018","unstructured":"Johannes Gasteiger, Aleksandar Bojchevski, and Stephan G\u00fcnnemann. 2018. Predict then propagate: Graph neural networks meet personalized pagerank. International Conference on Learning Representations (2018)."},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01457"},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01553"},{"key":"e_1_3_2_1_15_1","volume-title":"Harnessing Explanations: LLM-to-LM Interpreter for Enhanced Text-Attributed Graph Representation Learning. In The Twelfth International Conference on Learning Representations.","author":"He Xiaoxin","year":"2024","unstructured":"Xiaoxin He, Xavier Bresson, Thomas Laurent, Adam Perold, Yann LeCun, and Bryan Hooi. 2024a. Harnessing Explanations: LLM-to-LM Interpreter for Enhanced Text-Attributed Graph Representation Learning. In The Twelfth International Conference on Learning Representations."},{"key":"e_1_3_2_1_16_1","volume-title":"Harnessing Explanations: LLM-to-LM Interpreter for Enhanced Text-Attributed Graph Representation Learning. International Conference on Learning Representations","author":"He Xiaoxin","year":"2024","unstructured":"Xiaoxin He, Xavier Bresson, Thomas Laurent, Adam Perold, Yann LeCun, and Bryan Hooi. 2024b. Harnessing Explanations: LLM-to-LM Interpreter for Enhanced Text-Attributed Graph Representation Learning. International Conference on Learning Representations (2024)."},{"key":"e_1_3_2_1_17_1","volume-title":"2024 e. G-retriever: Retrieval-augmented generation for textual graph understanding and question answering. arXiv preprint arXiv:2402.07630","author":"He Xiaoxin","year":"2024","unstructured":"Xiaoxin He, Yijun Tian, Yifei Sun, Nitesh V Chawla, Thomas Laurent, Yann LeCun, Xavier Bresson, and Bryan Hooi. 2024 e. G-retriever: Retrieval-augmented generation for textual graph understanding and question answering. arXiv preprint arXiv:2402.07630 (2024)."},{"key":"e_1_3_2_1_18_1","volume-title":"Generalizing Graph Transformers Across Diverse Graphs and Tasks via Pre-Training on Industrial-Scale Data. arXiv preprint arXiv:2407.03953","author":"He Yufei","year":"2024","unstructured":"Yufei He, Zhenyu Hou, Yukuo Cen, Feng He, Xu Cheng, and Bryan Hooi. 2024c. Generalizing Graph Transformers Across Diverse Graphs and Tasks via Pre-Training on Industrial-Scale Data. arXiv preprint arXiv:2407.03953 (2024)."},{"key":"e_1_3_2_1_19_1","unstructured":"Yufei He Yuan Sui Xiaoxin He and Bryan Hooi. 2024 d. UniGraph: Learning a Unified Cross-Domain Foundation Model for Text-Attributed Graphs. arxiv: 2402.13630 [cs.LG] https:\/\/arxiv.org\/abs\/2402.13630"},{"key":"e_1_3_2_1_20_1","doi-asserted-by":"publisher","DOI":"10.1145\/3543507.3583379"},{"key":"e_1_3_2_1_21_1","volume-title":"GraphAlign: Pretraining One Graph Neural Network on Multiple Graphs via Feature Alignment. arXiv preprint arXiv:2406.02953","author":"Hou Zhenyu","year":"2024","unstructured":"Zhenyu Hou, Haozhan Li, Yukuo Cen, Jie Tang, and Yuxiao Dong. 2024. GraphAlign: Pretraining One Graph Neural Network on Multiple Graphs via Feature Alignment. arXiv preprint arXiv:2406.02953 (2024)."},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.1145\/3534678.3539321"},{"key":"e_1_3_2_1_23_1","volume-title":"Open graph benchmark: Datasets for machine learning on graphs. Advances in neural information processing systems","author":"Hu Weihua","year":"2020","unstructured":"Weihua Hu, Matthias Fey, Marinka Zitnik, Yuxiao Dong, Hongyu Ren, Bowen Liu, Michele Catasta, and Jure Leskovec. 2020. Open graph benchmark: Datasets for machine learning on graphs. Advances in neural information processing systems, Vol. 33 (2020), 22118--22133."},{"key":"e_1_3_2_1_24_1","volume-title":"Advances in Neural Information Processing Systems","volume":"36","author":"Huang Qian","year":"2024","unstructured":"Qian Huang, Hongyu Ren, Peng Chen, Gregor Kr\u017emanc, Daniel Zeng, Percy S Liang, and Jure Leskovec. 2024. Prodigy: Enabling in-context learning over graphs. Advances in Neural Information Processing Systems, Vol. 36 (2024)."},{"key":"e_1_3_2_1_25_1","volume-title":"Perceiver IO: A General Architecture for Structured Inputs & Outputs. In International Conference on Learning Representations.","author":"Jaegle Andrew","unstructured":"Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, et al. [n.,d.]. Perceiver IO: A General Architecture for Structured Inputs & Outputs. In International Conference on Learning Representations."},{"key":"e_1_3_2_1_26_1","volume-title":"International conference on machine learning. PMLR, 4904--4916","author":"Jia Chao","year":"2021","unstructured":"Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. 2021. Scaling up visual and vision-language representation learning with noisy text supervision. In International conference on machine learning. PMLR, 4904--4916."},{"key":"e_1_3_2_1_27_1","volume-title":"Learning Multimodal Graph-to-Graph Translation for Molecule Optimization. In International Conference on Learning Representations.","author":"Jin Wengong","year":"2019","unstructured":"Wengong Jin, Kevin Yang, Regina Barzilay, and Tommi Jaakkola. 2019. Learning Multimodal Graph-to-Graph Translation for Molecule Optimization. In International Conference on Learning Representations."},{"key":"e_1_3_2_1_28_1","volume-title":"Proceedings of NAACL-HLT. 4171--4186","author":"Ming-Wei Chang Jacob Devlin","year":"2019","unstructured":"Jacob Devlin Ming-Wei Chang Kenton and Lee Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In Proceedings of NAACL-HLT. 4171--4186."},{"key":"e_1_3_2_1_29_1","volume-title":"One for all: Towards training one graph model for all classification tasks. ICLR","author":"Liu Hao","year":"2024","unstructured":"Hao Liu, Jiarui Feng, Lecheng Kong, Ningyue Liang, Dacheng Tao, Yixin Chen, and Muhan Zhang. 2024a. One for all: Towards training one graph model for all classification tasks. ICLR (2024)."},{"key":"e_1_3_2_1_30_1","volume-title":"One For All: Towards Training One Graph Model For All Classification Tasks. In The Twelfth International Conference on Learning Representations.","author":"Liu Hao","year":"2024","unstructured":"Hao Liu, Jiarui Feng, Lecheng Kong, Ningyue Liang, Dacheng Tao, Yixin Chen, and Muhan Zhang. 2024b. One For All: Towards Training One Graph Model For All Classification Tasks. In The Twelfth International Conference on Learning Representations."},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"publisher","DOI":"10.1145\/2835776.2835823"},{"key":"e_1_3_2_1_32_1","volume-title":"Wiki-cs: A wikipedia-based benchmark for graph neural networks. arXiv preprint arXiv:2007.02901","author":"Mernyei P\u00e9ter","year":"2020","unstructured":"P\u00e9ter Mernyei and C\u0103t\u0103lina Cangea. 2020. Wiki-cs: A wikipedia-based benchmark for graph neural networks. arXiv preprint arXiv:2007.02901 (2020)."},{"key":"e_1_3_2_1_33_1","volume-title":"MTEB: Massive text embedding benchmark. arXiv preprint arXiv:2210.07316","author":"Muennighoff Niklas","year":"2022","unstructured":"Niklas Muennighoff, Nouamane Tazi, Lo\u00efc Magne, and Nils Reimers. 2022. MTEB: Massive text embedding benchmark. arXiv preprint arXiv:2210.07316 (2022)."},{"key":"e_1_3_2_1_34_1","volume-title":"Proceedings of the 28th international conference on machine learning (ICML-11)","author":"Ngiam Jiquan","year":"2011","unstructured":"Jiquan Ngiam, Aditya Khosla, Mingyu Kim, Juhan Nam, Honglak Lee, and Andrew Y Ng. 2011. Multimodal deep learning. In Proceedings of the 28th international conference on machine learning (ICML-11). 689--696."},{"key":"e_1_3_2_1_35_1","doi-asserted-by":"publisher","DOI":"10.1145\/3394486.3403168"},{"key":"e_1_3_2_1_36_1","unstructured":"Alec Radford. 2018. Improving language understanding by generative pre-training. (2018)."},{"key":"e_1_3_2_1_37_1","volume-title":"International conference on machine learning. PMLR, 8748--8763","author":"Radford Alec","year":"2021","unstructured":"Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. 2021. Learning transferable visual models from natural language supervision. In International conference on machine learning. PMLR, 8748--8763."},{"key":"e_1_3_2_1_38_1","volume-title":"E-commerce recommendation applications. Data mining and knowledge discovery","author":"Schafer J Ben","year":"2001","unstructured":"J Ben Schafer, Joseph A Konstan, and John Riedl. 2001. E-commerce recommendation applications. Data mining and knowledge discovery, Vol. 5 (2001), 115--153."},{"key":"e_1_3_2_1_39_1","volume-title":"Outrageously large neural networks: The sparsely-gated mixture-of-experts layer. arXiv preprint arXiv:1701.06538","author":"Shazeer Noam","year":"2017","unstructured":"Noam Shazeer, Azalia Mirhoseini, Krzysztof Maziarz, Andy Davis, Quoc Le, Geoffrey Hinton, and Jeff Dean. 2017. Outrageously large neural networks: The sparsely-gated mixture-of-experts layer. arXiv preprint arXiv:1701.06538 (2017)."},{"key":"e_1_3_2_1_40_1","volume-title":"FiDeLiS: Faithful Reasoning in Large Language Model for Knowledge Graph Question Answering. arXiv preprint arXiv:2405.13873","author":"Sui Yuan","year":"2024","unstructured":"Yuan Sui, Yufei He, Nian Liu, Xiaoxin He, Kun Wang, and Bryan Hooi. 2024. FiDeLiS: Faithful Reasoning in Large Language Model for Knowledge Graph Question Answering. arXiv preprint arXiv:2405.13873 (2024)."},{"key":"e_1_3_2_1_41_1","volume-title":"ICLR 2021 Workshop on Geometrical and Topological Representation Learning.","author":"Thakoor Shantanu","year":"2021","unstructured":"Shantanu Thakoor, Corentin Tallec, Mohammad Gheshlaghi Azar, R\u00e9mi Munos, Petar Veli\u010dkovi\u0107, and Michal Valko. 2021. Bootstrapped representation learning on graphs. In ICLR 2021 Workshop on Geometrical and Topological Representation Learning."},{"key":"e_1_3_2_1_42_1","volume-title":"Attention is all you need. Advances in Neural Information Processing Systems","author":"Vaswani A","year":"2017","unstructured":"A Vaswani. 2017. Attention is all you need. Advances in Neural Information Processing Systems (2017)."},{"key":"e_1_3_2_1_43_1","volume-title":"Graph Attention Networks. In International Conference on Learning Representations.","author":"Veli\u010dkovi\u0107 Petar","year":"2018","unstructured":"Petar Veli\u010dkovi\u0107, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Li\u00f2, and Yoshua Bengio. 2018. Graph Attention Networks. In International Conference on Learning Representations."},{"key":"e_1_3_2_1_44_1","volume-title":"Advances in Neural Information Processing Systems","volume":"36","author":"Wang Heng","year":"2024","unstructured":"Heng Wang, Shangbin Feng, Tianxing He, Zhaoxuan Tan, Xiaochuang Han, and Yulia Tsvetkov. 2024a. Can language models solve graph problems in natural language? Advances in Neural Information Processing Systems, Vol. 36 (2024)."},{"key":"e_1_3_2_1_45_1","volume-title":"Advances in Neural Information Processing Systems","volume":"36","author":"Wang Haotao","year":"2024","unstructured":"Haotao Wang, Ziyu Jiang, Yuning You, Yan Han, Gaowen Liu, Jayanth Srinivasa, Ramana Kompella, Zhangyang Wang, et al. 2024b. Graph mixture of experts: Learning on large-scale graphs with explicit diversity modeling. Advances in Neural Information Processing Systems, Vol. 36 (2024)."},{"key":"e_1_3_2_1_46_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2023.102883"},{"key":"e_1_3_2_1_47_1","volume-title":"One-peace: Exploring one general representation model toward unlimited modalities. arXiv preprint arXiv:2305.11172","author":"Wang Peng","year":"2023","unstructured":"Peng Wang, Shijie Wang, Junyang Lin, Shuai Bai, Xiaohuan Zhou, Jingren Zhou, Xinggang Wang, and Chang Zhou. 2023b. One-peace: Exploring one general representation model toward unlimited modalities. arXiv preprint arXiv:2305.11172 (2023)."},{"key":"e_1_3_2_1_48_1","volume-title":"Graphmetro: Mitigating complex distribution shifts in gnns via mixture of aligned experts. arXiv preprint arXiv:2312.04693","author":"Wu Shirley","year":"2023","unstructured":"Shirley Wu, Kaidi Cao, Bruno Ribeiro, James Zou, and Jure Leskovec. 2023. Graphmetro: Mitigating complex distribution shifts in gnns via mixture of aligned experts. arXiv preprint arXiv:2312.04693 (2023)."},{"key":"e_1_3_2_1_49_1","volume-title":"Mole-BERT: Rethinking Pre-training Graph Neural Networks for Molecules. In The Eleventh International Conference on Learning Representations.","author":"Xia Jun","year":"2023","unstructured":"Jun Xia, Chengshuai Zhao, Bozhen Hu, Zhangyang Gao, Cheng Tan, Yue Liu, Siyuan Li, and Stan Z Li. 2023. Mole-BERT: Rethinking Pre-training Graph Neural Networks for Molecules. In The Eleventh International Conference on Learning Representations."},{"key":"e_1_3_2_1_50_1","volume-title":"Bryan Hooi, and Ruslan Salakhutdinov.","author":"Yoon Minji","year":"2023","unstructured":"Minji Yoon, Jing Yu Koh, Bryan Hooi, and Ruslan Salakhutdinov. 2023. Multimodal graph learning for generative tasks. arXiv preprint arXiv:2310.07478 (2023)."},{"key":"e_1_3_2_1_51_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i3.25445"},{"key":"e_1_3_2_1_52_1","volume-title":"Xi Victoria Lin, et al","author":"Zhang Susan","year":"2022","unstructured":"Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen, Christopher Dewan, Mona Diab, Xian Li, Xi Victoria Lin, et al. 2022. Opt: Open pre-trained transformer language models. arXiv preprint arXiv:2205.01068 (2022)."},{"key":"e_1_3_2_1_53_1","volume-title":"Can LLM Graph Reasoning Generalize beyond Pattern Memorization? arXiv preprint arXiv:2406.15992","author":"Zhang Yizhuo","year":"2024","unstructured":"Yizhuo Zhang, Heng Wang, Shangbin Feng, Zhaoxuan Tan, Xiaochuang Han, Tianxing He, and Yulia Tsvetkov. 2024. Can LLM Graph Reasoning Generalize beyond Pattern Memorization? arXiv preprint arXiv:2406.15992 (2024)."},{"key":"e_1_3_2_1_54_1","doi-asserted-by":"publisher","DOI":"10.1145\/3637528.3671913"},{"key":"e_1_3_2_1_55_1","volume-title":"The Eleventh International Conference on Learning Representations.","author":"Zhao Jianan","year":"2022","unstructured":"Jianan Zhao, Meng Qu, Chaozhuo Li, Hao Yan, Qian Liu, Rui Li, Xing Xie, and Jian Tang. 2022. Learning on Large-scale Text-attributed Graphs via Variational Inference. In The Eleventh International Conference on Learning Representations."},{"key":"e_1_3_2_1_56_1","volume-title":"Multimodal Graph Benchmark. arXiv preprint arXiv:2406.16321","author":"Zhu Jing","year":"2024","unstructured":"Jing Zhu, Yuhang Zhou, Shengyi Qian, Zhongmou He, Tong Zhao, Neil Shah, and Danai Koutra. 2024. Multimodal Graph Benchmark. arXiv preprint arXiv:2406.16321 (2024)."}],"event":{"name":"WWW '25: The ACM Web Conference 2025","location":"Sydney NSW Australia","acronym":"WWW '25","sponsor":["SIGWEB ACM Special Interest Group on Hypertext, Hypermedia, and Web"]},"container-title":["Proceedings of the ACM on Web Conference 2025"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3696410.3714818","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3696410.3714818","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T01:18:42Z","timestamp":1750295922000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3696410.3714818"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,4,22]]},"references-count":56,"alternative-id":["10.1145\/3696410.3714818","10.1145\/3696410"],"URL":"https:\/\/doi.org\/10.1145\/3696410.3714818","relation":{},"subject":[],"published":{"date-parts":[[2025,4,22]]},"assertion":[{"value":"2025-04-22","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}