{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,10]],"date-time":"2025-12-10T09:10:36Z","timestamp":1765357836437,"version":"3.41.0"},"publisher-location":"New York, NY, USA","reference-count":65,"publisher":"ACM","license":[{"start":{"date-parts":[[2025,4,22]],"date-time":"2025-04-22T00:00:00Z","timestamp":1745280000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,4,22]]},"DOI":"10.1145\/3696410.3714920","type":"proceedings-article","created":{"date-parts":[[2025,5,5]],"date-time":"2025-05-05T16:42:02Z","timestamp":1746463322000},"page":"2450-2463","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":2,"title":["2D-TPE: Two-Dimensional Positional Encoding Enhances Table Understanding for Large Language Models"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0000-3529-7869","authenticated-orcid":false,"given":"Jia-Nan","family":"Li","sequence":"first","affiliation":[{"name":"Renmin University of China, Beijing, China and Ant Group, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3597-0176","authenticated-orcid":false,"given":"Jian","family":"Guan","sequence":"additional","affiliation":[{"name":"Ant Group, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6079-7697","authenticated-orcid":false,"given":"Wei","family":"Wu","sequence":"additional","affiliation":[{"name":"Ant Group, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8952-8984","authenticated-orcid":false,"given":"Zhengtao","family":"Yu","sequence":"additional","affiliation":[{"name":"Kunming University of Science and Technology, Kunming, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3356-6823","authenticated-orcid":false,"given":"Rui","family":"Yan","sequence":"additional","affiliation":[{"name":"Renmin University of China, Beijing, China and Wuhan University, Wuhan, China"}]}],"member":"320","published-online":{"date-parts":[[2025,4,22]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al.","author":"Achiam Josh","year":"2023","unstructured":"Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. 2023. Gpt-4 technical report. arXiv preprint arXiv:2303.08774 (2023)."},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_1_3_1","unstructured":"Rohan Anil Andrew M Dai Orhan Firat Melvin Johnson Dmitry Lepikhin Alexandre Passos Siamak Shakeri Emanuel Taropa Paige Bailey Zhifeng Chen et al. 2023. Palm 2 technical report. arXiv preprint arXiv:2305.10403 (2023)."},{"key":"e_1_3_2_1_4_1","volume-title":"Levine (Eds.)","volume":"36","author":"Chen Pei","year":"2023","unstructured":"Pei Chen, Soumajyoti Sarkar, Leonard Lausen, Balasubramaniam Srinivasan, Sheng Zha, Ruihong Huang, and George Karypis. 2023. HyTrel: Hypergraph-enhanced Tabular Data Representation Learning. In Advances in Neural Information Processing Systems, A. Oh, T. Naumann, A. Globerson, K. Saenko, M. Hardt, and S. Levine (Eds.), Vol. 36. Curran Associates, Inc., 32173--32193. https:\/\/proceedings.neurips.cc\/paper_files\/paper\/2023\/file\/66178beae8f12fcd48699de95acc1152-Paper-Conference.pdf"},{"key":"e_1_3_2_1_5_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-eacl.83"},{"volume-title":"TabFact: A Large-scale Dataset for Table-based Fact Verification. In International Conference on Learning Representations.","author":"Chen Wenhu","key":"e_1_3_2_1_6_1","unstructured":"Wenhu Chen, Hongmin Wang, Jianshu Chen, Yunkai Zhang, Hong Wang, Shiyang Li, Xiyou Zhou, and William Yang Wang. [n. d.]. TabFact: A Large-scale Dataset for Table-based Fact Verification. In International Conference on Learning Representations."},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01138"},{"key":"e_1_3_2_1_8_1","volume-title":"From the Least to the Most: Building a Plug-and-Play Visual Reasoner via Data Synthesis. arXiv preprint arXiv:2406.19934","author":"Cheng Chuanqi","year":"2024","unstructured":"Chuanqi Cheng, Jian Guan, Wei Wu, and Rui Yan. 2024. From the Least to the Most: Building a Plug-and-Play Visual Reasoner via Data Synthesis. arXiv preprint arXiv:2406.19934 (2024)."},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-long.78"},{"volume-title":"Binding Language Models in Symbolic Languages. In The Eleventh International Conference on Learning Representations.","author":"Cheng Zhoujun","key":"e_1_3_2_1_10_1","unstructured":"Zhoujun Cheng, Tianbao Xie, Peng Shi, Chengzu Li, Rahul Nadkarni, Yushi Hu, Caiming Xiong, Dragomir Radev, Mari Ostendorf, Luke Zettlemoyer, et al. [n.d.]. Binding Language Models in Symbolic Languages. In The Eleventh International Conference on Learning Representations."},{"key":"e_1_3_2_1_11_1","volume-title":"The Twelfth International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=mZn2Xyh9Ec","author":"Dao Tri","year":"2024","unstructured":"Tri Dao. 2024. FlashAttention-2: Faster Attention with Better Parallelism and Work Partitioning. In The Twelfth International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=mZn2Xyh9Ec"},{"key":"e_1_3_2_1_12_1","unstructured":"DeepSpeed. 2023. DeepSpeed Flops Profiler. https:\/\/www.deepspeed.ai\/tutorials\/flops-profiler\/"},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.1145\/3542700.3542709"},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_1_15_1","unstructured":"Abhimanyu Dubey Abhinav Jauhri Abhinav Pandey Abhishek Kadian Ahmad Al-Dahle Aiesha Letman Akhil Mathur Alan Schelten Amy Yang Angela Fan et al. 2024. The llama 3 herd of models. arXiv preprint arXiv:2407.21783 (2024)."},{"key":"e_1_3_2_1_16_1","unstructured":"J. Dugundji. 1989. Topology. Wm. C. Brown. https:\/\/books.google.com.hk\/books?id=aXnMPQAACAAJ"},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","unstructured":"Julian Eisenschlos Maharshi Gor Thomas M\u00fcller and William Cohen. 2021. MATE: Multi-view Attention for Table Transformer Efficiency. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing Marie-Francine Moens Xuanjing Huang Lucia Specia and Scott Wen-tau Yih (Eds.). Association for Computational Linguistics Online and Punta Cana Dominican Republic 7606--7619. https:\/\/doi.org\/10.18653\/v1\/2021.emnlp-main.600","DOI":"10.18653\/v1"},{"key":"e_1_3_2_1_18_1","volume-title":"Sigmoid-weighted linear units for neural network function approximation in reinforcement learning. Neural networks 107","author":"Elfwing Stefan","year":"2018","unstructured":"Stefan Elfwing, Eiji Uchibe, and Kenji Doya. 2018. Sigmoid-weighted linear units for neural network function approximation in reinforcement learning. Neural networks 107 (2018), 3--11."},{"key":"e_1_3_2_1_19_1","volume-title":"AMOR: A Recipe for Building Adaptable Modular Knowledge Agents Through Process Feedback. arXiv preprint arXiv:2402.01469","author":"Guan Jian","year":"2024","unstructured":"Jian Guan, Wei Wu, Zujie Wen, Peng Xu, Hongning Wang, and Minlie Huang. 2024. AMOR: A Recipe for Building Adaptable Modular Knowledge Agents Through Process Feedback. arXiv preprint arXiv:2402.01469 (2024)."},{"key":"e_1_3_2_1_20_1","volume-title":"Rotary position embedding for vision transformer. arXiv preprint arXiv:2403.13298","author":"Heo Byeongho","year":"2024","unstructured":"Byeongho Heo, Song Park, Dongyoon Han, and Sangdoo Yun. 2024. Rotary position embedding for vision transformer. arXiv preprint arXiv:2403.13298 (2024)."},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_1_22_1","volume-title":"\u00dcber die stetige Abbildung einer Linie auf ein Fl\u00e4chenst\u00fcck. Dritter Band: Analysis\u00b7 Grundlagen der Mathematik\u00b7 Physik Verschiedenes: Nebst Einer Lebensgeschichte","author":"Hilbert David","year":"1935","unstructured":"David Hilbert and David Hilbert. 1935. \u00dcber die stetige Abbildung einer Linie auf ein Fl\u00e4chenst\u00fcck. Dritter Band: Analysis\u00b7 Grundlagen der Mathematik\u00b7 Physik Verschiedenes: Nebst Einer Lebensgeschichte (1935), 1--2."},{"key":"e_1_3_2_1_23_1","volume-title":"Minicpm: Unveiling the potential of small language models with scalable training strategies. arXiv preprint arXiv:2404.06395","author":"Hu Shengding","year":"2024","unstructured":"Shengding Hu, Yuge Tu, Xu Han, Chaoqun He, Ganqu Cui, Xiang Long, Zhi Zheng, Yewei Fang, Yuxiang Huang, Weilin Zhao, et al. 2024. Minicpm: Unveiling the potential of small language models with scalable training strategies. arXiv preprint arXiv:2404.06395 (2024)."},{"key":"e_1_3_2_1_24_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_1_25_1","volume-title":"Adaptive mixtures of local experts. Neural computation 3, 1","author":"Jacobs Robert A","year":"1991","unstructured":"Robert A Jacobs, Michael I Jordan, Steven J Nowlan, and Geoffrey E Hinton. 1991. Adaptive mixtures of local experts. Neural computation 3, 1 (1991), 79--87."},{"key":"e_1_3_2_1_26_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_1_27_1","volume-title":"Dongmei Zhang, and Surajit Chaudhuri.","author":"Li Peng","year":"2023","unstructured":"Peng Li, Yeye He, Dror Yashar, Weiwei Cui, Song Ge, Haidong Zhang, Danielle Rifinski Fainman, Dongmei Zhang, and Surajit Chaudhuri. 2023. Tablegpt: Table-tuned gpt for diverse table tasks. arXiv preprint arXiv:2310.09263 (2023)."},{"key":"e_1_3_2_1_28_1","volume-title":"Rouge: A package for automatic evaluation of summaries. In Text summarization branches out. 74--81.","author":"Lin Chin-Yew","year":"2004","unstructured":"Chin-Yew Lin. 2004. Rouge: A package for automatic evaluation of summaries. In Text summarization branches out. 74--81."},{"key":"e_1_3_2_1_29_1","volume-title":"International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=O50443AsCP","author":"Liu Qian","year":"2022","unstructured":"Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, and Jian-Guang Lou. 2022. TAPEX: Table Pre-training via Learning a Neural SQL Executor. In International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=O50443AsCP"},{"volume-title":"The Twelfth International Conference on Learning Representations.","author":"Liu Xiao","key":"e_1_3_2_1_30_1","unstructured":"Xiao Liu, Hao Yu, Hanchen Zhang, Yifan Xu, Xuanyu Lei, Hanyu Lai, Yu Gu, Hangliang Ding, Kaiwen Men, Kejuan Yang, et al. [n. d.]. AgentBench: Evaluating LLMs as Agents. In The Twelfth International Conference on Learning Representations."},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02497"},{"volume-title":"FiT: Flexible Vision Transformer for Diffusion Model. In Forty-first International Conference on Machine Learning.","author":"Lu Zeyu","key":"e_1_3_2_1_32_1","unstructured":"Zeyu Lu, ZiDong Wang, Di Huang, Chengyue Wu, Xihui Liu, Wanli Ouyang, and LEI BAI. [n. d.]. FiT: Flexible Vision Transformer for Diffusion Model. In Forty-first International Conference on Machine Learning."},{"key":"e_1_3_2_1_33_1","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00446"},{"key":"e_1_3_2_1_34_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_1_35_1","volume-title":"Proceedings of the 40th annual meeting of the Association for Computational Linguistics. 311--318","author":"Papineni Kishore","year":"2002","unstructured":"Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. Bleu: a method for automatic evaluation of machine translation. In Proceedings of the 40th annual meeting of the Association for Computational Linguistics. 311--318."},{"key":"e_1_3_2_1_36_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"publisher","DOI":"10.3115\/v1"},{"key":"e_1_3_2_1_38_1","volume-title":"Evaluation: from precision, recall and F-measure to ROC, informedness, markedness and correlation. arXiv preprint arXiv:2010.16061","author":"Powers David MW","year":"2020","unstructured":"David MW Powers. 2020. Evaluation: from precision, recall and F-measure to ROC, informedness, markedness and correlation. arXiv preprint arXiv:2010.16061 (2020)."},{"key":"e_1_3_2_1_39_1","volume-title":"Test Long: Attention with Linear Biases Enables Input Length Extrapolation. In International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=R8sQPpGCv0","author":"Press Ofir","year":"2022","unstructured":"Ofir Press, Noah Smith, and Mike Lewis. 2022. Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation. In International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=R8sQPpGCv0"},{"key":"e_1_3_2_1_40_1","unstructured":"pytorch. 2023. PyTorch Profiler. https:\/\/pytorch.org\/tutorials\/recipes\/recipes\/profiler_recipe.html"},{"key":"e_1_3_2_1_41_1","unstructured":"Alec Radford Karthik Narasimhan Tim Salimans Ilya Sutskever et al. 2018. Improving language understanding by generative pre-training. (2018)."},{"key":"e_1_3_2_1_42_1","doi-asserted-by":"publisher","DOI":"10.1109\/SC41405.2020.00024"},{"key":"e_1_3_2_1_43_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2023.127063"},{"key":"e_1_3_2_1_44_1","doi-asserted-by":"publisher","DOI":"10.1145\/3616855.3635752"},{"volume-title":"ICLR 2024 Workshop on Large Language Model (LLM) Agents.","author":"Tang Xiangru","key":"e_1_3_2_1_45_1","unstructured":"Xiangru Tang, Anni Zou, Zhuosheng Zhang, Ziming Li, Yilun Zhao, Xingyao Zhang, Arman Cohan, and Mark Gerstein. [n. d.]. MedAgents: Large Language Models as Collaborators for Zero-shot Medical Reasoning. In ICLR 2024 Workshop on Large Language Model (LLM) Agents."},{"key":"e_1_3_2_1_46_1","volume-title":"Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971","author":"Touvron Hugo","year":"2023","unstructured":"Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timoth\u00e9e Lacroix, Baptiste Rozi\u00e8re, Naman Goyal, Eric Hambro, Faisal Azhar, et al. 2023. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)."},{"key":"e_1_3_2_1_47_1","doi-asserted-by":"publisher","DOI":"10.1145\/3485447.3511972"},{"key":"e_1_3_2_1_48_1","volume-title":"Attention is all you need. Advances in neural information processing systems 30","author":"Vaswani Ashish","year":"2017","unstructured":"Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. Advances in neural information processing systems 30 (2017)."},{"key":"e_1_3_2_1_49_1","volume-title":"Qwen2-VL: Enhancing Vision-Language Model's Perception of the World at Any Resolution. arXiv preprint arXiv:2409.12191","author":"Wang Peng","year":"2024","unstructured":"Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. 2024. Qwen2-VL: Enhancing Vision-Language Model's Perception of the World at Any Resolution. arXiv preprint arXiv:2409.12191 (2024)."},{"key":"e_1_3_2_1_50_1","volume-title":"Proceedings of the 27th ACM SIGKDD Conference on Knowledge Discovery & Data Mining. 1780--1790","author":"Dong Haoyu","year":"2021","unstructured":"Zhiruo Wang, Haoyu Dong, Ran Jia, Jia Li, Zhiyi Fu, Shi Han, and Dongmei Zhang. 2021. Tuta: Tree-based transformers for generally structured table pre-training. In Proceedings of the 27th ACM SIGKDD Conference on Knowledge Discovery & Data Mining. 1780--1790."},{"key":"e_1_3_2_1_51_1","volume-title":"Chain-of-Table: Evolving Tables in the Reasoning Chain for Table Understanding. In The Twelfth International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=4L0xnS4GQM","author":"Wang Zilong","year":"2024","unstructured":"Zilong Wang, Hao Zhang, Chun-Liang Li, Julian Martin Eisenschlos, Vincent Perot, Zifeng Wang, Lesly Miculicich, Yasuhisa Fujii, Jingbo Shang, Chen-Yu Lee, and Tomas Pfister. 2024. Chain-of-Table: Evolving Tables in the Reasoning Chain for Table Understanding. In The Twelfth International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=4L0xnS4GQM"},{"key":"e_1_3_2_1_52_1","volume-title":"Tatsunori Hashimoto, Oriol Vinyals, Percy Liang, Jeff Dean, and William Fedus.","author":"Wei Jason","year":"2022","unstructured":"Jason Wei, Yi Tay, Rishi Bommasani, Colin Raffel, Barret Zoph, Sebastian Borgeaud, Dani Yogatama, Maarten Bosma, Denny Zhou, Donald Metzler, Ed H. Chi, Tatsunori Hashimoto, Oriol Vinyals, Percy Liang, Jeff Dean, and William Fedus. 2022. Emergent Abilities of Large Language Models. Transactions on Machine Learning Research (2022). https:\/\/openreview.net\/forum?id=yzkSU5zdwD Survey Certification."},{"key":"e_1_3_2_1_53_1","volume-title":"Denny Zhou, et al.","author":"Wei Jason","year":"2022","unstructured":"Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems 35 (2022), 24824--24837."},{"key":"e_1_3_2_1_54_1","doi-asserted-by":"publisher","DOI":"10.1145\/3589334.3645574"},{"key":"e_1_3_2_1_55_1","volume-title":"ProTrix: Building Models for Planning and Reasoning over Tables with Sentence Context. arXiv preprint arXiv:2403.02177","author":"Wu Zirui","year":"2024","unstructured":"Zirui Wu and Yansong Feng. 2024. ProTrix: Building Models for Planning and Reasoning over Tables with Sentence Context. arXiv preprint arXiv:2403.02177 (2024)."},{"key":"e_1_3_2_1_56_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_1_57_1","doi-asserted-by":"publisher","DOI":"10.1145\/3539618.3591708"},{"key":"e_1_3_2_1_58_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_1_59_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.naacl-long.335"},{"key":"e_1_3_2_1_60_1","volume-title":"et al","author":"Zhang Wentao","year":"2024","unstructured":"Wentao Zhang, Lingxuan Zhao, Haochong Xia, Shuo Sun, Jiaze Sun, Molei Qin, Xinyi Li, Yuqing Zhao, Yilei Zhao, Xinyu Cai, et al. 2024. FinAgent: A Multi-modal Foundation Agent for Financial Trading: Tool-Augmented, Diversified, and Generalist. arXiv preprint arXiv:2402.18485 (2024)."},{"key":"e_1_3_2_1_61_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_1_62_1","volume-title":"et al","author":"Zhang Xiaokang","year":"2024","unstructured":"Xiaokang Zhang, Jing Zhang, Zeyao Ma, Yang Li, Bohan Zhang, Guanlin Li, Zijun Yao, Kangli Xu, Jinchang Zhou, Daniel Zhang-Li, et al. 2024. TableLLM: Enabling Tabular Data Manipulation by LLMs in Real Office Usage Scenarios. arXiv preprint arXiv:2403.19318 (2024)."},{"key":"e_1_3_2_1_63_1","doi-asserted-by":"publisher","DOI":"10.14778\/3659437.3659452"},{"key":"e_1_3_2_1_64_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_1_65_1","volume-title":"Seq2sql: Generating structured queries from natural language using reinforcement learning. arXiv preprint arXiv:1709.00103","author":"Zhong Victor","year":"2017","unstructured":"Victor Zhong, Caiming Xiong, and Richard Socher. 2017. Seq2sql: Generating structured queries from natural language using reinforcement learning. arXiv preprint arXiv:1709.00103 (2017)."}],"event":{"name":"WWW '25: The ACM Web Conference 2025","sponsor":["SIGWEB ACM Special Interest Group on Hypertext, Hypermedia, and Web"],"location":"Sydney NSW Australia","acronym":"WWW '25"},"container-title":["Proceedings of the ACM on Web Conference 2025"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3696410.3714920","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3696410.3714920","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T01:18:54Z","timestamp":1750295934000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3696410.3714920"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,4,22]]},"references-count":65,"alternative-id":["10.1145\/3696410.3714920","10.1145\/3696410"],"URL":"https:\/\/doi.org\/10.1145\/3696410.3714920","relation":{},"subject":[],"published":{"date-parts":[[2025,4,22]]},"assertion":[{"value":"2025-04-22","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}