{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T17:39:26Z","timestamp":1777657166375,"version":"3.51.4"},"publisher-location":"New York, NY, USA","reference-count":55,"publisher":"ACM","content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,10,27]]},"DOI":"10.1145\/3746027.3755331","type":"proceedings-article","created":{"date-parts":[[2025,10,25]],"date-time":"2025-10-25T06:54:15Z","timestamp":1761375255000},"page":"10024-10033","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":1,"title":["SpeCa: Accelerating Diffusion Transformers with Speculative Feature Caching"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0002-8759-8641","authenticated-orcid":false,"given":"Jiacheng","family":"Liu","sequence":"first","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, China and Shandong University, Weihai, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-2663-2204","authenticated-orcid":false,"given":"Chang","family":"Zou","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, China and University of Electronic Science and Technology of China, Chengdu, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-1450-811X","authenticated-orcid":false,"given":"Yuanhuiyi","family":"Lyu","sequence":"additional","affiliation":[{"name":"The Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-3119-8429","authenticated-orcid":false,"given":"Fei","family":"Ren","sequence":"additional","affiliation":[{"name":"Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8156-7081","authenticated-orcid":false,"given":"Shaobo","family":"Wang","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-9842-3552","authenticated-orcid":false,"given":"Kaixin","family":"Li","sequence":"additional","affiliation":[{"name":"National University of Singapore, Singapore, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3341-183X","authenticated-orcid":false,"given":"Linfeng","family":"Zhang","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, China"}]}],"member":"320","published-online":{"date-parts":[[2025,10,27]]},"reference":[{"key":"e_1_3_2_1_1_1","unstructured":"Andreas Blattmann Tim Dockhorn Sumith Kulal Daniel Mendelevitch Maciej Kilian Dominik Lorenz Yam Levi Zion English Vikram Voleti Adam Letts et al. 2023. Stable video diffusion: Scaling latent video diffusion models to large datasets. arXiv preprint arXiv:2311.15127 (2023)."},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW59228.2023.00484"},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2401.10774"},{"key":"e_1_3_2_1_4_1","doi-asserted-by":"crossref","unstructured":"Junsong Chen Chongjian Ge Enze Xie Yue Wu Lewei Yao Xiaozhe Ren Zhongdao Wang Ping Luo Huchuan Lu and Zhenguo Li. 2024. PixArt-\u03a3: Weak-to-Strong Training of Diffusion Transformer for 4K Text-to-Image Generation. arXiv:2403.04692 [cs.CV]","DOI":"10.1007\/978-3-031-73411-3_5"},{"key":"e_1_3_2_1_5_1","volume-title":"PixArt-\u03b1: Fast Training of Diffusion Transformer for Photorealistic Text-to-Image Synthesis. In International Conference on Learning Representations.","author":"Chen Junsong","year":"2024","unstructured":"Junsong Chen, Jincheng Yu, Chongjian Ge, Lewei Yao, Enze Xie, Yue Wu, Zhongdao Wang, James Kwok, Ping Luo, Huchuan Lu, and Zhenguo Li. 2024b. PixArt-\u03b1: Fast Training of Diffusion Transformer for Photorealistic Text-to-Image Synthesis. In International Conference on Learning Representations."},{"key":"e_1_3_2_1_6_1","volume-title":"\u0394-DiT: A Training-Free Acceleration Method Tailored for Diffusion Transformers. arXiv preprint arXiv:2406.01125","author":"Chen Pengtao","year":"2024","unstructured":"Pengtao Chen, Mingzhu Shen, Peng Ye, Jianjian Cao, Chongjun Tu, Christos-Savvas Bouganis, Yiren Zhao, and Tao Chen. 2024a. \u0394-DiT: A Training-Free Acceleration Method Tailored for Diffusion Transformers. arXiv preprint arXiv:2406.01125 (2024)."},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2402.12374"},{"key":"e_1_3_2_1_8_1","volume-title":"FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness. ArXiv","author":"Dao Tri","year":"2022","unstructured":"Tri Dao, Daniel Y. Fu, Stefano Ermon, Atri Rudra, and Christopher R'e. 2022. FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness. ArXiv, Vol. abs\/2205.14135 (2022). https:\/\/api.semanticscholar.org\/CorpusID:249151871"},{"key":"e_1_3_2_1_9_1","volume-title":"Structural Pruning for Diffusion Models. arXiv preprint arXiv:2305.10924","author":"Fang Gongfan","year":"2023","unstructured":"Gongfan Fang, Xinyin Ma, and Xinchao Wang. 2023. Structural Pruning for Diffusion Models. arXiv preprint arXiv:2305.10924 (2023)."},{"key":"e_1_3_2_1_10_1","doi-asserted-by":"publisher","unstructured":"Dhruba Ghosh Hanna Hajishirzi and Ludwig Schmidt. 2023. GenEval: An Object-Focused Framework for Evaluating Text-to-Image Alignment. doi:10.48550\/arXiv.2310.11513 arXiv:2310.11513 [cs]","DOI":"10.48550\/arXiv.2310.11513"},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"publisher","unstructured":"Martin Heusel Hubert Ramsauer Thomas Unterthiner Bernhard Nessler and Sepp Hochreiter. 2018. GANs Trained by a Two Time-Scale Update Rule Converge to a Local Nash Equilibrium. doi:10.48550\/arXiv.1706.08500 arXiv:1706.08500 [cs].","DOI":"10.48550\/arXiv.1706.08500"},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","unstructured":"Jonathan Ho Ajay Jain and Pieter Abbeel. 2020a. Denoising Diffusion Probabilistic Models. doi:10.48550\/arXiv.2006.11239 arXiv:2006.11239 [cs].","DOI":"10.48550\/arXiv.2006.11239"},{"key":"e_1_3_2_1_13_1","volume-title":"Denoising diffusion probabilistic models. Advances in neural information processing systems","author":"Ho Jonathan","year":"2020","unstructured":"Jonathan Ho, Ajay Jain, and Pieter Abbeel. 2020b. Denoising diffusion probabilistic models. Advances in neural information processing systems, Vol. 33 (2020), 6840-6851."},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","unstructured":"Ziqi Huang Yinan He Jiashuo Yu Fan Zhang Chenyang Si Yuming Jiang Yuanhan Zhang Tianxing Wu Qingyang Jin Nattapol Chanpaisit Yaohui Wang Xinyuan Chen Limin Wang Dahua Lin Yu Qiao and Ziwei Liu. 2023. VBench: Comprehensive Benchmark Suite for Video Generative Models. doi:10.48550\/arXiv.2311.17982 arXiv:2311.17982 [cs].","DOI":"10.48550\/arXiv.2311.17982"},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"publisher","DOI":"10.1109\/WACV57701.2024.00141"},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1109\/HPCA61900.2025.00035"},{"key":"e_1_3_2_1_17_1","unstructured":"Black Forest Labs. 2024. FLUX. https:\/\/github.com\/black-forest-labs\/flux."},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","unstructured":"Yaniv Leviathan Matan Kalman and Yossi Matias. 2023. Fast Inference from Transformers via Speculative Decoding. doi:10.48550\/arXiv.2211.17192 arXiv:2211.17192 [cs]","DOI":"10.48550\/arXiv.2211.17192"},{"key":"e_1_3_2_1_19_1","volume-title":"Linxuan Li, Shiqi Yang, Yaxing Wang, Ming-Ming Cheng, and Jian Yang.","author":"Li Senmao","year":"2023","unstructured":"Senmao Li, Taihang Hu, Fahad Shahbaz Khan, Linxuan Li, Shiqi Yang, Yaxing Wang, Ming-Ming Cheng, and Jian Yang. 2023a. Faster diffusion: Rethinking the role of unet encoder in diffusion models. arXiv preprint arXiv:2312.09608 (2023)."},{"key":"e_1_3_2_1_20_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01608"},{"key":"e_1_3_2_1_21_1","volume-title":"Advances in Neural Information Processing Systems","volume":"36","author":"Li Yanyu","year":"2024","unstructured":"Yanyu Li, Huan Wang, Qing Jin, Ju Hu, Pavlo Chemerys, Yun Fu, Yanzhi Wang, Sergey Tulyakov, and Jian Ren. 2024. Snapfusion: Text-to-image diffusion model on mobile devices within two seconds. Advances in Neural Information Processing Systems, Vol. 36 (2024)."},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","unstructured":"Zhimin Li Jianwei Zhang and and others Lin. 2024. Hunyuan-DiT: A Powerful Multi-Resolution Diffusion Transformer with Fine-Grained Chinese Understanding. doi:10.48550\/arXiv.2405.08748 arXiv:2405.08748 [cs]","DOI":"10.48550\/arXiv.2405.08748"},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"crossref","unstructured":"Feng Liu Shiwei Zhang Xiaofeng Wang Yujie Wei Haonan Qiu Yuzhong Zhao Yingya Zhang Qixiang Ye and Fang Wan. 2024. Timestep Embedding Tells: It's Time to Cache for Video Diffusion Model. arXiv:2411.19108 [cs.CV]","DOI":"10.1109\/CVPR52734.2025.00689"},{"key":"e_1_3_2_1_24_1","doi-asserted-by":"publisher","unstructured":"Jiacheng Liu Chang Zou Yuanhuiyi Lyu Junjie Chen and Linfeng Zhang. 2025. From Reusing to Forecasting: Accelerating Diffusion Models with TaylorSeers. doi:10.48550\/arXiv.2503.06923 arXiv:2503.06923 [cs]","DOI":"10.48550\/arXiv.2503.06923"},{"key":"e_1_3_2_1_25_1","volume-title":"The Eleventh International Conference on Learning Representations.","author":"Liu Xingchao","year":"2023","unstructured":"Xingchao Liu, Chengyue Gong, et al., 2023. Flow Straight and Fast: Learning to Generate and Transfer Data with Rectified Flow. In The Eleventh International Conference on Learning Representations."},{"key":"e_1_3_2_1_26_1","unstructured":"Ziming Liu Yifan Yang Chengruidong Zhang Yiqi Zhang Lili Qiu Yang You and Yuqing Yang. 2025. Region-Adaptive Sampling for Diffusion Transformers. arXiv:2502.10389 [cs.CV] https:\/\/arxiv.org\/abs\/2502.10389"},{"key":"e_1_3_2_1_27_1","first-page":"5775","article-title":"Dpm-solver: A fast ode solver for diffusion probabilistic model sampling in around 10 steps","volume":"35","author":"Lu Cheng","year":"2022","unstructured":"Cheng Lu, Yuhao Zhou, Fan Bao, Jianfei Chen, Chongxuan Li, and Jun Zhu. 2022a. Dpm-solver: A fast ode solver for diffusion probabilistic model sampling in around 10 steps. Advances in Neural Information Processing Systems, Vol. 35 (2022), 5775-5787.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_28_1","volume-title":"Dpm-solver: Fast solver for guided sampling of diffusion probabilistic models. arXiv preprint arXiv:2211.01095","author":"Lu Cheng","year":"2022","unstructured":"Cheng Lu, Yuhao Zhou, Fan Bao, Jianfei Chen, Chongxuan Li, and Jun Zhu. 2022b. Dpm-solver: Fast solver for guided sampling of diffusion probabilistic models. arXiv preprint arXiv:2211.01095 (2022)."},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01492"},{"key":"e_1_3_2_1_30_1","volume-title":"On Distillation of Guided Diffusion Models. In NeurIPS 2022 Workshop on Score-Based Methods. https:\/\/openreview.net\/forum?id=6QHpSQt6VR-","author":"Meng Chenlin","year":"2022","unstructured":"Chenlin Meng, Ruiqi Gao, Diederik P Kingma, Stefano Ermon, Jonathan Ho, and Tim Salimans. 2022. On Distillation of Guided Diffusion Models. In NeurIPS 2022 Workshop on Score-Based Methods. https:\/\/openreview.net\/forum?id=6QHpSQt6VR-"},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"publisher","DOI":"10.1145\/3620666.3651335"},{"key":"e_1_3_2_1_32_1","doi-asserted-by":"publisher","unstructured":"William Peebles and Saining Xie. 2023a. Scalable Diffusion Models with Transformers. doi:10.48550\/arXiv.2212.09748 arXiv:2212.09748 [cs].","DOI":"10.48550\/arXiv.2212.09748"},{"key":"e_1_3_2_1_33_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00387"},{"key":"e_1_3_2_1_34_1","unstructured":"Junxiang Qiu Shuo Wang Jinda Lu Lin Liu Houcheng Jiang and Yanbin Hao. 2025. Accelerating Diffusion Transformer via Error-Optimized Cache. arXiv:2501.19243 [cs.CV] https:\/\/arxiv.org\/abs\/2501.19243"},{"key":"e_1_3_2_1_35_1","doi-asserted-by":"publisher","unstructured":"Robin Rombach Andreas Blattmann Dominik Lorenz Patrick Esser and Bj\u00f6rn Ommer. 2022. High-Resolution Image Synthesis with Latent Diffusion Models. doi:10.48550\/arXiv.2112.10752 arXiv:2112.10752 [cs].","DOI":"10.48550\/arXiv.2112.10752"},{"key":"e_1_3_2_1_36_1","first-page":"234","volume-title":"Munich","author":"Ronneberger Olaf","year":"2015","unstructured":"Olaf Ronneberger, Philipp Fischer, and Thomas Brox. 2015. U-net: Convolutional networks for biomedical image segmentation. In Medical image computing and computer-assisted intervention-MICCAI 2015: 18th international conference, Munich, Germany, October 5-9, 2015, proceedings, part III 18. Springer, 234-241."},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"publisher","unstructured":"Olga Russakovsky Jia Deng Hao Su Jonathan Krause Sanjeev Satheesh Sean Ma Zhiheng Huang Andrej Karpathy Aditya Khosla Michael Bernstein Alexander C. Berg and Li Fei-Fei. 2015. ImageNet Large Scale Visual Recognition Challenge. doi:10.48550\/arXiv.1409.0575 arXiv:1409.0575 [cs].","DOI":"10.48550\/arXiv.1409.0575"},{"key":"e_1_3_2_1_38_1","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2205.11487"},{"key":"e_1_3_2_1_39_1","volume-title":"Progressive distillation for fast sampling of diffusion models. arXiv preprint arXiv:2202.00512","author":"Salimans Tim","year":"2022","unstructured":"Tim Salimans and Jonathan Ho. 2022. Progressive distillation for fast sampling of diffusion models. arXiv preprint arXiv:2202.00512 (2022)."},{"key":"e_1_3_2_1_40_1","volume-title":"FORA: Fast-Forward Caching in Diffusion Transformer Acceleration. arXiv preprint arXiv:2407.01425","author":"Selvaraju Pratheba","year":"2024","unstructured":"Pratheba Selvaraju, Tianyu Ding, Tianyi Chen, Ilya Zharkov, and Luming Liang. 2024. FORA: Fast-Forward Caching in Diffusion Transformer Acceleration. arXiv preprint arXiv:2407.01425 (2024)."},{"key":"e_1_3_2_1_41_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00196"},{"key":"e_1_3_2_1_42_1","volume-title":"International conference on machine learning. PMLR, 2256-2265","author":"Sohl-Dickstein Jascha","year":"2015","unstructured":"Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. 2015. Deep unsupervised learning using nonequilibrium thermodynamics. In International conference on machine learning. PMLR, 2256-2265."},{"key":"e_1_3_2_1_43_1","volume-title":"Denoising Diffusion Implicit Models. In International Conference on Learning Representations.","author":"Song Jiaming","year":"2021","unstructured":"Jiaming Song, Chenlin Meng, and Stefano Ermon. 2021. Denoising Diffusion Implicit Models. In International Conference on Learning Representations."},{"key":"e_1_3_2_1_44_1","volume-title":"Consistency Models. In International Conference on Machine Learning. PMLR, 32211-32252","author":"Song Yang","year":"2023","unstructured":"Yang Song, Prafulla Dhariwal, Mark Chen, and Ilya Sutskever. 2023. Consistency Models. In International Conference on Machine Learning. PMLR, 32211-32252."},{"key":"e_1_3_2_1_45_1","unstructured":"Wenzhang Sun Qirui Hou Donglin Di Jiahui Yang Yongjia Ma and Jianxun Cui. 2025. UniCP: A Unified Caching and Pruning Framework for Efficient Video Generation. arXiv:2502.04393 [cs.CV] https:\/\/arxiv.org\/abs\/2502.04393"},{"key":"e_1_3_2_1_46_1","doi-asserted-by":"publisher","unstructured":"Xingwu Sun Yanfeng Chen Huang et al. 2024a. Hunyuan-Large: An Open-Source MoE Model with 52 Billion Activated Parameters by Tencent. doi:10.48550\/arXiv.2411.02265 arXiv:2411.02265 [cs]","DOI":"10.48550\/arXiv.2411.02265"},{"key":"e_1_3_2_1_47_1","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2310.15141"},{"key":"e_1_3_2_1_48_1","doi-asserted-by":"publisher","unstructured":"Jiazheng Xu Xiao Liu Yuchen Wu Yuxuan Tong Qinkai Li Ming Ding Jie Tang and Yuxiao Dong. 2023. ImageReward: Learning and Evaluating Human Preferences for Text-to-Image Generation. doi:10.48550\/arXiv.2304.05977 arXiv:2304.05977 [cs].","DOI":"10.48550\/arXiv.2304.05977"},{"key":"e_1_3_2_1_49_1","volume-title":"CogVideoX: Text-to-Video Diffusion Models with An Expert Transformer. In The Thirteenth International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=LQzN6TRFg9","author":"Yang Zhuoyi","year":"2025","unstructured":"Zhuoyi Yang, Jiayan Teng, Wendi Zheng, Ming Ding, Shiyu Huang, Jiazheng Xu, Yuanming Yang, Wenyi Hong, Xiaohan Zhang, Guanyu Feng, Da Yin, Xiaotao Gu, Yuxuan.Zhang, Weihan Wang, Yean Cheng, Bin Xu, Yuxiao Dong, and Jie Tang. 2025. CogVideoX: Text-to-Video Diffusion Models with An Expert Transformer. In The Thirteenth International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=LQzN6TRFg9"},{"key":"e_1_3_2_1_50_1","volume-title":"DiTFastAttn: Attention Compression for Diffusion Transformer Models. In The Thirty-eighth Annual Conference on Neural Information Processing Systems. https:\/\/openreview.net\/forum?id=51HQpkQy3t","author":"Yuan Zhihang","year":"2024","unstructured":"Zhihang Yuan, Hanling Zhang, Lu Pu, Xuefei Ning, Linfeng Zhang, Tianchen Zhao, Shengen Yan, Guohao Dai, and Yu Wang. 2024. DiTFastAttn: Attention Compression for Diffusion Transformer Models. In The Thirty-eighth Annual Conference on Neural Information Processing Systems. https:\/\/openreview.net\/forum?id=51HQpkQy3t"},{"key":"e_1_3_2_1_51_1","volume-title":"Thirty-seventh Conference on Neural Information Processing Systems. https:\/\/openreview.net\/forum?id=9fWKExmKa0","author":"Zheng Kaiwen","year":"2023","unstructured":"Kaiwen Zheng, Cheng Lu, Jianfei Chen, and Jun Zhu. 2023. DPM-Solver-v3: Improved Diffusion ODE Solver with Empirical Model Statistics. In Thirty-seventh Conference on Neural Information Processing Systems. https:\/\/openreview.net\/forum?id=9fWKExmKa0"},{"key":"e_1_3_2_1_52_1","unstructured":"Zangwei Zheng Xiangyu Peng Tianji Yang Chenhui Shen Shenggui Li Hongxin Liu Yukun Zhou Tianyi Li and Yang You. 2024. Open-Sora: Democratizing Efficient Video Production for All. https:\/\/github.com\/hpcaitech\/Open-Sora"},{"key":"e_1_3_2_1_53_1","unstructured":"Haowei Zhu Dehua Tang Ji Liu Mingjie Lu Jintu Zheng Jinzhang Peng Dong Li Yu Wang Fan Jiang Lu Tian Spandan Tiwari Ashish Sirasao Jun-Hai Yong Bin Wang and Emad Barsoum. 2024. DiP-GO: A Diffusion Pruner via Few-step Gradient Optimization. arXiv:2410.16942 [cs.CV]"},{"key":"e_1_3_2_1_54_1","volume-title":"Accelerating Diffusion Transformers with Token-wise Feature Caching. arXiv preprint arXiv:2410.05317","author":"Zou Chang","year":"2024","unstructured":"Chang Zou, Xuyang Liu, Ting Liu, Siteng Huang, and Linfeng Zhang. 2024a. Accelerating Diffusion Transformers with Token-wise Feature Caching. arXiv preprint arXiv:2410.05317 (2024)."},{"key":"e_1_3_2_1_55_1","unstructured":"Chang Zou Evelyn Zhang Runlin Guo Haohang Xu Conghui He Xuming Hu and Linfeng Zhang. 2024b. Accelerating Diffusion Transformers with Dual Feature Caching. arXiv:2412.18911 [cs.LG] https:\/\/arxiv.org\/abs\/2412.18911"}],"event":{"name":"MM '25: The 33rd ACM International Conference on Multimedia","location":"Dublin Ireland","acronym":"MM '25","sponsor":["SIGMM ACM Special Interest Group on Multimedia"]},"container-title":["Proceedings of the 33rd ACM International Conference on Multimedia"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3746027.3755331","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,10]],"date-time":"2025-12-10T04:04:58Z","timestamp":1765339498000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3746027.3755331"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,27]]},"references-count":55,"alternative-id":["10.1145\/3746027.3755331","10.1145\/3746027"],"URL":"https:\/\/doi.org\/10.1145\/3746027.3755331","relation":{},"subject":[],"published":{"date-parts":[[2025,10,27]]},"assertion":[{"value":"2025-10-27","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}