{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,23]],"date-time":"2026-04-23T14:46:49Z","timestamp":1776955609740,"version":"3.51.4"},"publisher-location":"New York, NY, USA","reference-count":75,"publisher":"ACM","license":[{"start":{"date-parts":[[2025,2,27]],"date-time":"2025-02-27T00:00:00Z","timestamp":1740614400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,2,27]]},"DOI":"10.1145\/3706628.3708864","type":"proceedings-article","created":{"date-parts":[[2025,2,26]],"date-time":"2025-02-26T12:22:11Z","timestamp":1740572531000},"page":"2-13","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":10,"title":["FlightVGM: Efficient Video Generation Model Inference with Online Sparsification and Hybrid Precision on FPGAs"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0003-8280-9072","authenticated-orcid":false,"given":"Jun","family":"Liu","sequence":"first","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, China, &amp; Infinigence-AI, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1030-3748","authenticated-orcid":false,"given":"Shulin","family":"Zeng","sequence":"additional","affiliation":[{"name":"Tsinghua University, Beijing, China, &amp; Infinigence-AI, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-0582-4503","authenticated-orcid":false,"given":"Li","family":"Ding","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-4930-5648","authenticated-orcid":false,"given":"Widyadewi","family":"Soedarmadji","sequence":"additional","affiliation":[{"name":"Tsinghua University, Beijing, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0003-8421-1242","authenticated-orcid":false,"given":"Hao","family":"Zhou","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5958-9599","authenticated-orcid":false,"given":"Zehao","family":"Wang","sequence":"additional","affiliation":[{"name":"Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-4286-6359","authenticated-orcid":false,"given":"Jinhao","family":"Li","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-8903-8276","authenticated-orcid":false,"given":"Jintao","family":"Li","sequence":"additional","affiliation":[{"name":"Infinigence-AI, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-9193-8494","authenticated-orcid":false,"given":"Yadong","family":"Dai","sequence":"additional","affiliation":[{"name":"Infinigence-AI, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-4357-2717","authenticated-orcid":false,"given":"Kairui","family":"Wen","sequence":"additional","affiliation":[{"name":"Infinigence-AI, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-4922-2718","authenticated-orcid":false,"given":"Shan","family":"He","sequence":"additional","affiliation":[{"name":"Infinigence-AI, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-0645-2466","authenticated-orcid":false,"given":"Yaqi","family":"Sun","sequence":"additional","affiliation":[{"name":"Infinigence-AI, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6108-5157","authenticated-orcid":false,"given":"Yu","family":"Wang","sequence":"additional","affiliation":[{"name":"Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0849-3252","authenticated-orcid":false,"given":"Guohao","family":"Dai","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University, 
Shanghai, China, &amp; Infinigence-AI, Shanghai, China"}]}],"member":"320","published-online":{"date-parts":[[2025,2,27]]},"reference":[{"key":"e_1_3_2_1_1_1","unstructured":"2013. NVIDIA-SMI. https:\/\/docs.nvidia.com\/deploy\/pdf\/NVSMI_Manual.pdf."},{"key":"e_1_3_2_1_2_1","volume-title":"Retrieved","year":"2024","unstructured":"2020. NVIDIA AMPERE GA102 GPU ARCHITECTURE. Retrieved September 25, 2024 from https:\/\/www.nvidia.com\/content\/PDF\/nvidia-ampere-ga-102-gpuarchitecture-whitepaper-v2.pdf"},{"key":"e_1_3_2_1_3_1","volume-title":"openai\/clip-vit-base-patch32. Retrieved","year":"2024","unstructured":"2021. openai\/clip-vit-base-patch32. Retrieved September 7, 2024 from https:\/\/huggingface.co\/openai\/clip-vit-base-patch32"},{"key":"e_1_3_2_1_4_1","volume-title":"sentence-transformers\/clip-ViT-B-32. Retrieved","year":"2024","unstructured":"2021. sentence-transformers\/clip-ViT-B-32. Retrieved September 7, 2024 from https:\/\/huggingface.co\/sentence-transformers\/clip-ViT-B-32"},{"key":"e_1_3_2_1_5_1","volume-title":"NVIDIA ADA GPU ARCHITECTURE. Retrieved","year":"2024","unstructured":"2022. NVIDIA ADA GPU ARCHITECTURE. Retrieved September 7, 2024 from https:\/\/images.nvidia.com\/aem-dam\/Solutions\/Data-Center\/l4\/nvidia-adagpu-architecture-whitepaper-v2.1.pdf"},{"key":"e_1_3_2_1_6_1","volume-title":"Versal ACAP DSP Engine Architecture Manual (AM004). Retrieved","year":"2024","unstructured":"2022. Versal ACAP DSP Engine Architecture Manual (AM004). Retrieved September 7, 2024 from https:\/\/docs.amd.com\/r\/en-US\/am004-versal-dsp-engine"},{"key":"e_1_3_2_1_7_1","volume-title":"Alveo U280 Data Center Accelerator Card Data Sheet (DS963). Retrieved","year":"2024","unstructured":"2023. Alveo U280 Data Center Accelerator Card Data Sheet (DS963). Retrieved September 25, 2024 from https:\/\/docs.amd.com\/r\/en-US\/ds963-u280\/Summary"},{"key":"e_1_3_2_1_8_1","volume-title":"Retrieved","year":"2024","unstructured":"2023. 
Versal AI Edge Series VEK280 Evaluation Kit. Retrieved September 27, 2024 from https:\/\/www.xilinx.com\/products\/boards-and-kits\/vek280.html"},{"key":"e_1_3_2_1_9_1","volume-title":"Alveo V80 Compute Accelerator Card. Retrieved","year":"2024","unstructured":"2024. Alveo V80 Compute Accelerator Card. Retrieved September 7, 2024 from https:\/\/www.amd.com\/content\/dam\/amd\/en\/documents\/products\/accelerators\/alveo\/v80\/alveo-v80-product-brief.pdf"},{"key":"e_1_3_2_1_10_1","volume-title":"Retrieved","year":"2024","unstructured":"2024. AVED Management Interface userguide (ami_tool). Retrieved September 27, 2024 from https:\/\/xilinx.github.io\/AVED\/amd_v80_gen5x8_exdes_2_20240408\/AVED%2BManagement%2BInterface%2Buserguide%2B%28ami_tool%29.html"},{"key":"e_1_3_2_1_11_1","volume-title":"chatgpt. Retrieved","year":"2024","unstructured":"2024. chatgpt. Retrieved September 7, 2024 from https:\/\/openai.com\/chatgpt\/"},{"key":"e_1_3_2_1_12_1","volume-title":"Dreamina: Free AI Image Generator. Retrieved","year":"2024","unstructured":"2024. Dreamina: Free AI Image Generator. Retrieved September 7, 2024 from https:\/\/dreamina.capcut.com\/"},{"key":"e_1_3_2_1_13_1","unstructured":"2024. Kling. Retrieved September 7 2024 from https:\/\/kling.kuaishou.com\/"},{"key":"e_1_3_2_1_14_1","volume-title":"Retrieved","year":"2024","unstructured":"2024. Latte gallery. Retrieved June 17, 2024 from https:\/\/maxin-cn.github.io\/latte_project\/"},{"key":"e_1_3_2_1_15_1","volume-title":"Retrieved","year":"2024","unstructured":"2024. Open-Sora gallery. Retrieved June 17, 2024 from https:\/\/hpcaitech.github.io\/Open-Sora\/"},{"key":"e_1_3_2_1_16_1","unstructured":"2024. Sora. Retrieved September 7 2024 from https:\/\/openai.com\/index\/sora\/"},{"key":"e_1_3_2_1_17_1","volume-title":"t2v_sora.txt. Retrieved","year":"2024","unstructured":"2024. t2v_sora.txt. 
Retrieved September 27, 2024 from https:\/\/github.com\/hpcaitech\/Open-Sora\/blob\/main\/assets\/texts\/t2v_sora.txt"},{"key":"e_1_3_2_1_18_1","unstructured":"2024. Veo. Retrieved September 7 2024 from https:\/\/deepmind.google\/technologies\/veo\/"},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.1109\/ARITH.2017.29"},{"key":"e_1_3_2_1_20_1","volume-title":"A comprehensive survey of ai-generated content (aigc): A history of generative ai from gan to chatgpt. arXiv preprint arXiv:2303.04226","author":"Cao Yihan","year":"2023","unstructured":"Yihan Cao, Siyu Li, Yixin Liu, Zhiling Yan, Yutong Dai, Philip S Yu, and Lichao Sun. 2023. A comprehensive survey of ai-generated content (aigc): A history of generative ai from gan to chatgpt. arXiv preprint arXiv:2303.04226 (2023)."},{"key":"e_1_3_2_1_21_1","volume-title":"Understanding the potential of fpga based spatial acceleration for large language model inference. ACM Transactions on Reconfigurable Technology and Systems","author":"Chen Hongzheng","year":"2024","unstructured":"Hongzheng Chen, Jiahao Zhang, Yixiao Du, Shaojie Xiang, Zichao Yue, Niansong Zhang, Yaohui Cai, and Zhiru Zhang. 2024. Understanding the potential of fpga based spatial acceleration for large language model inference. ACM Transactions on Reconfigurable Technology and Systems (2024)."},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.1145\/3358236"},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"publisher","DOI":"10.1109\/HPCA56546.2023.10071047"},{"key":"e_1_3_2_1_24_1","doi-asserted-by":"publisher","DOI":"10.1145\/3490422.3502368"},{"key":"e_1_3_2_1_25_1","unstructured":"Yao Fu Ephrem Wu Ashish Sirasao Sedny Attia Kamran Khan and Ralph Wittig. 2017. Deep Learning with INT8 Optimization on Xilinx Devices. 
(2017)."},{"key":"e_1_3_2_1_26_1","doi-asserted-by":"publisher","DOI":"10.1109\/TCAD.2023.3281714"},{"key":"e_1_3_2_1_27_1","doi-asserted-by":"publisher","DOI":"10.2478\/jagi-2014-0001"},{"key":"e_1_3_2_1_28_1","volume-title":"SC18: International Conference for High Performance Computing, Networking, Storage and Analysis. IEEE, 603--613","author":"Haidar Azzam","year":"2018","unstructured":"Azzam Haidar, Stanimire Tomov, Jack Dongarra, and Nicholas J Higham. 2018. Harnessing GPU tensor cores for fast FP16 arithmetic to speed up mixedprecision iterative refinement solvers. In SC18: International Conference for High Performance Computing, Networking, Storage and Analysis. IEEE, 603--613."},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"publisher","DOI":"10.1145\/3020078.3021745"},{"key":"e_1_3_2_1_30_1","doi-asserted-by":"publisher","DOI":"10.1145\/3007787.3001163"},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"publisher","DOI":"10.1109\/TPDS.2019.2910525"},{"key":"e_1_3_2_1_32_1","volume-title":"Denoising diffusion probabilistic models. Advances in neural information processing systems 33","author":"Ho Jonathan","year":"2020","unstructured":"Jonathan Ho, Ajay Jain, and Pieter Abbeel. 2020. Denoising diffusion probabilistic models. Advances in neural information processing systems 33 (2020), 6840--6851."},{"key":"e_1_3_2_1_33_1","first-page":"148","article-title":"FlashDecoding: Faster Large Language Model Inference with Asynchronization, Flat GEMM Optimization, and Heuristics","volume":"6","author":"Hong Ke","year":"2024","unstructured":"Ke Hong, Guohao Dai, Jiaming Xu, Qiuli Mao, Xiuhong Li, Jun Liu, Yuhan Dong, Yu Wang, et al. 2024. FlashDecoding: Faster Large Language Model Inference with Asynchronization, Flat GEMM Optimization, and Heuristics. 
Proceedings of Machine Learning and Systems 6 (2024), 148--161.","journal-title":"Proceedings of Machine Learning and Systems"},{"key":"e_1_3_2_1_34_1","volume-title":"Cogvideo: Large-scale pretraining for text-to-video generation via transformers. arXiv preprint arXiv:2205.15868","author":"Hong Wenyi","year":"2022","unstructured":"Wenyi Hong, Ming Ding, Wendi Zheng, Xinghan Liu, and Jie Tang. 2022. Cogvideo: Large-scale pretraining for text-to-video generation via transformers. arXiv preprint arXiv:2205.15868 (2022)."},{"key":"e_1_3_2_1_35_1","doi-asserted-by":"publisher","DOI":"10.1145\/3613424.3623775"},{"key":"e_1_3_2_1_36_1","doi-asserted-by":"publisher","DOI":"10.1109\/TC.2011.77"},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02060"},{"key":"e_1_3_2_1_38_1","doi-asserted-by":"publisher","DOI":"10.1109\/FCCM57271.2023.00023"},{"key":"e_1_3_2_1_39_1","unstructured":"Jinhao Li Jiaming Xu Shan Huang Yonghua Chen Wen Li Jun Liu Yaoxiu Lian Jiayi Pan Li Ding Hao Zhou et al. 2024. Large language model inference acceleration: A comprehensive hardware perspective. arXiv preprint arXiv:2410.04466 (2024)."},{"key":"e_1_3_2_1_40_1","unstructured":"Wenxuan Liu and Sai Qian Zhang. 2024. HQ-DiT: Efficient Diffusion Transformer with FP4 Hybrid Quantization. arXiv:2405.19751 [cs.CV] https:\/\/arxiv.org\/abs\/2405.19751"},{"key":"e_1_3_2_1_41_1","volume-title":"FPGA-Based Sparse Matrix Multiplication Accelerators: From State-of-the-art to Future Opportunities. ACM Transactions on Reconfigurable Technology and Systems","author":"Liu Yajing","year":"2024","unstructured":"Yajing Liu, Ruiqi Chen, Shuyang Li, Jing Yang, Shun Li, and Bruno da Silva. 2024. FPGA-Based Sparse Matrix Multiplication Accelerators: From State-of-the-art to Future Opportunities. 
ACM Transactions on Reconfigurable Technology and Systems (2024)."},{"key":"e_1_3_2_1_42_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02090"},{"key":"e_1_3_2_1_43_1","volume-title":"Latte: Latent Diffusion Transformer for Video Generation. arXiv preprint arXiv:2401.03048","author":"Ma Xin","year":"2024","unstructured":"Xin Ma, Yaohui Wang, Gengyun Jia, Xinyuan Chen, Ziwei Liu, Yuan-Fang Li, Cunjian Chen, and Yu Qiao. 2024. Latte: Latent Diffusion Transformer for Video Generation. arXiv preprint arXiv:2401.03048 (2024)."},{"key":"e_1_3_2_1_44_1","doi-asserted-by":"publisher","DOI":"10.1109\/ECRTS.2015.26"},{"key":"e_1_3_2_1_45_1","doi-asserted-by":"publisher","DOI":"10.1109\/ECRTS.2015.26"},{"key":"e_1_3_2_1_46_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00387"},{"key":"e_1_3_2_1_47_1","doi-asserted-by":"publisher","DOI":"10.1145\/3626202.3637557"},{"key":"e_1_3_2_1_48_1","volume-title":"Make-a-video: Text-to-video generation without text-video data. arXiv preprint arXiv:2209.14792","author":"Singer Uriel","year":"2022","unstructured":"Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, Harry Yang, Oron Ashual, Oran Gafni, et al. 2022. Make-a-video: Text-to-video generation without text-video data. arXiv preprint arXiv:2209.14792 (2022)."},{"key":"e_1_3_2_1_49_1","doi-asserted-by":"publisher","DOI":"10.1109\/FPL57034.2022.00035"},{"key":"e_1_3_2_1_50_1","volume-title":"Denoising diffusion implicit models. arXiv preprint arXiv:2010.02502","author":"Song Jiaming","year":"2020","unstructured":"Jiaming Song, Chenlin Meng, and Stefano Ermon. 2020. Denoising diffusion implicit models. 
arXiv preprint arXiv:2010.02502 (2020)."},{"key":"e_1_3_2_1_51_1","doi-asserted-by":"publisher","DOI":"10.1145\/3489517.3530420"},{"key":"e_1_3_2_1_52_1","doi-asserted-by":"publisher","DOI":"10.1145\/3490422.3502357"},{"key":"e_1_3_2_1_53_1","doi-asserted-by":"publisher","DOI":"10.1145\/3620665.3640393"},{"key":"e_1_3_2_1_54_1","volume-title":"Amir Roshan Zamir, and Mubarak Shah","author":"Soomro Khurram","year":"2012","unstructured":"Khurram Soomro, Amir Roshan Zamir, and Mubarak Shah. 2012. UCF101: A dataset of 101 human actions classes from videos in the wild. arXiv preprint arXiv:1212.0402 (2012)."},{"key":"e_1_3_2_1_55_1","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2012.2221191"},{"key":"e_1_3_2_1_56_1","doi-asserted-by":"publisher","DOI":"10.1109\/71.993206"},{"key":"e_1_3_2_1_57_1","doi-asserted-by":"publisher","DOI":"10.1109\/RTAS.2017.2"},{"key":"e_1_3_2_1_58_1","volume-title":"2021 IEEE International Symposium on High-Performance Computer Architecture (HPCA). IEEE, 97--110","author":"Zhang Zhekai","year":"2021","unstructured":"Hanrui Wang, Zhekai Zhang, and Song Han. 2021. Spatten: Efficient sparse attention architecture with cascade token and head pruning. In 2021 IEEE International Symposium on High-Performance Computer Architecture (HPCA). IEEE, 97--110."},{"key":"e_1_3_2_1_59_1","doi-asserted-by":"publisher","DOI":"10.1145\/3649329.3656237"},{"key":"e_1_3_2_1_60_1","volume-title":"2021 ACM\/IEEE 48th Annual International Symposium on Computer Architecture (ISCA). IEEE, 1083--1095","author":"Zhang Chen","year":"2021","unstructured":"Yang Wang, Chen Zhang, Zhiqiang Xie, Cong Guo, Yunxin Liu, and Jingwen Leng. 2021. Dual-side sparse tensor core. In 2021 ACM\/IEEE 48th Annual International Symposium on Computer Architecture (ISCA). IEEE, 1083--1095."},{"key":"e_1_3_2_1_61_1","doi-asserted-by":"publisher","DOI":"10.1145\/1498765.1498785"},{"key":"e_1_3_2_1_62_1","volume-title":"Godiva: Generating open-domain videos from natural descriptions. 
arXiv preprint arXiv:2104.14806","author":"Wu Chenfei","year":"2021","unstructured":"Chenfei Wu, Lun Huang, Qianxi Zhang, Binyang Li, Lei Ji, Fan Yang, Guillermo Sapiro, and Nan Duan. 2021. Godiva: Generating open-domain videos from natural descriptions. arXiv preprint arXiv:2104.14806 (2021)."},{"key":"e_1_3_2_1_63_1","doi-asserted-by":"publisher","DOI":"10.1145\/3613424.3623786"},{"key":"e_1_3_2_1_64_1","volume-title":"EasyAnimate: A High-Performance Long Video Generation Method based on Transformer Architecture. arXiv preprint arXiv:2405.18991","author":"Xu Jiaqi","year":"2024","unstructured":"Jiaqi Xu, Xinyi Zou, Kunzhe Huang, Yunkuo Chen, Bo Liu, MengLi Cheng, Xing Shi, and Jun Huang. 2024. EasyAnimate: A High-Performance Long Video Generation Method based on Transformer Architecture. arXiv preprint arXiv:2405.18991 (2024)."},{"key":"e_1_3_2_1_65_1","volume-title":"Cogvideox: Text-to-video diffusion models with an expert transformer. arXiv preprint arXiv:2408.06072","author":"Yang Zhuoyi","year":"2024","unstructured":"Zhuoyi Yang, Jiayan Teng, Wendi Zheng, Ming Ding, Shiyu Huang, Jiazheng Xu, Yuanming Yang, Wenyi Hong, Xiaohan Zhang, Guanyu Feng, et al. 2024. Cogvideox: Text-to-video diffusion models with an expert transformer. arXiv preprint arXiv:2408.06072 (2024)."},{"key":"e_1_3_2_1_66_1","doi-asserted-by":"publisher","DOI":"10.1145\/3626202.3637562"},{"key":"e_1_3_2_1_67_1","doi-asserted-by":"publisher","DOI":"10.1109\/ISCAS.2018.8351354"},{"key":"e_1_3_2_1_68_1","doi-asserted-by":"publisher","DOI":"10.1109\/TPDS.2022.3177046"},{"key":"e_1_3_2_1_69_1","doi-asserted-by":"publisher","DOI":"10.1109\/RTSS49844.2020.00022"},{"key":"e_1_3_2_1_70_1","unstructured":"Tianchen Zhao Tongcheng Fang Enshu Liu Rui Wan Widyadewi Soedarmadji Shiyao Li Zinan Lin Guohao Dai Shengen Yan Huazhong Yang Xuefei Ning and Yu Wang. 2024. ViDiT-Q: Efficient and Accurate Quantization of Diffusion Transformers for Image and Video Generation. 
arXiv:2406.02540 [cs.CV] https:\/\/arxiv.org\/abs\/2406.02540"},{"key":"e_1_3_2_1_71_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72630-9_17"},{"key":"e_1_3_2_1_72_1","unstructured":"Zangwei Zheng Xiangyu Peng Tianji Yang Chenhui Shen Shenggui Li Hongxin Liu Yukun Zhou Tianyi Li and Yang You. 2024. Open-Sora: Democratizing Efficient Video Production for All. https:\/\/github.com\/hpcaitech\/Open-Sora"},{"key":"e_1_3_2_1_73_1","volume-title":"Magicvideo: Efficient video generation with latent diffusion models. arXiv preprint arXiv:2211.11018","author":"Zhou Daquan","year":"2022","unstructured":"Daquan Zhou, Weimin Wang, Hanshu Yan, Weiwei Lv, Yizhe Zhu, and Jiashi Feng. 2022. Magicvideo: Efficient video generation with latent diffusion models. arXiv preprint arXiv:2211.11018 (2022)."},{"key":"e_1_3_2_1_74_1","doi-asserted-by":"publisher","DOI":"10.1145\/3626202.3637569"},{"key":"e_1_3_2_1_75_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00841"}],"event":{"name":"FPGA '25: The 2025 ACM\/SIGDA International Symposium on Field Programmable Gate Arrays","location":"Monterey CA USA","acronym":"FPGA '25","sponsor":["SIGDA ACM Special Interest Group on Design Automation"]},"container-title":["Proceedings of the 2025 ACM\/SIGDA International Symposium on Field Programmable Gate 
Arrays"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3706628.3708864","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3706628.3708864","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T21:54:31Z","timestamp":1755899671000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3706628.3708864"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,2,27]]},"references-count":75,"alternative-id":["10.1145\/3706628.3708864","10.1145\/3706628"],"URL":"https:\/\/doi.org\/10.1145\/3706628.3708864","relation":{},"subject":[],"published":{"date-parts":[[2025,2,27]]},"assertion":[{"value":"2025-02-27","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}