{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,14]],"date-time":"2026-04-14T20:47:32Z","timestamp":1776199652768,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":18,"publisher":"ACM","content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2026,4,27]]},"DOI":"10.1145\/3802973.3804458","type":"proceedings-article","created":{"date-parts":[[2026,4,14]],"date-time":"2026-04-14T19:41:22Z","timestamp":1776195682000},"page":"7-12","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["Energy Consumption Analysis of Discrete Diffusion and Autoregressive Language Models"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-9024-9499","authenticated-orcid":false,"given":"Boris","family":"Ruf","sequence":"first","affiliation":[{"name":"AXA AI Research, Paris, France"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5669-4871","authenticated-orcid":false,"given":"Marcin","family":"Detyniecki","sequence":"additional","affiliation":[{"name":"AXA AI Research, Paris, France"}]}],"member":"320","published-online":{"date-parts":[[2026,4,26]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"Subham Sekhar Sahoo, and Volodymyr Kuleshov","author":"Arriola Marianne","year":"2025","unstructured":"Marianne Arriola, Aaron Gokaslan, Justin T Chiu, Jiaqi Han, Zhihan Yang, Zhixuan Qi, Subham Sekhar Sahoo, and Volodymyr Kuleshov. 2025. Interpolating Autoregressive and Discrete Denoising Diffusion Language Models. In ICLR."},{"key":"e_1_3_2_1_2_1","unstructured":"Giulia Bertazzini Chiara Albisani Daniele Baracchi Dasara Shullani and Roberto Verdecchia. 2025. The Hidden Cost of an Image: Quantifying the Energy Consumption of AI Image Generation."},{"key":"e_1_3_2_1_3_1","volume-title":"Garyk Brixi, Eric Nguyen, Stefano Massaroli, and Michael Poli.","author":"Chandrasegaran Keshigeyan","year":"2025","unstructured":"Keshigeyan Chandrasegaran, Armin W. Thomas, Jerome Ku, Federico Berto, Jae Myung Kim, Garyk Brixi, Eric Nguyen, Stefano Massaroli, and Michael Poli. 2025. RND1: Simple, Scalable AR-to-Diffusion Conversion."},{"key":"e_1_3_2_1_4_1","unstructured":"Zigeng Chen Gongfan Fang Xinyin Ma Ruonan Yu and Xinchao Wang. 2026. dParallel: Learnable Parallel Decoding for dLLMs. In ICLR."},{"key":"e_1_3_2_1_5_1","volume-title":"SDAR: A Synergistic Diffusion-AutoRegression Paradigm for Scalable Sequence Generation. CoRR","author":"Cheng Shuang","year":"2025","unstructured":"Shuang Cheng, Yihan Bian, Dawei Liu, Linfeng Zhang, Qian Yao, Zhongbo Tian, Wenhai Wang, Qipeng Guo, Kai Chen, Biqing Qi, and Bowen Zhou. 2025. SDAR: A Synergistic Diffusion-AutoRegression Paradigm for Scalable Sequence Generation. CoRR (2025)."},{"key":"e_1_3_2_1_6_1","unstructured":"Benoit Courty Victor Schmidt Sasha Luccioni Goyal-Kamal Marion Coutarel Boris Feld J\u00e9r\u00e9my Lecourt Liam Connell Amine Saboni et al. 2024. mlco2\/codecarbon: v2.4.1."},{"key":"e_1_3_2_1_7_1","unstructured":"Shansan Gong Shivam Agarwal Yizhe Zhang Jiacheng Ye Lin Zheng Mukai Li Chenxin An Peilin Zhao Wei Bi Jiawei Han Hao Peng and Lingpeng Kong. 2025. Scaling Diffusion Language Models via Adaptation from Autoregressive Models. In ICLR."},{"key":"e_1_3_2_1_8_1","unstructured":"Aniketh Iyengar Jiaqi Han Boris Ruf Vincent Grari Marcin Detyniecki and Stefano Ermon. 2025. Energy Scaling Laws for Diffusion Models: Quantifying Compute and Carbon Emissions in Image Generation."},{"key":"e_1_3_2_1_9_1","unstructured":"Haokun Lin Xinle Jia Shaozhen Liu Shujun Xia Weitao Huang Haobo Xu Junyang Li Yicheng Xiao Xingrun Xing Ziyu Guo et al. 2026. Efficient Diffusion Language Models: A Comprehensive Survey. Authorea Preprints (2026)."},{"key":"e_1_3_2_1_10_1","volume-title":"NeurIPS. [11] Shen Nie, Fengqi Zhu, Zebin You, Xiaolu Zhang, Jingyang Ou, Jun Hu, Jun Zhou, Yankai Lin, Ji-Rong Wen, and Chongxuan Li.","author":"Ma Xinyin","year":"2025","unstructured":"Xinyin Ma, Runpeng Yu, Gongfan Fang, and Xinchao Wang. 2025. dKV-Cache: The Cache for Diffusion Language Models. In NeurIPS. [11] Shen Nie, Fengqi Zhu, Zebin You, Xiaolu Zhang, Jingyang Ou, Jun Hu, Jun Zhou, Yankai Lin, Ji-Rong Wen, and Chongxuan Li. 2025. Large language diffusion models. arXiv (2025)."},{"key":"e_1_3_2_1_11_1","unstructured":"Subham Sekhar Sahoo Marianne Arriola Yair Schiff Aaron Gokaslan Edgar Marroquin Justin T Chiu Alexander Rush and Volodymyr Kuleshov. 2024. Simple and effective masked diffusion language models. In NeurIPS."},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"crossref","unstructured":"Roy Schwartz Jesse Dodge Noah A. Smith and Oren Etzioni. 2020. Green AI. Commun. ACM (2020).","DOI":"10.1145\/3381831"},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"crossref","unstructured":"Emma Strubell Ananya Ganesh and Andrew McCallum. 2019. Energy and policy considerations for deep learning in NLP. In ACL.","DOI":"10.18653\/v1\/P19-1355"},{"key":"e_1_3_2_1_14_1","unstructured":"Hugo Touvron Thibaut Lavril Gautier Izacard Xavier Martinet Marie-Anne Lachaux Timoth\u00e9e Lacroix Baptiste Rozi\u00e8re Naman Goyal Eric Hambro Faisal Azhar et al. 2023. LLaMA: Open and Efficient Foundation Language Models. arXiv (2023)."},{"key":"e_1_3_2_1_15_1","unstructured":"Chengyue Wu Hao Zhang Shuchen Xue Shizhe Diao Yonggan Fu Zhijian Liu Pavlo Molchanov Ping Luo Song Han and Enze Xie. 2025. Fast-dLLM v2: Efficient Block-Diffusion LLM."},{"key":"e_1_3_2_1_16_1","unstructured":"Minkai Xu Tomas Geffner Karsten Kreis Weili Nie Yilun Xu Jure Leskovec Stefano Ermon and Arash Vahdat. 2025. Energy-based Diffusion Language Models for Text Generation. In ICLR."},{"key":"e_1_3_2_1_17_1","unstructured":"An Yang Baosong Yang Beichen Zhang Binyuan Hui Bo Zheng Bowen Yu Chengyuan Li Dayiheng Liu Fei Huang Haoran Wei Huan Lin Jian Yang Jianhong Tu Jianwei Zhang Jianxin Yang Jiaxi Yang Jingren Zhou Junyang Lin Kai Dang Keming Lu Keqin Bao Kexin Yang Le Yu Mei Li Mingfeng Xue Pei Zhang Qin Zhu Rui Men Runji Lin Tianhao Li Tingyu Xia Xingzhang Ren Xuancheng Ren Yang Fan Yang Su Yichang Zhang Yu Wan Yuqiong Liu Zeyu Cui Zhenru Zhang and Zihan Qiu. 2024. Qwen2.5 Technical Report. CoRR (2024)."},{"key":"e_1_3_2_1_18_1","volume-title":"Dream 7B: Diffusion Large Language Models. arXiv","author":"Ye Jiacheng","year":"2025","unstructured":"Jiacheng Ye, Zhihui Xie, Lin Zheng, Jiahui Gao, Zirui Wu, Xin Jiang, Zhenguo Li, and Lingpeng Kong. 2025. Dream 7B: Diffusion Large Language Models. arXiv (2025)."}],"event":{"name":"EuroSys '26: 21st European Conference on Computer Systems","location":"Edinburgh Scotland Uk","acronym":"GreenSys '26","sponsor":["SIGOPS ACM Special Interest Group on Operating Systems"]},"container-title":["Proceedings of the 2nd International Workshop on Systems and Methods for Sustainable Large-Scale AI (GreenSys)"],"original-title":[],"deposited":{"date-parts":[[2026,4,14]],"date-time":"2026-04-14T19:41:28Z","timestamp":1776195688000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3802973.3804458"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,4,26]]},"references-count":18,"alternative-id":["10.1145\/3802973.3804458","10.1145\/3802973"],"URL":"https:\/\/doi.org\/10.1145\/3802973.3804458","relation":{},"subject":[],"published":{"date-parts":[[2026,4,26]]},"assertion":[{"value":"2026-04-26","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}