{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:58:31Z","timestamp":1763193511326,"version":"3.45.0"},"reference-count":32,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,6,30]]},"DOI":"10.1109\/ijcnn64981.2025.11228481","type":"proceedings-article","created":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T18:46:15Z","timestamp":1763145975000},"page":"1-8","source":"Crossref","is-referenced-by-count":0,"title":["TQ-DiT: Efficient Time-Aware Quantization for Diffusion Transformers"],"prefix":"10.1109","author":[{"given":"Younghye","family":"Hwang","sequence":"first","affiliation":[{"name":"Korea Advanced Institute of Science and Technology (KAIST),The School of Electrical Engineering,Daejeon,South Korea"}]},{"given":"Hyojin","family":"Lee","sequence":"additional","affiliation":[{"name":"Korea Advanced Institute of Science and Technology (KAIST),The School of Electrical Engineering,Daejeon,South Korea"}]},{"given":"Joonhyuk","family":"Kang","sequence":"additional","affiliation":[{"name":"Korea Advanced Institute of Science and Technology (KAIST),The School of Electrical Engineering,Daejeon,South Korea"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/3626235"},{"article-title":"Diffusion models beat gans on image synthesis","volume-title":"Proc. NeurIPS","author":"Dhariwal","key":"ref2"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01042"},{"article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","volume-title":"Proc. ICLR","author":"Dosovitskiy","key":"ref4"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58452-8_13"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00387"},{"article-title":"Video generation models as world simulators","year":"2024","author":"Brooks","key":"ref7"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.02117"},{"article-title":"Pixart-alpha: Fast training of diffusion transformer for photorealistic text-to-image synthesis","year":"2023","author":"Chen","key":"ref9"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2024.128096"},{"article-title":"Sustainable ai: Environmental implications, challenges and opportunities","volume-title":"Proc. MLSys","author":"Wu","key":"ref11"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00286"},{"article-title":"Brecq: Pushing the limit of post-training quantization by block reconstruction","volume-title":"Proc. ICLR","author":"Li","key":"ref13"},{"article-title":"Up or down? adaptive rounding for post-training quantization","volume-title":"Proc. ICML","author":"Nagel","key":"ref14"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19775-8_12"},{"article-title":"Ptq4dit: Post-training quantization for diffusion transformers","year":"2024","author":"Wu","key":"ref16"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1049\/icp.2024.2234"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2023.3288243"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.3390\/fi15020057"},{"article-title":"Denoising diffusion probabilistic models","volume-title":"Proc. NeurIPS","author":"Ho","key":"ref20"},{"article-title":"A white paper on neural network quantization","year":"2021","author":"Nagel","key":"ref21"},{"article-title":"Ptqd: Accurate post-training quantization for diffusion models","volume-title":"Proc. NeurIPS","author":"He","key":"ref22"},{"article-title":"Smoothquant: Accurate and efficient post-training quantization for large language models","volume-title":"Proc. ICML","author":"Xiao","key":"ref23"},{"article-title":"Gpt3.int8(): 8-bit matrix multiplication for transformers at scale","volume-title":"Proc. NeurIPS","author":"Dettmers","key":"ref24"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01608"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/DSN58291.2024.00034"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW.2019.00363"},{"article-title":"Post-training quantization for vision transformer","volume-title":"Proc. NeurIPS","author":"Liu","key":"ref28"},{"article-title":"Gans trained by a two time-scale update rule converge to a local nash equilibrium","volume-title":"Proc. NeurIPS","author":"Heusel","key":"ref29"},{"article-title":"Generating images with sparse representations","year":"2021","author":"Nash","key":"ref30"},{"article-title":"Improved techniques for training gans","volume-title":"Proc. NeurIPS","author":"Salimans","key":"ref31"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-015-0816-y"}],"event":{"name":"2025 International Joint Conference on Neural Networks (IJCNN)","start":{"date-parts":[[2025,6,30]]},"location":"Rome, Italy","end":{"date-parts":[[2025,7,5]]}},"container-title":["2025 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11227166\/11227148\/11228481.pdf?arnumber=11228481","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:54:50Z","timestamp":1763193290000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11228481\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,30]]},"references-count":32,"URL":"https:\/\/doi.org\/10.1109\/ijcnn64981.2025.11228481","relation":{},"subject":[],"published":{"date-parts":[[2025,6,30]]}}}