{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,17]],"date-time":"2026-03-17T02:04:20Z","timestamp":1773713060377,"version":"3.50.1"},"reference-count":27,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,7,9]],"date-time":"2023-07-09T00:00:00Z","timestamp":1688860800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,7,9]],"date-time":"2023-07-09T00:00:00Z","timestamp":1688860800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,7,9]]},"DOI":"10.1109\/dac56929.2023.10247993","type":"proceedings-article","created":{"date-parts":[[2023,9,15]],"date-time":"2023-09-15T17:31:31Z","timestamp":1694799091000},"page":"1-6","source":"Crossref","is-referenced-by-count":8,"title":["Efficient Transformer Inference with Statically Structured Sparse Attention"],"prefix":"10.1109","author":[{"given":"Steve","family":"Dai","sequence":"first","affiliation":[{"name":"NVIDIA"}]},{"given":"Hasan","family":"Genc","sequence":"additional","affiliation":[{"name":"University of California,Berkeley"}]},{"given":"Rangharajan","family":"Venkatesan","sequence":"additional","affiliation":[{"name":"NVIDIA"}]},{"given":"Brucek","family":"Khailany","sequence":"additional","affiliation":[{"name":"NVIDIA"}]}],"member":"263","reference":[{"key":"ref13","article-title":"Linformer: Self-attention with linear complexity","author":"wang","year":"2020"},{"key":"ref12","article-title":"Blockwise self-attention for long document understanding","author":"qiu","year":"2019"},{"key":"ref15","article-title":"Longformer: The long-document transformer","author":"beltagy","year":"2020"},{"key":"ref14","article-title":"Generating long sequences with sparse transformers","author":"child","year":"2019"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICCAD45719.2019.8942127"},{"key":"ref10","article-title":"Big bird: Transformers for longer sequences","author":"zaheer","year":"2020","journal-title":"NeurIPS"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D16-1264"},{"key":"ref1","article-title":"Attention is all you need","author":"vaswani","year":"2017","journal-title":"NeurIPS"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1145\/3307650.3322214"},{"key":"ref16","article-title":"Sparsebert: Rethinking the importance analysis in self-attention","author":"shi","year":"2021","journal-title":"ICML"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/HPCA51647.2021.00018"},{"key":"ref18","article-title":"A3: Accelerating attention mechanisms in neural networks with approximation","author":"ham","year":"2020","journal-title":"HPCA"},{"key":"ref24","article-title":"Integer quantization for deep learning inference: Principles and empirical evaluation","author":"wu","year":"2020"},{"key":"ref23","article-title":"Long range arena: A benchmark for efficient transformers","author":"tay","year":"2020"},{"key":"ref26","article-title":"Huggingface&#x2019;s transformers: State-of-the-art natural language processing","author":"wolf","year":"2019"},{"key":"ref25","author":"reddy","year":"1977","journal-title":"Speech understanding systems A summary of results of the five-year research effort"},{"key":"ref20","article-title":"Transformer acceleration with dynamic sparse attention","author":"liu","year":"2021"},{"key":"ref22","article-title":"Nystr&#x00F6;mformer: A nystr&#x00F6;m-based algorithm for approximating self-attention","author":"xiong","year":"2021","journal-title":"Conf on Artificial Intelligence (AAAI)"},{"key":"ref21","article-title":"Bert: Pre-training of deep bidirectional transformers for language understanding","author":"devlin","year":"2018"},{"key":"ref27","article-title":"Pointer sentinel mixture models","author":"merity","year":"2016"},{"key":"ref8","article-title":"Unified language model pre-training for natural language understanding and generation","author":"dong","year":"2019","journal-title":"NeurIPS"},{"key":"ref7","article-title":"End-to-end object detection with transformers","author":"carion","year":"2020","journal-title":"ECCV"},{"key":"ref9","article-title":"Language models are unsupervised multitask learners","author":"radford","year":"2019","journal-title":"OpenAIRE blog"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1017\/9781108608480"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/W18-5446"},{"key":"ref6","article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","author":"dosovitskiy","year":"2020"},{"key":"ref5","article-title":"Image transformer","author":"parmar","year":"2018","journal-title":"ICML"}],"event":{"name":"2023 60th ACM\/IEEE Design Automation Conference (DAC)","location":"San Francisco, CA, USA","start":{"date-parts":[[2023,7,9]]},"end":{"date-parts":[[2023,7,13]]}},"container-title":["2023 60th ACM\/IEEE Design Automation Conference (DAC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10247654\/10247655\/10247993.pdf?arnumber=10247993","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,10,2]],"date-time":"2023-10-02T17:41:30Z","timestamp":1696268490000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10247993\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,7,9]]},"references-count":27,"URL":"https:\/\/doi.org\/10.1109\/dac56929.2023.10247993","relation":{},"subject":[],"published":{"date-parts":[[2023,7,9]]}}}