{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T17:53:50Z","timestamp":1775066030172,"version":"3.50.1"},"reference-count":23,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,12,5]],"date-time":"2021-12-05T00:00:00Z","timestamp":1638662400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,12,5]],"date-time":"2021-12-05T00:00:00Z","timestamp":1638662400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,12,5]],"date-time":"2021-12-05T00:00:00Z","timestamp":1638662400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,12,5]]},"DOI":"10.1109\/dac18074.2021.9586134","type":"proceedings-article","created":{"date-parts":[[2021,11,8]],"date-time":"2021-11-08T23:30:34Z","timestamp":1636414234000},"page":"469-474","source":"Crossref","is-referenced-by-count":125,"title":["Softermax: Hardware\/Software Co-Design of an Efficient Softmax for Transformers"],"prefix":"10.1109","author":[{"given":"Jacob R.","family":"Stevens","sequence":"first","affiliation":[]},{"given":"Rangharajan","family":"Venkatesan","sequence":"additional","affiliation":[]},{"given":"Steve","family":"Dai","sequence":"additional","affiliation":[]},{"given":"Brucek","family":"Khailany","sequence":"additional","affiliation":[]},{"given":"Anand","family":"Raghunathan","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","article-title":"Q8BERT: Quantized 8bit BERT","author":"zafrir","year":"2019","journal-title":"arXiv preprint arXiv 1910 01500"},{"key":"ref11","article-title":"Fully quantized transformer for machine translation","author":"prato","year":"2020","journal-title":"Proc of EMNLP"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2020\/520"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ISCAS45731.2020.9180870"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1145\/3299874.3317988"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/HPCA47549.2020.00035"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TCSII.2020.3002564"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICCAD45719.2019.8942127"},{"key":"ref18","article-title":"Online normalizer calculation for softmax","author":"milakov","year":"2018","journal-title":"arXiv preprint arXiv 1805 02867"},{"key":"ref19","year":"2017","journal-title":"Nvidia tesla v100 gpu architecture"},{"key":"ref4","article-title":"Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism","author":"shoeybi","year":"2019","journal-title":"arXiv preprint arXiv 1909 08072"},{"key":"ref3","first-page":"4171","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","author":"devlin","year":"2019","journal-title":"Proc of NAACL-HLT"},{"key":"ref6","article-title":"Language models are few-shot learners","author":"brown","year":"2020","journal-title":"arXiv preprint arXiv 2005 14354"},{"key":"ref5","first-page":"9","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"radford","year":"2019","journal-title":"OpenAIRE 
blog"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N19-1133"},{"key":"ref7","article-title":"DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter","author":"sanh","year":"2019","journal-title":"arXiv preprint arXiv 1910 01108"},{"key":"ref2","article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","author":"dosovitskiy","year":"2020","journal-title":"arXiv preprint arXiv 2010 10042"},{"key":"ref1","first-page":"5998","article-title":"Attention is all you need","author":"vaswani","year":"2017","journal-title":"Proc of NeurIPS"},{"key":"ref9","article-title":"Optimizing Transformers with Approximate Computing for Faster, Smaller and more Accurate NLP Models","author":"nagarajan","year":"2020","journal-title":"arXiv preprint arXiv 2010 00170"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1145\/3079856.3080246"},{"key":"ref22","article-title":"Integer quantization for deep learning inference: Principles and empirical evaluation","author":"wu","year":"2020","journal-title":"arXiv preprint arXiv 2004 06774"},{"key":"ref21","article-title":"Huggingface&#x2019;s transformers: State-of-the-art natural language processing","author":"wolf","year":"2019","journal-title":"arXiv pp arXiv&#x2013;"},{"key":"ref23","year":"0"}],"event":{"name":"2021 58th ACM\/IEEE Design Automation Conference (DAC)","location":"San Francisco, CA, USA","start":{"date-parts":[[2021,12,5]]},"end":{"date-parts":[[2021,12,9]]}},"container-title":["2021 58th ACM\/IEEE Design Automation Conference (DAC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9585997\/9586083\/09586134.pdf?arnumber=9586134","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T16:55:51Z","timestamp":1652201751000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9586134\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,12,5]]},"references-count":23,"URL":"https:\/\/doi.org\/10.1109\/dac18074.2021.9586134","relation":{},"subject":[],"published":{"date-parts":[[2021,12,5]]}}}