{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,27]],"date-time":"2026-02-27T15:34:44Z","timestamp":1772206484547,"version":"3.50.1"},"reference-count":40,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"3","license":[{"start":{"date-parts":[[2022,7,1]],"date-time":"2022-07-01T00:00:00Z","timestamp":1656633600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2022,7,1]],"date-time":"2022-07-01T00:00:00Z","timestamp":1656633600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,7,1]],"date-time":"2022-07-01T00:00:00Z","timestamp":1656633600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Emerg. Topics Comput."],"published-print":{"date-parts":[[2022,7,1]]},"DOI":"10.1109\/tetc.2022.3187770","type":"journal-article","created":{"date-parts":[[2022,7,8]],"date-time":"2022-07-08T19:28:23Z","timestamp":1657308503000},"page":"1302-1314","source":"Crossref","is-referenced-by-count":13,"title":["A BF16 FMA is All You Need for DNN Training"],"prefix":"10.1109","volume":"10","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-8592-0716","authenticated-orcid":false,"given":"John","family":"Osorio","sequence":"first","affiliation":[{"name":"Department of Computer Science, Barcelona Supercomputing Center, Barcelona, Spain"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2869-668X","authenticated-orcid":false,"given":"Adria","family":"Armejach","sequence":"additional","affiliation":[{"name":"Department of Computer Science, Barcelona Supercomputing Center, Barcelona, Spain"}]},{"given":"Eric","family":"Petit","sequence":"additional","affiliation":[{"name":"Intel Corporation, Portland, OR, USA"}]},{"given":"Greg","family":"Henry","sequence":"additional","affiliation":[{"name":"Intel Corporation, Portland, OR, USA"}]},{"given":"Marc","family":"Casas","sequence":"additional","affiliation":[{"name":"Department of Computer Science, Barcelona Supercomputing Center, Barcelona, Spain"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TENSYMP52854.2021.9550912"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ARITH.2019.00019"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/92.845894"},{"key":"ref4","first-page":"7686","article-title":"Training deep neural networks with 8-bit floating point numbers","volume-title":"Proc. 32nd Int. Conf. Neural Inf. Process. Syst.","author":"Wang"},{"key":"ref5","article-title":"Improve tensor core operations","year":"2021"},{"key":"ref6","article-title":"A study of BFLOAT16 for deep learning training","author":"Kalamkar","year":"2019"},{"key":"ref7","first-page":"1","article-title":"Mixed precision training","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Micikevicius"},{"key":"ref8","first-page":"1796","article-title":"Ultra-Low precision 4-bit training of deep neural networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Sun"},{"key":"ref9","first-page":"12 127","article-title":"FracTrain: Fractionally squeezing bit savings both temporally and spatially for efficient DNN training","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Fu"},{"key":"ref10","article-title":"{CPT}: Efficient deep neural network training via cyclic precision","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Fu"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1145\/1064978.1065034"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00474"},{"key":"ref14","article-title":"Recurrent neural network regularization","author":"Zaremba","year":"2014"},{"key":"ref15","first-page":"1","article-title":"Attention is all you need","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Vaswani"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/W16-3210"},{"key":"ref17","article-title":"Neural machine translation by jointly learning to align and translate","author":"Trevett","year":"2020"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1145\/3331184.3331267"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1145\/2827872"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1145\/2063384.2063454"},{"key":"ref21","article-title":"Nvidia tensor cores"},{"key":"ref22","article-title":"Bfloat16: The secret to high performance on cloud TPUs","author":"Wang","year":"2019"},{"key":"ref23","article-title":"Intel architecture instruction set extensions and future features programming reference","year":"2020"},{"key":"ref24","article-title":"Nvidia a100 tensor core GPU architecture","year":"2020"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-29400-7_34"},{"key":"ref26","article-title":"Intel math kernel library","year":"2020"},{"key":"ref27","article-title":"Learning multiple layers of features from tiny images","author":"Krizhevsky","year":"2009"},{"key":"ref28","article-title":"Replication of recurrent neural network regularization by zaremba","author":"Durmus","year":"2019"},{"key":"ref29","article-title":"Original pytorch transformer model","author":"Gordic","year":"2020"},{"key":"ref30","article-title":"Pytorch seq2seq","author":"Ben","year":"2018"},{"key":"ref31","article-title":"Neural graph collaborative filtering algorithm in pytorch"},{"key":"ref32","article-title":"Pytorch","author":"Paszke","year":"2020"},{"key":"ref33","article-title":"Intel deep neural network library"},{"key":"ref34","article-title":"Intel architecture instruction set extensions programming reference","year":"2020"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2013.6707022"},{"key":"ref36","article-title":"AdaptivFloat: A floating-point based data type for resilient deep learning inference","volume":"arXiv","author":"Tambe","year":"2019"},{"key":"ref37","first-page":"1877","article-title":"Language models are few-shot learners","volume":"33","author":"Brown","year":"2020"},{"key":"ref38","article-title":"Introducing 2nd generation IPU systems for AI at scale","author":"Toon","year":"2020"},{"key":"ref39","article-title":"Dorefa-net: Training low bitwidth convolutional neural networks with low bitwidth gradients","author":"Zhou","year":"2016"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2019.12.027"}],"container-title":["IEEE Transactions on Emerging Topics in Computing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6245516\/9874959\/09823406.pdf?arnumber=9823406","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,2,1]],"date-time":"2024-02-01T04:42:07Z","timestamp":1706762527000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9823406\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,7,1]]},"references-count":40,"journal-issue":{"issue":"3"},"URL":"https:\/\/doi.org\/10.1109\/tetc.2022.3187770","relation":{},"ISSN":["2168-6750","2376-4562"],"issn-type":[{"value":"2168-6750","type":"electronic"},{"value":"2376-4562","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,7,1]]}}}