{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,7]],"date-time":"2026-03-07T18:27:47Z","timestamp":1772908067593,"version":"3.50.1"},"reference-count":35,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],"funder":[{"DOI":"10.13039\/501100001863","name":"New Energy and Industrial Technology Development Organization","doi-asserted-by":"publisher","award":["JPNP20006"],"award-info":[{"award-number":["JPNP20006"]}],"id":[{"id":"10.13039\/501100001863","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/access.2023.3346952","type":"journal-article","created":{"date-parts":[[2023,12,25]],"date-time":"2023-12-25T20:13:53Z","timestamp":1703535233000},"page":"23-33","source":"Crossref","is-referenced-by-count":12,"title":["BERT-NAR-BERT: A Non-Autoregressive Pre-Trained Sequence-to-Sequence Model Leveraging BERT Checkpoints"],"prefix":"10.1109","volume":"12","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-5540-7834","authenticated-orcid":false,"given":"Mohammad Golam","family":"Sohrab","sequence":"first","affiliation":[{"name":"Artificial Intelligence Research Center (AIRC), National Institute of Advanced Industrial Science and Technology, Tokyo, Japan"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7117-5962","authenticated-orcid":false,"given":"Masaki","family":"Asada","sequence":"additional","affiliation":[{"name":"Artificial Intelligence Research Center (AIRC), National Institute of Advanced Industrial Science and Technology, Tokyo, Japan"}]},{"given":"Mat\u012bss","family":"Rikters","sequence":"additional","affiliation":[{"name":"Artificial Intelligence Research Center (AIRC), National Institute of Advanced Industrial Science and Technology, Tokyo, Japan"}]},{"given":"Makoto","family":"Miwa","sequence":"additional","affiliation":[{"name":"Artificial Intelligence Research Center (AIRC), National Institute of Advanced Industrial Science and Technology, Tokyo, Japan"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/D14-1179"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.703"},{"issue":"1","key":"ref3","first-page":"5485","article-title":"Exploring the limits of transfer learning with a unified text-to-text transformer","volume":"21","author":"Raffel","year":"2020","journal-title":"J. Mach. Learn. Res."},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.378"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00313"},{"key":"ref6","first-page":"1","article-title":"Non-autoregressive neural machine translation","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Gu"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D18-1149"},{"key":"ref8","first-page":"8630","article-title":"BANG: Bridging autoregressive and non-autoregressive generation with large scale pretraining","volume-title":"Proc. 38th Int. Conf. Mach. Learn.","author":"Qi"},{"key":"ref9","first-page":"4171","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","volume-title":"Proc. Conf. North Amer. 
Chapter Assoc. Comput. Linguistics, Hum. Lang. Technol.","volume":"1","author":"Devlin"},{"issue":"8","key":"ref10","first-page":"9","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"Radford","year":"2019","journal-title":"OpenAI Blog"},{"key":"ref11","first-page":"1","article-title":"Attention is all you need","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"30","author":"Vaswani"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.bionlp-1.34"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i05.6413"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D18-1336"},{"key":"ref15","first-page":"5753","article-title":"XLNet: Generalized autoregressive pretraining for language understanding","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Yang"},{"key":"ref16","first-page":"7059","article-title":"Cross-lingual language model pretraining","volume-title":"Proc. Neural Inf. Process. Syst.","volume":"32","author":"Conneau"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.findings-acl.11"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.68"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.findings-emnlp.233"},{"key":"ref20","volume-title":"Wikimedia Downloads","year":"2023"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/w18-5446"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D18-1206"},{"key":"ref23","first-page":"74","article-title":"ROUGE: A package for automatic evaluation of summaries","volume-title":"Text Summarization Branches Out","author":"Lin","year":"2004"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D16-1264"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P17-1123"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.3115\/1073083.1073135"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/W18-6319"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-demos.6"},{"key":"ref29","first-page":"1","article-title":"Distilling the knowledge in a neural network","volume-title":"Proc. NIPS Deep Learn. Workshop","author":"Hinton"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D16-1139"},{"key":"ref31","article-title":"Improving language understanding by generative pre-training","author":"Radford","year":"2018","journal-title":"OpenAI Blog"},{"key":"ref32","first-page":"5926","article-title":"MASS: Masked sequence to sequence pre-training for language generation","volume-title":"Proc. 
ICML","volume":"97","author":"Song"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.findings-emnlp.217"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1633"},{"key":"ref35","article-title":"Levenshtein transformer","volume-title":"Advances in Neural Information Processing Systems","volume":"32","author":"Gu","year":"2019"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6287639\/10380310\/10373869.pdf?arnumber=10373869","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,13]],"date-time":"2024-01-13T00:28:54Z","timestamp":1705105734000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10373869\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":35,"URL":"https:\/\/doi.org\/10.1109\/access.2023.3346952","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]}}}