{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,27]],"date-time":"2026-01-27T10:32:50Z","timestamp":1769509970284,"version":"3.49.0"},"reference-count":60,"publisher":"Springer Science and Business Media LLC","issue":"3","license":[{"start":{"date-parts":[[2021,9,27]],"date-time":"2021-09-27T00:00:00Z","timestamp":1632700800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2021,9,27]],"date-time":"2021-09-27T00:00:00Z","timestamp":1632700800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Front. Comput. Sci."],"published-print":{"date-parts":[[2022,6]]},"DOI":"10.1007\/s11704-020-0133-7","type":"journal-article","created":{"date-parts":[[2021,9,27]],"date-time":"2021-09-27T15:03:11Z","timestamp":1632754991000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":15,"title":["Speech-driven facial animation with spectral gathering and temporal attention"],"prefix":"10.1007","volume":"16","author":[{"given":"Yujin","family":"Chai","sequence":"first","affiliation":[]},{"given":"Yanlin","family":"Weng","sequence":"additional","affiliation":[]},{"given":"Lvdi","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Kun","family":"Zhou","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2021,9,27]]},"reference":[{"issue":"4","key":"133_CR1","first-page":"1","volume":"33","author":"C Cao","year":"2014","unstructured":"Cao C, Hou Q, Zhou K. Displaced dynamic expression regression for real-time facial tracking and animation. ACM Transactions on Graphics, 2014, 33(4): 1\u201310","journal-title":"ACM Transactions on Graphics"},{"key":"133_CR2","doi-asserted-by":"crossref","unstructured":"Nagano K, Saito S, Goldwhite L, San K, Hong A, Hu L, Wei L, Xing J, Xu Q, Kung H W, Kuang J, Agarwal A, Castellanos E, Seo J, Fursund J, Li H. Pinscreen avatars in your pocket: mobile pagan engine and personalized gaming. In: Proceedings of SIGGRAPH Asia 2018 RealTime Live!. 2018, 1\u20131","DOI":"10.1145\/3289160.3289162"},{"issue":"4","key":"133_CR3","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/2897824.2925984","volume":"35","author":"P Edwards","year":"2016","unstructured":"Edwards P, Landreth C, Fiume E, Singh K. JALI: an animator-centric viseme model for expressive lip synchronization. ACM Transactions on Graphics, 2016, 35(4): 1\u201311","journal-title":"ACM Transactions on Graphics"},{"issue":"4","key":"133_CR4","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3072959.3073658","volume":"36","author":"T Karras","year":"2017","unstructured":"Karras T, Aila T, Laine S, Herva A, Lehtinen J. Audio-driven facial animation by joint end-to-end learning of pose and emotion. ACM Transactions on Graphics, 2017, 36(4): 1\u201312","journal-title":"ACM Transactions on Graphics"},{"key":"133_CR5","doi-asserted-by":"crossref","unstructured":"Pham H X, Wang Y, Pavlovic V. End-to-end learning for 3d facial animation from speech. In: Proceedings of the 20th ACM International Conference on Multimodal Interaction. 
2018, 361\u2013365","DOI":"10.1145\/3242969.3243017"},{"key":"133_CR6","doi-asserted-by":"crossref","unstructured":"Cudeiro D, Bolkart T, Laidlaw C, Ranjan A, Black M J. Capture, learning, and synthesis of 3d speaking styles. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition. 2019, 10101\u201310111","DOI":"10.1109\/CVPR.2019.01034"},{"key":"133_CR7","doi-asserted-by":"crossref","unstructured":"Hati Y, Rousseaux F, Duhart C. Text-driven mouth animation for human computer interaction with personal assistant. In: Proceedings of the 25th International Conference on Auditory Display. 2019, 75\u201382","DOI":"10.21785\/icad2019.032"},{"key":"133_CR8","volume-title":"Speech and Language Processing: An Introduction to Natural Language Processing, Computational Linguistics, and Speech Recognition","author":"D Jurafsky","year":"2009","unstructured":"Jurafsky D, Martin J H. Speech and Language Processing: An Introduction to Natural Language Processing, Computational Linguistics, and Speech Recognition (2nd Edition). Upper Saddle River, New Jersey: Pearson Prentice Hall, 2009","edition":"2nd Edition"},{"issue":"4","key":"133_CR9","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3072959.3073640","volume":"36","author":"S Suwajanakorn","year":"2017","unstructured":"Suwajanakorn S, Seitz S M, Kemelmacher-Shlizerman I. Synthesizing obama: learning lip sync from audio. ACM Transactions on Graphics, 2017, 36(4): 1\u201313","journal-title":"ACM Transactions on Graphics"},{"issue":"4","key":"133_CR10","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3072959.3073699","volume":"36","author":"S Taylor","year":"2017","unstructured":"Taylor S, Kim T, Yue Y, Mahler M, Krahe J, Rodriguez A G, Hodgins J, Matthews I. A deep learning approach for generalized speech animation. ACM Transactions on Graphics, 2017, 36(4): 1\u201311","journal-title":"ACM Transactions on Graphics"},{"key":"133_CR11","doi-asserted-by":"crossref","unstructured":"Hussen Abdelaziz A, Theobald B J, Binder J, Fanelli G, Dixon P, Apostoloff N, Weise T, Kajareker S. Speaker-independent speech-driven visual speech synthesis using domain-adapted acoustic models. In: Proceedings of the 2019 International Conference on Multimodal Interaction. 2019, 220\u2013225","DOI":"10.1145\/3340555.3353745"},{"issue":"8","key":"133_CR12","doi-asserted-by":"publisher","first-page":"1735","DOI":"10.1162\/neco.1997.9.8.1735","volume":"9","author":"S Hochreiter","year":"1997","unstructured":"Hochreiter S, Schmidhuber J. Long short-term memory. Neural Computation, 1997, 9(8): 1735\u20131780","journal-title":"Neural Computation"},{"key":"133_CR13","unstructured":"Hannun A, Case C, Casper J, Catanzaro B, Diamos G, Elsen E, Prenger R, Satheesh S, Sengupta S, Coates A, et al. Deep speech: scaling up end-to-end speech recognition. arXiv preprint arXiv: 14125567, 2014"},{"key":"133_CR14","doi-asserted-by":"crossref","unstructured":"Pham H X, Cheung S, Pavlovic V. Speech-driven 3d facial animation with implicit emotional awareness: a deep learning approach. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops. 2017, 2328\u20132336","DOI":"10.1109\/CVPRW.2017.287"},{"key":"133_CR15","doi-asserted-by":"crossref","unstructured":"Tian G, Yuan Y, Liu Y. Audio2face: generating speech\/face animation from single audio with attention-based bidirectional lstm networks. In: Proceedings of 2019 IEEE International Conference on Multimedia and Expo Workshops. 
2019, 366\u2013371","DOI":"10.1109\/ICMEW.2019.00069"},{"key":"133_CR16","doi-asserted-by":"crossref","unstructured":"Tzirakis P, Papaioannou A, Lattas A, Tarasiou M, Schuller B, Zafeiriou S. Synthesising 3d facial motion from \u201cin-the-wild\u201d speech. arXiv preprint arXiv: 190407002, 2019","DOI":"10.1109\/FG47880.2020.00100"},{"key":"133_CR17","doi-asserted-by":"crossref","unstructured":"Nishimura R, Sakata N, Tominaga T, Hijikata Y, Harada K, Kiyokawa K. Speech-driven facial animation by lstm-rnn for communication use. In: Proceedings of 2019 IEEE Conference on Virtual Reality and 3D User Interfaces. 2019, 1102\u20131103","DOI":"10.1109\/VR.2019.8798145"},{"issue":"3","key":"133_CR18","doi-asserted-by":"publisher","first-page":"399","DOI":"10.1145\/1015706.1015736","volume":"23","author":"R W Sumner","year":"2004","unstructured":"Sumner R W, Popovi\u0107 J. Deformation transfer for triangle meshes. ACM Transactions on Graphics, 2004, 23(3): 399\u2013405","journal-title":"ACM Transactions on Graphics"},{"key":"133_CR19","doi-asserted-by":"crossref","unstructured":"Wu Q, Zhang J, Lai Y K, Zheng J, Cai J. Alive caricature from 2d to 3d. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition. 2018, 7336\u20137345","DOI":"10.1109\/CVPR.2018.00766"},{"key":"133_CR20","unstructured":"Gao L, Lai Y, Yang J, Zhang L X, Kobbelt L, Xia S. Sparse data driven mesh deformation. IEEE Transactions on Visualization and Computer Graphics, 2019"},{"key":"133_CR21","unstructured":"Orvalho V, Bastos P, Parke F I, Oliveira B, Alvarez X. A facial rigging survey. In: Proceedings of Eurographics 2012 \u2014 State of the Art Reports. 2012, 183\u2013204"},{"issue":"2","key":"133_CR22","doi-asserted-by":"publisher","first-page":"115","DOI":"10.1016\/S0095-4470(19)31123-4","volume":"5","author":"R D Kent","year":"1977","unstructured":"Kent R D, Minifie F D. Coarticulation in recent speech production models. Journal of Phonetics, 1977, 5(2): 115\u2013133","journal-title":"Journal of Phonetics"},{"issue":"1","key":"133_CR23","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1207\/s15516709cog2001_1","volume":"20","author":"C Pelachaud","year":"1996","unstructured":"Pelachaud C, Badler N I, Steedman M. Generating facial expressions for speech. Cognitive Science, 1996, 20(1): 1\u201346","journal-title":"Cognitive Science"},{"key":"133_CR24","doi-asserted-by":"crossref","unstructured":"Wang A, Emmi M, Faloutsos P. Assembling an expressive facial animation system. In: Proceedings of the 2007 ACM SIGGRAPH Symposium on Video Games. 2007, 21\u201326","DOI":"10.1145\/1274940.1274947"},{"key":"133_CR25","doi-asserted-by":"crossref","unstructured":"Cohen M M, Massaro D W. Modeling coarticulation in synthetic visual speech. In: Proceedings of Models and Techniques in Computer Animation. 1993, 139\u2013156","DOI":"10.1007\/978-4-431-66911-1_13"},{"key":"133_CR26","doi-asserted-by":"crossref","unstructured":"Xu Y, Feng A W, Marsella S, Shapiro A. A practical and configurable lip sync method for games. In: Proceedings of Motion on Games. 2013, 131\u2013140","DOI":"10.1145\/2522628.2522904"},{"key":"133_CR27","doi-asserted-by":"crossref","unstructured":"Bregler C, Covell M, Slaney M. Video rewrite: driving visual speech with audio. In: Proceedings of the 24th Annual Conference on Computer Graphics and Interactive Techniques. 
1997, 353\u2013360","DOI":"10.1145\/258734.258880"},{"issue":"3","key":"133_CR28","doi-asserted-by":"publisher","first-page":"388","DOI":"10.1145\/566654.566594","volume":"21","author":"T Ezzat","year":"2002","unstructured":"Ezzat T, Geiger G, Poggio T. Trainable videorealistic speech animation. ACM Transactions on Graphics, 2002, 21(3): 388\u2013398","journal-title":"ACM Transactions on Graphics"},{"key":"133_CR29","unstructured":"Taylor S L, Mahler M, Theobald B J, Matthews I. Dynamic units of visual speech. In: Proceedings of the 11th ACM SIGGRAPH\/Eurographics Conference on Computer Animation. 2012, 275\u2013284"},{"key":"133_CR30","doi-asserted-by":"crossref","unstructured":"Brand M. Voice puppetry. In: Proceedings of the 26th Annual Conference on Computer Graphics and Interactive Techniques. 1999, 21\u201328","DOI":"10.1145\/311535.311537"},{"issue":"3","key":"133_CR31","doi-asserted-by":"publisher","first-page":"500","DOI":"10.1109\/TMM.2006.888009","volume":"9","author":"L Xie","year":"2007","unstructured":"Xie L, Liu Z Q. Realistic mouth-synching for speech-driven talking face using articulatory modelling. IEEE Transactions on Multimedia, 2007, 9(3): 500\u2013510","journal-title":"IEEE Transactions on Multimedia"},{"key":"133_CR32","unstructured":"Wang L, Han W, Soong F K, Huo Q. Text driven 3d photo-realistic talking head. In: Proceedings of Interspeech. 2011, 3307\u20133308"},{"key":"133_CR33","doi-asserted-by":"crossref","unstructured":"Zhang X, Wang L, Li G, Seide F, Soong F K. A new language independent, photo-realistic talking head driven by voice only. In: Proceedings of Interspeech. 2013, 2743\u20132747","DOI":"10.21437\/Interspeech.2013-629"},{"key":"133_CR34","doi-asserted-by":"crossref","unstructured":"Shimba T, Sakurai R, Yamazoe H, Lee J H. Talking heads synthesis from audio with deep neural networks. In: Proceedings of the IEEE\/SICE International Symposium on System Integration. 2015, 100\u2013105","DOI":"10.1109\/SII.2015.7404961"},{"issue":"9","key":"133_CR35","doi-asserted-by":"publisher","first-page":"5287","DOI":"10.1007\/s11042-015-2944-3","volume":"75","author":"B Fan","year":"2016","unstructured":"Fan B, Xie L, Yang S, Wang L, Soong F K. A deep bidirectional lstm approach for video-realistic talking head. Multimedia Tools and Applications, 2016, 75(9): 5287\u20135309","journal-title":"Multimedia Tools and Applications"},{"key":"133_CR36","doi-asserted-by":"crossref","unstructured":"Eskimez S E, Maddox R K, Xu C, Duan Z. Generating talking face landmarks from speech. In: Proceedings of the International Conference on Latent Variable Analysis and Signal Separation. 2018, 372\u2013381","DOI":"10.1007\/978-3-319-93764-9_35"},{"key":"133_CR37","unstructured":"Aneja D, Li W. Real-time lip sync for live 2d animation. arXiv preprint arXiv: 191008685, 2019"},{"key":"133_CR38","doi-asserted-by":"crossref","unstructured":"Greenwood D, Matthews I, Laycock S. Joint learning of facial expression and head pose from speech. In: Proceedings of Interspeech. 2018, 2484\u20132488","DOI":"10.21437\/Interspeech.2018-2587"},{"key":"133_CR39","doi-asserted-by":"crossref","unstructured":"Websdale D, Taylor S, Milner B. The effect of real-time constraints on automatic speech animation. In: Proceedings of Interspeech. 
2018, 2479\u20132483","DOI":"10.21437\/Interspeech.2018-2066"},{"issue":"7","key":"133_CR40","doi-asserted-by":"publisher","first-page":"e1003743","DOI":"10.1371\/journal.pcbi.1003743","volume":"10","author":"J L Schwartz","year":"2014","unstructured":"Schwartz J L, Savariaux C. No, there is no 150 ms lead of visual speech on auditory speech, but a range of audiovisual asynchronies varying from small audio lead to large audio lag. PLOS Computational Biology, 2014, 10(7): e1003743","journal-title":"PLOS Computational Biology"},{"key":"133_CR41","doi-asserted-by":"crossref","unstructured":"Shen J, Pang R, Weiss R J, Schuster M, Jaitly N, Yang Z, Chen Z, Zhang Y, Wang Y, Skerrv-Ryan R, et al. Natural tts synthesis by conditioning wavenet on mel spectrogram predictions. In: Proceedings of the IEEE International Conference on Acoustics, Speech and Signal Processing. 2018, 4779\u20134783","DOI":"10.1109\/ICASSP.2018.8461368"},{"key":"133_CR42","doi-asserted-by":"crossref","unstructured":"Prenger R, Valle R, Catanzaro B. Waveglow: a flow-based generative network for speech synthesis. In: Proceedings of the IEEE International Conference on Acoustics, Speech and Signal Processing. 2019, 3617\u20133621","DOI":"10.1109\/ICASSP.2019.8683143"},{"issue":"5","key":"133_CR43","doi-asserted-by":"publisher","first-page":"1398","DOI":"10.1007\/s11263-019-01251-8","volume":"128","author":"K Vougioukas","year":"2020","unstructured":"Vougioukas K, Petridis S, Pantic M. Realistic speech-driven facial animation with gans. International Journal of Computer Vision, 2020, 128(5): 1398\u20131413","journal-title":"International Journal of Computer Vision"},{"key":"133_CR44","doi-asserted-by":"crossref","unstructured":"Chen L, Maddox R K, Duan Z, Xu C. Hierarchical cross-modal talking face generation with dynamic pixel-wise loss. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition. 2019, 7832\u20137841","DOI":"10.1109\/CVPR.2019.00802"},{"issue":"10","key":"133_CR45","doi-asserted-by":"publisher","first-page":"1533","DOI":"10.1109\/TASLP.2014.2339736","volume":"22","author":"O Abdel-Hamid","year":"2014","unstructured":"Abdel-Hamid O, Mohamed A R, Jiang H, Deng L, Penn G, Yu D. Convolutional neural networks for speech recognition. IEEE\/ACM Transactions on Audio, Speech, and Language Processing, 2014, 22(10): 1533\u20131545","journal-title":"IEEE\/ACM Transactions on Audio, Speech, and Language Processing"},{"key":"133_CR46","doi-asserted-by":"crossref","unstructured":"Sainath T N, Li B. Modeling time-frequency patterns with lstm vs. convolutional architectures for lvcsr tasks. In: Proceedings of Interspeech. 2016, 813\u2013817","DOI":"10.21437\/Interspeech.2016-84"},{"key":"133_CR47","doi-asserted-by":"crossref","unstructured":"Liu Y, Wang D. Time and frequency domain long short-term memory for noise robust pitch tracking. In: Proceedings of the IEEE International Conference on Acoustics, Speech and Signal Processing. 2017, 5600\u20135604","DOI":"10.1109\/ICASSP.2017.7953228"},{"issue":"8","key":"133_CR48","doi-asserted-by":"publisher","first-page":"2151","DOI":"10.1162\/NECO_a_00312","volume":"24","author":"M Denil","year":"2012","unstructured":"Denil M, Bazzani L, Larochelle H, de Freitas N. Learning where to attend with deep architectures for image tracking. Neural Computation, 2012, 24(8): 2151\u20132184","journal-title":"Neural Computation"},{"key":"133_CR49","unstructured":"Bahdanau D, Cho K, Bengio Y. Neural machine translation by jointly learning to align and translate. 
arXiv preprint arXiv: 14090473, 2014"},{"key":"133_CR50","unstructured":"Paszke A, Gross S, Chintala S, Chanan G, Yang E, DeVito Z, Lin Z, Desmaison A, Antiga L, Lerer A. Automatic differentiation in pytorch. In: Proceedings of Neural Information Processing Systems 2017 Workshop on Autodiff. 2017"},{"key":"133_CR51","unstructured":"Kingma D P, Ba J. Adam: a method for stochastic optimization. arXiv preprint arXiv: 14126980, 2014"},{"key":"133_CR52","volume-title":"Instructor\u2019s Guide","author":"P Ekman","year":"2002","unstructured":"Ekman P, Friesen W V, Hager J C. Facial Action Coding System: The Manual on CD-ROM. Instructor\u2019s Guide. Salt Lake City: Network Information Research Co., 2002"},{"issue":"2","key":"133_CR53","doi-asserted-by":"publisher","first-page":"98","DOI":"10.1109\/MRA.2012.2192811","volume":"19","author":"M Mori","year":"2012","unstructured":"Mori M, MacDorman K F, Kageki N. The uncanny valley. IEEE Robotics and Automation Magazine, 2012, 19(2): 98\u2013100","journal-title":"IEEE Robotics and Automation Magazine"},{"key":"133_CR54","doi-asserted-by":"crossref","unstructured":"Kim C, Shin H V, Oh T H, Kaspar A, Elgharib M, Matusik W. On learning associations of faces and voices. In: Proceedings of the Asian Conference on Computer Vision. 2018, 276\u2013292","DOI":"10.1007\/978-3-030-20873-8_18"},{"key":"133_CR55","doi-asserted-by":"crossref","unstructured":"Vielzeuf V, Kervadec C, Pateux S, Lechervy A, Jurie F. An occam\u2019s razor view on learning audiovisual emotion recognition with small training sets. In: Proceedings of the 20th ACM International Conference on Multimodal Interaction. 2018, 589\u2013593","DOI":"10.1145\/3242969.3264980"},{"issue":"5","key":"133_CR56","doi-asserted-by":"publisher","first-page":"975","DOI":"10.1007\/s00138-018-0960-9","volume":"30","author":"E Avots","year":"2019","unstructured":"Avots E, Sapi\u0144ski T, Bachmann M, Kami\u0144ska D. Audiovisual emotion recognition in wild. Machine Vision and Applications, 2019, 30(5): 975\u2013985","journal-title":"Machine Vision and Applications"},{"key":"133_CR57","doi-asserted-by":"crossref","unstructured":"Oh T H, Dekel T, Kim C, Mosseri I, Freeman W T, Rubinstein M, Matusik W. Speech2face: learning the face behind a voice. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition. 2019, 7539\u20137548","DOI":"10.1109\/CVPR.2019.00772"},{"key":"133_CR58","doi-asserted-by":"crossref","unstructured":"Wang R, Liu X, Cheung Y m, Cheng K, Wang N, Fan W. Learning discriminative joint embeddings for efficient face and voice association. In: Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval. 2020, 1881\u20131884","DOI":"10.1145\/3397271.3401302"},{"key":"133_CR59","unstructured":"Zhu H, Luo M, Wang R, Zheng A, He R. Deep audio-visual learning: a survey. arXiv preprint arXiv: 200104758, 2020"},{"key":"133_CR60","doi-asserted-by":"crossref","unstructured":"Ginosar S, Bar A, Kohavi G, Chan C, Owens A, Malik J. Learning individual styles of conversational gesture. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition. 
2019, 3497\u20133506","DOI":"10.1109\/CVPR.2019.00361"}],"container-title":["Frontiers of Computer Science"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11704-020-0133-7.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11704-020-0133-7\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11704-020-0133-7.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,7,20]],"date-time":"2023-07-20T21:04:13Z","timestamp":1689887053000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11704-020-0133-7"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,9,27]]},"references-count":60,"journal-issue":{"issue":"3","published-print":{"date-parts":[[2022,6]]}},"alternative-id":["133"],"URL":"https:\/\/doi.org\/10.1007\/s11704-020-0133-7","relation":{},"ISSN":["2095-2228","2095-2236"],"issn-type":[{"value":"2095-2228","type":"print"},{"value":"2095-2236","type":"electronic"}],"subject":[],"published":{"date-parts":[[2021,9,27]]},"assertion":[{"value":"31 March 2020","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"29 September 2020","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"27 September 2021","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}],"article-number":"163703"}}
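The record above is the raw work-metadata payload returned by the public Crossref REST API for DOI 10.1007/s11704-020-0133-7. Below is a minimal sketch of how such a record can be retrieved and a few fields read; it assumes the Python "requests" package and only relies on structure visible in the JSON above (the payload sits under the "message" key, "title" is a list, and each "author" entry carries "given"/"family" names). It is an illustrative example, not an official Crossref client.

import requests

DOI = "10.1007/s11704-020-0133-7"

# Fetch the work record. Crossref wraps the metadata in a
# {"status", "message-type", "message"} envelope, as seen in the record above.
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]

# "title" is a list of strings; "author" is a list of dicts with "given"/"family" keys.
title = work["title"][0]
authors = ", ".join(
    f'{a.get("given", "")} {a.get("family", "")}'.strip()
    for a in work.get("author", [])
)

print(title)
print(authors)
print("reference count:", work.get("reference-count"))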