{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,7]],"date-time":"2024-09-07T16:32:28Z","timestamp":1725726748633},"reference-count":39,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,1,9]],"date-time":"2023-01-09T00:00:00Z","timestamp":1673222400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,1,9]],"date-time":"2023-01-09T00:00:00Z","timestamp":1673222400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,1,9]]},"DOI":"10.1109\/slt54892.2023.10022565","type":"proceedings-article","created":{"date-parts":[[2023,1,27]],"date-time":"2023-01-27T18:54:03Z","timestamp":1674845643000},"page":"671-676","source":"Crossref","is-referenced-by-count":1,"title":["Efficient Text Analysis with Pre-Trained Neural Network Models"],"prefix":"10.1109","author":[{"given":"Jia","family":"Cui","sequence":"first","affiliation":[{"name":"Tencent AI Lab,Seattle"}]},{"given":"Heng","family":"Lu","sequence":"additional","affiliation":[{"name":"Tencent AI Lab,Seattle"}]},{"given":"Wenjie","family":"Wang","sequence":"additional","affiliation":[{"name":"Emory University"}]},{"given":"Shiyin","family":"Kang","sequence":"additional","affiliation":[{"name":"Tencent AI Lab,Seattle"}]},{"given":"Liqiang","family":"He","sequence":"additional","affiliation":[{"name":"Tencent AI Lab,Seattle"}]},{"given":"Guangzhi","family":"Li","sequence":"additional","affiliation":[{"name":"Tencent AI Lab,Seattle"}]},{"given":"Dong","family":"Yu","sequence":"additional","affiliation":[{"name":"Tencent AI Lab,Seattle"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Bert: Pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2018","journal-title":"arXiv"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.454"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.705"},{"key":"ref4","article-title":"What do you learn from context? probing for sentence structure in contextualized word representations","author":"Tenney","year":"2019","journal-title":"ICLR"},{"key":"ref5","article-title":"Visualizing and understanding the effectiveness of bert","author":"Hao","year":"2019","journal-title":"EMNLP"},{"key":"ref6","article-title":"Green ai","author":"Schwartz","year":"2019","journal-title":"ArXiv, abs\/1907.10597"},{"key":"ref7","article-title":"Small and practical bert models for sequence labeling","author":"Tsai","year":"2019","journal-title":"EMNLP"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.findings-emnlp.372"},{"key":"ref9","article-title":"Distilbert, a distilled version of bert: smaller, faster, cheaper and lighter","author":"Sanh","year":"2020","journal-title":"arXiv: 1910.01108"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.242"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.16"},{"key":"ref12","article-title":"Text normalization as a special case of machine translation","author":"Filip","year":"2006","journal-title":"IMCSIT"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N19-1389"},{"key":"ref14","article-title":"From text to speech","author":"Allen","year":"1987","journal-title":"The MITalk system, Cambridge Studies in Speech Science and Communication"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1017\/S1351324997001654"},{"key":"ref16","article-title":"A unified tagging approach to text normalization","author":"Zhu","year":"2007","journal-title":"ACL"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2014-176"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-35"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/s10772-018-9521-x"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N19-2024"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054560"},{"key":"ref22","article-title":"Transformer-based models of text normalization for speech applications","author":"Ro","year":"2022","journal-title":"CoRR"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2010-518"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00114"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1162\/coli_a_00349"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.21437\/ICSLP.1996-432"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/icassp.2007.367010"},{"key":"ref28","article-title":"Disambiguating effectively chinese polyphonic ambiguity based on unify approach","author":"Huang","year":"2008","journal-title":"ICML"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2019-2292"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053390"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2015-134"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2004-466"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1007\/11540007_74"},{"key":"ref34","article-title":"Unified mandarin tts front-end based on distilled bert model","author":"Zhang","year":"2020","journal-title":"arXiv"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.1998.675358"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CIS52066.2020.00009"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CogInfoCom.2017.8268246"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/ISCSLP.2016.7918492"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ICCSNT.2013.6967317"}],"event":{"name":"2022 IEEE Spoken Language Technology Workshop (SLT)","start":{"date-parts":[[2023,1,9]]},"location":"Doha, Qatar","end":{"date-parts":[[2023,1,12]]}},"container-title":["2022 IEEE Spoken Language Technology Workshop (SLT)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10022052\/10022330\/10022565.pdf?arnumber=10022565","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,2,13]],"date-time":"2024-02-13T08:38:58Z","timestamp":1707813538000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10022565\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,1,9]]},"references-count":39,"URL":"https:\/\/doi.org\/10.1109\/slt54892.2023.10022565","relation":{},"subject":[],"published":{"date-parts":[[2023,1,9]]}}}