{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,19]],"date-time":"2026-01-19T07:53:22Z","timestamp":1768809202011,"version":"3.49.0"},"reference-count":42,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2023]]},"DOI":"10.1109\/access.2023.3248796","type":"journal-article","created":{"date-parts":[[2023,2,24]],"date-time":"2023-02-24T18:44:42Z","timestamp":1677264282000},"page":"19849-19862","source":"Crossref","is-referenced-by-count":6,"title":["Performance Improvement on Traditional Chinese Task-Oriented Dialogue Systems With Reinforcement Learning and Regularized Dropout Technique"],"prefix":"10.1109","volume":"11","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-5534-0120","authenticated-orcid":false,"given":"Jeng-Shin","family":"Sheu","sequence":"first","affiliation":[{"name":"Department of Computer Science and Information Engineering, National Yunlin University of Science and Technology, Yunlin, Taiwan"}]},{"given":"Siang-Ru","family":"Wu","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Information Engineering, National Yunlin University of Science and Technology, Yunlin, Taiwan"}]},{"given":"Wen-Hung","family":"Wu","sequence":"additional","affiliation":[{"name":"Ponddy Education Taiwan Ltd., New Taipei City, Taiwan"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1017\/cbo9780511809071"},{"key":"ref2","volume-title":"Natural Language Processing (NLP): What Is It & How Does it Work?","year":"2023"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.ijinfomgt.2022.102568"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1016\/j.aiopen.2022.02.001"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2010-343"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/D14-1179"},{"key":"ref7","article-title":"Attention is all you need","author":"Vaswani","year":"2017","journal-title":"arXiv:1706.03762"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2019.105210"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1093\/nsr\/nwx110"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/s11431-020-1647-3"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1907.11692"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2022.3212767"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU51503.2021.9688232"},{"key":"ref15","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Brown"},{"key":"ref16","first-page":"1","article-title":"Character-level convolutional networks for text classification","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"28","author":"Zhang"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1670"},{"key":"ref18","article-title":"Improving neural machine translation models with monolingual data","author":"Sennrich","year":"2015","journal-title":"arXiv:1511.06709"},{"issue":"1","key":"ref19","first-page":"1929","article-title":"Dropout: A simple way to prevent neural networks from overfitting","volume":"15","author":"Srivastava","year":"2014","journal-title":"J. Mach. Learn. Res."},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2012.2225812"},{"key":"ref21","article-title":"Exposure bias versus self-recovery: Are distortions really incremental for autoregressive text generation?","author":"He","year":"2019","journal-title":"arXiv:1905.10617"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.naacl-main.179"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1016\/j.ress.2022.108908"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.3390\/a15110393"},{"key":"ref25","article-title":"A survey on reinforcement learning security with application to autonomous driving","author":"Demontis","year":"2022","journal-title":"arXiv:2212.06123"},{"key":"ref26","first-page":"10890","article-title":"R-Drop: Regularized dropout for neural networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Liang"},{"key":"ref27","article-title":"MultiWOZ 2.1: A consolidated multi-domain dialogue dataset with state corrections and state tracking baselines","author":"Eric","year":"2019","journal-title":"arXiv:1907.01669"},{"key":"ref28","volume-title":"Multi-Domain Dialogue State Tracking on MULTIWOZ 2.1","year":"2021"},{"key":"ref29","first-page":"20179","article-title":"A simple language model for task-oriented dialogue","volume":"33","author":"Hosseini-Asl","year":"2020","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref30","article-title":"Learning end-to-end goal-oriented dialog","author":"Bordes","year":"2016","journal-title":"arXiv:1605.07683"},{"key":"ref31","article-title":"A network-based end-to-end trainable task-oriented dialogue system","author":"Wen","year":"2016","journal-title":"arXiv:1604.04562"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/W17-5506"},{"key":"ref33","article-title":"Global-to-local memory pointer networks for task-oriented dialogue","author":"Wu","year":"2019","journal-title":"arXiv:1901.04713"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P18-1133"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i05.6349"},{"key":"ref36","volume-title":"Fundamentals of Machine Learning for Predictive Data Analytics: Algorithms, Worked Examples, and Case Studies","author":"Kelleher","year":"2020"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1250"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i05.6242"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/BigMM.2018.8499179"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D17-1039"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2020.3004555"},{"key":"ref42","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2018","journal-title":"arXiv:1810.04805"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6287639\/10005208\/10052671.pdf?arnumber=10052671","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,2]],"date-time":"2024-03-02T22:03:32Z","timestamp":1709417012000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10052671\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"references-count":42,"URL":"https:\/\/doi.org\/10.1109\/access.2023.3248796","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023]]}}}