{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,5,7]],"date-time":"2025-05-07T04:14:04Z","timestamp":1746591244580,"version":"3.40.5"},"reference-count":30,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2025,5,6]],"date-time":"2025-05-06T00:00:00Z","timestamp":1746489600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"},{"start":{"date-parts":[[2025,5,6]],"date-time":"2025-05-06T00:00:00Z","timestamp":1746489600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"}],"funder":[{"DOI":"10.13039\/501100003399","name":"Science and Technology Commission of Shanghai Municipality","doi-asserted-by":"publisher","award":["23010501500"],"award-info":[{"award-number":["23010501500"]}],"id":[{"id":"10.13039\/501100003399","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Discov Computing"],"DOI":"10.1007\/s10791-025-09582-6","type":"journal-article","created":{"date-parts":[[2025,5,6]],"date-time":"2025-05-06T07:31:05Z","timestamp":1746516665000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Few-shot controlled dialogue generation using in-context learning"],"prefix":"10.1007","volume":"28","author":[{"given":"Zhongqin","family":"Bi","sequence":"first","affiliation":[]},{"given":"Xueni","family":"Hu","sequence":"additional","affiliation":[]},{"given":"Weina","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Xiaoyu","family":"Wang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,5,6]]},"reference":[{"key":"9582_CR1","unstructured":"Vinyals O, Le QV. A neural conversational model. CoRR. 2015. arXiv:1506.05869."},{"key":"9582_CR2","doi-asserted-by":"crossref","unstructured":"Qin L, Pan W, Chen Q, Liao L, Yu Z, Zhang Y, Che W, Li M. End-to-end task-oriented dialogue: a survey of tasks, methods, and future directions. 2023.","DOI":"10.18653\/v1\/2023.emnlp-main.363"},{"key":"9582_CR3","doi-asserted-by":"crossref","unstructured":"Zhang Z, Takanobu R, Zhu Q, Huang M, Zhu X. Recent advances and challenges in task-oriented dialog system. arXiv: Computation and Language, arXiv: Computation and Language. 2020.","DOI":"10.1007\/s11431-020-1692-3"},{"key":"9582_CR4","doi-asserted-by":"crossref","unstructured":"Li Z, Chen W, Li S, Wang H, Qian J, Yan X. Controllable dialogue simulation with in-context learning. 2022.","DOI":"10.18653\/v1\/2022.findings-emnlp.318"},{"key":"9582_CR5","unstructured":"Brown TB, Mann B, Ryder N, Subbiah M, Kaplan J, Dhariwal P, Neelakantan A, Shyam P, Sastry G, Amanda A, Agarwal S, Herbert-Voss A, Krueger G, Tom H, Child R, Ramesh A, Ziegler D, Wu J, Winter C, Hesse C, Chen M, Sigler E, Litwin M, Gray S, Benjamin C, Clark J, Berner C, Sam M, Radford A, Sutskever I, Amodei D. Language models are few-shot learners. arXiv: Computation and Language, arXiv: Computation and Language. 2020."},{"key":"9582_CR6","doi-asserted-by":"crossref","unstructured":"Shin R, Lin C, Thomson S, Chen C, Roy S, Platanios E, Pauls A, Klein D, Eisner J, Durme B. Constrained language models yield few-shot semantic parsers. Cornell University - arXiv: Cornell University - arXiv 2021.","DOI":"10.18653\/v1\/2021.emnlp-main.608"},{"key":"9582_CR7","unstructured":"Rubin O, Herzig J, Berant J. Learning to retrieve prompts for in-context learning."},{"issue":"2","key":"9582_CR8","doi-asserted-by":"publisher","first-page":"25","DOI":"10.1145\/3166054.3166058","volume":"19","author":"H Chen","year":"2017","unstructured":"Chen H, Liu X, Yin D, Tang J. A survey on dialogue systems: recent advances and new frontiers. ACM SIGKDD Explorations Newsl. 2017;19(2):25\u201335. https:\/\/doi.org\/10.1145\/3166054.3166058.","journal-title":"ACM SIGKDD Explorations Newsl"},{"key":"9582_CR9","doi-asserted-by":"publisher","unstructured":"Wu C-S, Hoi SCH, Socher R, Xiong C. Tod-bert: Pre-trained natural language understanding for task-oriented dialogue. In: Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP) 2020. https:\/\/doi.org\/10.18653\/v1\/2020.emnlp-main.66.","DOI":"10.18653\/v1\/2020.emnlp-main.66"},{"key":"9582_CR10","doi-asserted-by":"publisher","unstructured":"Budzianowski P, Wen T-H, Tseng B-H, Casanueva I, Ultes S, Ramadan O, Ga\u0161i\u0107 M. Multiwoz\u2014a large-scale multi-domain wizard-of-oz dataset for task-oriented dialogue modelling. In: Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing 2018. https:\/\/doi.org\/10.18653\/v1\/d18-1547.","DOI":"10.18653\/v1\/d18-1547"},{"key":"9582_CR11","doi-asserted-by":"publisher","first-page":"119","DOI":"10.5898\/JHRI.1.1.Riek","volume":"1","author":"LD Riek","year":"2012","unstructured":"Riek LD. Wizard of oz studies in HRI. J Human-Robot Interaction. 2012;1:119\u201336.","journal-title":"J Human-Robot Interaction"},{"key":"9582_CR12","unstructured":"Shin J, Yu H, Moon H, Madotto A, Park J. Dialogue summaries as dialogue states (ds2), template-guided summarization for few-shot dialogue state tracking."},{"key":"9582_CR13","unstructured":"Hu Y, Lee C-H, Xie T, Tao, Smith N, Ostendorf M. In-context learning for few-shot dialogue state tracking."},{"key":"9582_CR14","doi-asserted-by":"crossref","unstructured":"Tseng B-H, Dai Y, Kreyssig F, Byrne B. Transferable dialogue systems and user simulators. 2021.","DOI":"10.18653\/v1\/2021.acl-long.13"},{"key":"9582_CR15","unstructured":"Terragni S, Filipavicius M, Khau N, Guedes B, Manso A, Mathis R. In-context learning user simulators for task-oriented dialog systems. 2023."},{"key":"9582_CR16","doi-asserted-by":"publisher","unstructured":"Kim S, Chang M, Lee S-W. Neuralwoz: learning to collect task-oriented dialogue via model-based simulation. In: Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers). 2021. https:\/\/doi.org\/10.18653\/v1\/2021.acl-long.287.","DOI":"10.18653\/v1\/2021.acl-long.287"},{"key":"9582_CR17","unstructured":"Dong Q, Li L, Dai D, Zheng C, Wu Z, Chang B, Sun X, Xu J, Li L, Sui Z. A survey for in-context learning. 2022."},{"key":"9582_CR18","doi-asserted-by":"publisher","unstructured":"Lee C-H, Cheng H, Ostendorf M. Dialogue state tracking with a language model using schema-driven prompting. In: Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing; 2021. https:\/\/doi.org\/10.18653\/v1\/2021.emnlp-main.404.","DOI":"10.18653\/v1\/2021.emnlp-main.404"},{"key":"9582_CR19","doi-asserted-by":"crossref","unstructured":"Lai C-M, Hsu M-H, Huang C-W, Chen Y-N. Controllable user dialogue act augmentation for dialogue state tracking. 2022.","DOI":"10.18653\/v1\/2022.sigdial-1.5"},{"key":"9582_CR20","unstructured":"Poesia G, Polozov O, Le V, Tiwari A, Soares G, Meek C, Gulwani S. Synchromesh: Reliable code generation from pre-trained language models."},{"key":"9582_CR21","unstructured":"Liu C, Chen X, Shin E, Chen M, Song D. Latent attention for if-then program synthesis. Neural Inf Proc Syst. 2016."},{"key":"9582_CR22","doi-asserted-by":"publisher","unstructured":"Pasupat P, Zhang Y, Guu K. Controllable semantic parsing via retrieval augmentation. In: Moens, M.-F., Huang, X., Specia, L., Yih, S.W.-t. (eds.) Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pp. 7683\u20137698. Association for Computational Linguistics, Online and Punta Cana, Dominican Republic; 2021. https:\/\/doi.org\/10.18653\/v1\/2021.emnlp-main.607.","DOI":"10.18653\/v1\/2021.emnlp-main.607"},{"key":"9582_CR23","unstructured":"Devlin J, Chang MW, Lee K, Toutanova K. Bert: Pre-training of deep bidirectional transformers for language understanding. 2018."},{"key":"9582_CR24","unstructured":"Eric M, Goel R, Paul S, Sethi A, Agarwal S, Gao S, Kumar A, Goyal A, Ku P, Hakkani-Tur D. MultiWOZ 2.1: A consolidated multi-domain dialogue dataset with state corrections and state tracking baselines. In: Calzolari, N., B\u00e9chet, F., Blache, P., Choukri, K., Cieri, C., Declerck, T., Goggi, S., Isahara, H., Maegaard, B., Mariani, J., Mazo, H., Moreno, A., Odijk, J., Piperidis, S. (eds.) Proceedings of the Twelfth Language Resources and Evaluation Conference, pp. 422\u2013428. European Language Resources Association, Marseille, France; 2020. https:\/\/aclanthology.org\/2020.lrec-1.53."},{"key":"9582_CR25","doi-asserted-by":"crossref","unstructured":"Han T, Liu X, Takanobu R, Lian Y, Huang C, Peng W, Huang M. Multiwoz 2.3: a multi-domain task-oriented dataset enhanced with annotation corrections and co-reference annotation. CoRR. 2020. arxiv:2010.05594.","DOI":"10.1007\/978-3-030-88483-3_16"},{"key":"9582_CR26","unstructured":"Sid B, Leo G, Wang P, Leahy C, Stella B. Gpt-neo: large scale autoregressive language modeling with mesh-tensorflow. 2021."},{"key":"9582_CR27","doi-asserted-by":"publisher","unstructured":"Reimers N, Gurevych I. Sentence-bert: Sentence embeddings using siamese bert-networks. In: Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP) 2019. https:\/\/doi.org\/10.18653\/v1\/d19-1410.","DOI":"10.18653\/v1\/d19-1410"},{"key":"9582_CR28","unstructured":"Hosseini-Asl E, McCann B, Wu C, Yavuz S, Socher R. A simple language model for task-oriented dialogue. CoRR. 2020 arxiv:2005.00796."},{"key":"9582_CR29","doi-asserted-by":"publisher","unstructured":"Lin Z, Madotto A, Winata GI, Fung P. MinTL: Minimalist transfer learning for task-oriented dialogue systems. In: Webber, B., Cohn, T., He, Y., Liu, Y. (eds.) Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pp. 3391\u20133405. Association for Computational Linguistics, Online; 2020. https:\/\/doi.org\/10.18653\/v1\/2020.emnlp-main.273.","DOI":"10.18653\/v1\/2020.emnlp-main.273"},{"key":"9582_CR30","doi-asserted-by":"publisher","unstructured":"Su Y, Shu L, Mansimov E, Gupta A, Cai D, Lai Y-A, Zhang Y. Multi-task pre-training for plug-and-play task-oriented dialogue system. In: Muresan, S., Nakov, P., Villavicencio, A. (eds.) Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 4661\u20134676. Association for Computational Linguistics, Dublin, Ireland; 2022. https:\/\/doi.org\/10.18653\/v1\/2022.acl-long.319.","DOI":"10.18653\/v1\/2022.acl-long.319"}],"container-title":["Discover Computing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10791-025-09582-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10791-025-09582-6\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10791-025-09582-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,5,6]],"date-time":"2025-05-06T07:31:54Z","timestamp":1746516714000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10791-025-09582-6"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,5,6]]},"references-count":30,"journal-issue":{"issue":"1","published-online":{"date-parts":[[2025,12]]}},"alternative-id":["9582"],"URL":"https:\/\/doi.org\/10.1007\/s10791-025-09582-6","relation":{},"ISSN":["2948-2992"],"issn-type":[{"value":"2948-2992","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,5,6]]},"assertion":[{"value":"30 November 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"28 April 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"6 May 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"This declaration is not applicable to our work. Not applicable.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethics approval and consent to participate"}},{"value":"Not applicable.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent for publication"}},{"value":"The authors declare no potential Conflict of interest.","order":4,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing interests"}}],"article-number":"66"}}