{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,11]],"date-time":"2025-09-11T21:16:06Z","timestamp":1757625366145,"version":"3.44.0"},"publisher-location":"Cham","reference-count":27,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783032025500"},{"type":"electronic","value":"9783032025517"}],"license":[{"start":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T00:00:00Z","timestamp":1755820800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T00:00:00Z","timestamp":1755820800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026]]},"DOI":"10.1007\/978-3-032-02551-7_27","type":"book-chapter","created":{"date-parts":[[2025,8,21]],"date-time":"2025-08-21T05:27:32Z","timestamp":1755754052000},"page":"313-323","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Parameter vs. Sample Efficiency in\u00a0Multi-intent Recognition for\u00a0Dialogue Understanding: Benchmarking Small Open LLMs"],"prefix":"10.1007","author":[{"given":"Adnan","family":"Ahmad","sequence":"first","affiliation":[]},{"given":"Justin","family":"Hulha","sequence":"additional","affiliation":[]},{"given":"Philine","family":"Kowol","sequence":"additional","affiliation":[]},{"given":"Stefan","family":"Hillmann","sequence":"additional","affiliation":[]},{"given":"Sebastian","family":"M\u00f6ller","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,8,22]]},"reference":[{"key":"27_CR1","unstructured":"Abdin, M., et\u00a0al.: Phi-3 technical report: a highly capable language model locally on your phone. arXiv e-prints, arXiv:2404 (2024)"},{"key":"27_CR2","unstructured":"Adnan, A., Philine, T.K., Stefan, H., Sebastian, M.: Multi-intent recognition in dialogue understanding: a comparison between smaller open-source LLMs. In: The 14th International Workshop on Spoken Dialogue Systems Technology (2024)"},{"key":"27_CR3","unstructured":"AI@Meta. Llama 3 model card (2024). https:\/\/github.com\/meta-llama\/llama3\/blob\/main\/MODEL_CARD.md"},{"key":"27_CR4","unstructured":"Brown, T., et\u00a0al.: Language models are few-shot learners. Adv. Neural Inf. Process. Syst. 33, 1877\u20131901 (2020)"},{"key":"27_CR5","doi-asserted-by":"crossref","unstructured":"Budzianowski, P., et al.: Multiwoz-a large-scale multi-domain wizard-of-oz dataset for task-oriented dialogue modelling. In: Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pp. 5016\u20135026 (2018)","DOI":"10.18653\/v1\/D18-1547"},{"issue":"3","key":"27_CR6","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3641289","volume":"15","author":"X Yupeng Chang","year":"2024","unstructured":"Yupeng Chang, X., et al.: A survey on evaluation of large language models. ACM Trans. Intell. Syst. Technol. 15(3), 1\u201345 (2024)","journal-title":"ACM Trans. Intell. Syst. Technol."},{"key":"27_CR7","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. CoRR abs\/1810.04805 (2018)"},{"key":"27_CR8","unstructured":"Devlin, J., Chang, M.-W., Lee, K., Bert, K.T.: Pre-training of deep bidirectional transformers for language understanding (2019)"},{"key":"27_CR9","unstructured":"Eric, M., et al.: Multiwoz 2.1: a consolidated multi-domain dialogue dataset with state corrections and state tracking baselines. In: Proceedings of the Twelfth Language Resources and Evaluation Conference, pp. 422\u2013428 (2020)"},{"key":"27_CR10","unstructured":"Hiyouga. hiyouga\/llama-factory: Unify efficient fine-tuning of 100+ LLMs (2024). https:\/\/github.com\/hiyouga\/LLaMA-Factory"},{"key":"27_CR11","unstructured":"Hu, E.J., et\u00a0al.: Lora: Low-rank adaptation of large language models. In: International Conference on Learning Representations (2021)"},{"key":"27_CR12","unstructured":"Jiang, A.Q., et al.: Mistral 7b (2023)"},{"key":"27_CR13","unstructured":"Kaplan, J., et al.: Scaling laws for neural language models, Alec Radford (2020)"},{"key":"27_CR14","doi-asserted-by":"crossref","unstructured":"Lee, C.H., Cheng, H., Ostendorf, M.: Dialogue state tracking with a language model using schema-driven prompting. In: Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pp. 4937\u20134949 (2021)","DOI":"10.18653\/v1\/2021.emnlp-main.404"},{"key":"27_CR15","unstructured":"Li, S., et al.: COCO: controllable counterfactuals for evaluating dialogue state trackers. In: International Conference on Learning Representations (2020)"},{"key":"27_CR16","doi-asserted-by":"crossref","unstructured":"Louvan, S., Magnini, B.: Recent neural methods on slot filling and intent classification for task-oriented dialogue systems: a survey. arXiv preprint arXiv:2011.00564 (2020)","DOI":"10.18653\/v1\/2020.coling-main.42"},{"key":"27_CR17","unstructured":"Mirza, P., Sudhi, V., Sahoo, S.R., Bhat, S.R.: Illuminer: instruction-tuned large language models as few-shot intent classifier and slot filler. In: Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024), pp. 8639\u20138651 (2024)"},{"key":"27_CR18","doi-asserted-by":"crossref","unstructured":"Moghe, N., Razumovskaia, E., Guillou, L., Vuli\u0107, I., Korhonen, A., Birch, A.: Multi3nlu++: A multilingual, multi-intent, multi-domain dataset for natural language understanding in task-oriented dialogue. In: Findings of the Association for Computational Linguistics: ACL, vol. 2023, pp. 3732\u20133755 (2023)","DOI":"10.18653\/v1\/2023.findings-acl.230"},{"key":"27_CR19","unstructured":"Penedo, G., et al.: The refinedweb dataset for falcon LLM: outperforming curated corpora with web data, and web data only. arXiv preprint arXiv:2306.01116 (2023)"},{"key":"27_CR20","doi-asserted-by":"crossref","unstructured":"Rastogi, A., Zang, X., Sunkara, S., Gupta, R., Khaitan, P.: Towards scalable multi-domain conversational agents: the schema-guided dialogue dataset. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 34, pp. 8689\u20138696 (2020)","DOI":"10.1609\/aaai.v34i05.6394"},{"key":"27_CR21","doi-asserted-by":"crossref","unstructured":"Sahu, G., Rodriguez, P., Laradji, I., Atighehchian, P., Vazquez, D., Bahdanau, D.: Data augmentation for intent classification with off-the-shelf large language models. In: Proceedings of the 4th Workshop on NLP for Conversational AI, pp. 47\u201357 (2022)","DOI":"10.18653\/v1\/2022.nlp4convai-1.5"},{"key":"27_CR22","unstructured":"Team, G., et\u00a0al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805 (2023)"},{"key":"27_CR23","unstructured":"Wei, J., et al.: Finetuned language models are zero-shot learners. In: International Conference on Learning Representations (2021)"},{"key":"27_CR24","doi-asserted-by":"crossref","unstructured":"Xu, P., Sarikaya, R.: Exploiting shared information for multi-intent natural language sentence classification. In: Interspeech, pp. 3785\u20133789 (2013)","DOI":"10.21437\/Interspeech.2013-599"},{"key":"27_CR25","unstructured":"Yi, Z., Ouyang, J., Liu, Y., Liao, T., Xu, Z., Shen, Y.: A survey on recent advances in LLM-based multi-turn dialogue systems. arXiv preprint arXiv:2402.18013 (2024)"},{"key":"27_CR26","unstructured":"Young, S.: Cued standard dialogue acts. Report, Cambridge University Engineering Department, 14 October 2007 (2007)"},{"issue":"10","key":"27_CR27","doi-asserted-by":"publisher","first-page":"2011","DOI":"10.1007\/s11431-020-1692-3","volume":"63","author":"Z Zhang","year":"2020","unstructured":"Zhang, Z., Takanobu, R., Zhu, Q., Huang, M.L., Zhu, X.Y.: Recent advances and challenges in task-oriented dialog systems. Sci. China Technol. Sci. 63(10), 2011\u20132027 (2020). https:\/\/doi.org\/10.1007\/s11431-020-1692-3","journal-title":"Sci. China Technol. Sci."}],"container-title":["Lecture Notes in Computer Science","Text, Speech, and Dialogue"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-032-02551-7_27","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,9]],"date-time":"2025-09-09T18:04:45Z","timestamp":1757441085000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-032-02551-7_27"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,8,22]]},"ISBN":["9783032025500","9783032025517"],"references-count":27,"URL":"https:\/\/doi.org\/10.1007\/978-3-032-02551-7_27","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2025,8,22]]},"assertion":[{"value":"22 August 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"TSD","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Text, Speech, and Dialogue","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Erlangen","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Germany","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"25 August 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"28 August 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"28","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"tsd2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/www.kiv.zcu.cz\/tsd2025\/index.php","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}