{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,26]],"date-time":"2025-03-26T13:47:43Z","timestamp":1742996863885,"version":"3.40.3"},"publisher-location":"Cham","reference-count":15,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783030964504"},{"type":"electronic","value":"9783030964511"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-030-96451-1_5","type":"book-chapter","created":{"date-parts":[[2022,2,25]],"date-time":"2022-02-25T13:03:18Z","timestamp":1645794198000},"page":"45-56","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Automatic Utterance Selection Based on Prosodic Features of Children\u2019s Vocalizations"],"prefix":"10.1007","author":[{"given":"Yuki","family":"Kubo","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3396-671X","authenticated-orcid":false,"given":"Natsuki","family":"Oka","sequence":"additional","affiliation":[]},{"given":"Subaru","family":"Hanada","sequence":"additional","affiliation":[]},{"given":"Kazuaki","family":"Tanaka","sequence":"additional","affiliation":[]},{"given":"Tomomi","family":"Takahashi","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,2,26]]},"reference":[{"unstructured":"Hanada, S., Oka, N., Tanaka, K., Takahashi, T., Kubo, Y.: A study on automatic dialogue selection in a spoken dialogue system for children. In: Human-Agent Interaction Symposium (2021). (in Japanese)","key":"5_CR1"},{"doi-asserted-by":"crossref","unstructured":"Zhou, H., Huang, M., Zhang, T., Zhu, X., Liu, B.: Emotional chatting machine: emotional conversation generation with internal and external memory. arXiv e-prints, arXiv\u20131704 (2017)","key":"5_CR2","DOI":"10.1609\/aaai.v32i1.11325"},{"doi-asserted-by":"crossref","unstructured":"Lin, Z., Madotto, A., Shin, J., Xu, P., Fung, P.: MoEL: mixture of empathetic listeners. In: Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pp. 121\u2013132 (2019)","key":"5_CR3","DOI":"10.18653\/v1\/D19-1012"},{"doi-asserted-by":"crossref","unstructured":"Majumder, N., Poria, S., Hazarika, D., Mihalcea, R., Gelbukh, A., Cambria, E.: Dialoguernn: An attentive rnn for emotion detection in conversations. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 33, pp. 6818\u20136825 (2019)","key":"5_CR4","DOI":"10.1609\/aaai.v33i01.33016818"},{"issue":"02","key":"5_CR5","first-page":"86","volume":"5","author":"S Matsumoto","year":"2018","unstructured":"Matsumoto, S., Araki, M.: Assesment of user\u2019s interests in multimodal dialog integrating multiple modalities. SIG-SLUD 5(02), 86\u201387 (2018). (in Japanese)","journal-title":"SIG-SLUD"},{"unstructured":"Nishimoto, H., Komatani, K.: Predicting user\u2019s interest level in dialogues with multimodal features. In: Proceedings of the Annual Conference of JSAI, JSAI 2018, 3C2OS14b04 (2018). (in Japanese)","key":"5_CR6"},{"doi-asserted-by":"crossref","unstructured":"Horii, T., Nagai, Y., Asada, M.: Imitation of human expressions based on emotion estimation by mental simulation. Paladyn J. Behav. Robot. 7(1), 40\u201354 (2016). De Gruyter","key":"5_CR7","DOI":"10.1515\/pjbr-2016-0004"},{"issue":"146","key":"5_CR8","first-page":"55","volume":"115","author":"H Nakayama","year":"2015","unstructured":"Nakayama, H.: Image feature extraction and transfer learning using deep convolutional neural networks. IEICE Tech. 115(146), 55\u201359 (2015). (in Japanese)","journal-title":"IEICE Tech."},{"key":"5_CR9","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"270","DOI":"10.1007\/978-3-030-01424-7_27","volume-title":"Artificial Neural Networks and Machine Learning \u2013 ICANN 2018","author":"C Tan","year":"2018","unstructured":"Tan, C., Sun, F., Kong, T., Zhang, W., Yang, C., Liu, C.: A survey on deep transfer learning. In: K\u016frkov\u00e1, V., Manolopoulos, Y., Hammer, B., Iliadis, L., Maglogiannis, I. (eds.) ICANN 2018. LNCS, vol. 11141, pp. 270\u2013279. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01424-7_27"},{"doi-asserted-by":"crossref","unstructured":"Huang, J.-T., Li, J., Yu, D., Deng, L., Gong, Y.: Cross-language knowledge transfer using multilingual deep neural network with shared hidden layers. In: 2013 IEEE International Conference on Acoustics, Speech and Signal Processing, pp. 7304\u20137308. IEEE (2013)","key":"5_CR10","DOI":"10.1109\/ICASSP.2013.6639081"},{"doi-asserted-by":"crossref","unstructured":"Mitsukuni, K., et al.: Analysis of the influence of mothers\u2019 traits and behaviors on children\u2019s conversational play with an utterance-output device. In: Proceedings of the 7th International Conference on Human-Agent Interaction, pp. 271\u2013273 (2019)","key":"5_CR11","DOI":"10.1145\/3349537.3352789"},{"doi-asserted-by":"publisher","unstructured":"Ichikawa, J., et al.: Investigation of interaction between children and an utterance-output device focused on personality. Trans. Jpn. Soc. Kansei Eng. 19(2), 173\u2013179 (2020). https:\/\/doi.org\/10.5057\/jjske.TJSKE-D-19-00058. (in Japanese)","key":"5_CR12","DOI":"10.5057\/jjske.TJSKE-D-19-00058"},{"doi-asserted-by":"crossref","unstructured":"Eyben, F., W\u00f6llmer, M., Schuller, B.: Opensmile: the munich versatile and fast open-source audio feature extractor. In: Proceedings of the 18th ACM International Conference on Multimedia, pp. 1459\u20131462 (2010)","key":"5_CR13","DOI":"10.1145\/1873951.1874246"},{"issue":"02","key":"5_CR14","first-page":"20","volume":"5","author":"M Araki","year":"2017","unstructured":"Araki, M., et al.: Collection of multimodal dialog data and analysis of the result of annotation of users\u2019 interests. SIG-SLUD 5(02), 20\u201325 (2017). (in Japanese)","journal-title":"SIG-SLUD"},{"issue":"2","key":"5_CR15","first-page":"57","volume":"21","author":"T Oyama","year":"1985","unstructured":"Oyama, T.: Historical background and the present status of reaction time studies. Jpn. J. Ergon. 21(2), 57\u201364 (1985). (in Japanese)","journal-title":"Jpn. J. Ergon."}],"container-title":["Advances in Intelligent Systems and Computing","Advances in Artificial Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-96451-1_5","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,1,27]],"date-time":"2023-01-27T22:39:48Z","timestamp":1674859188000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-030-96451-1_5"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783030964504","9783030964511"],"references-count":15,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-96451-1_5","relation":{},"ISSN":["2194-5357","2194-5365"],"issn-type":[{"type":"print","value":"2194-5357"},{"type":"electronic","value":"2194-5365"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"26 February 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"JSAI","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Annual Conference of the Japanese Society for Artificial Intelligence","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2021","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"8 June 2021","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"11 June 2021","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"35","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"jsai2021","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/www.ai-gakkai.or.jp\/jsai2021\/en","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}