{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,10]],"date-time":"2024-09-10T17:54:58Z","timestamp":1725990898062},"publisher-location":"Cham","reference-count":17,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783030001254"},{"type":"electronic","value":"9783030001261"}],"license":[{"start":{"date-parts":[[2018,1,1]],"date-time":"2018-01-01T00:00:00Z","timestamp":1514764800000},"content-version":"unspecified","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2018]]},"DOI":"10.1007\/978-3-030-00126-1_3","type":"book-chapter","created":{"date-parts":[[2018,9,10]],"date-time":"2018-09-10T15:10:57Z","timestamp":1536592257000},"page":"24-34","update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Commonalities of Glottal Sources and Vocal Tract Shapes Among Speakers in Emotional Speech"],"prefix":"10.1007","author":[{"given":"Yongwei","family":"Li","sequence":"first","affiliation":[]},{"given":"Ken-Ichi","family":"Sakakibara","sequence":"additional","affiliation":[]},{"given":"Daisuke","family":"Morikawa","sequence":"additional","affiliation":[]},{"given":"Masato","family":"Akagi","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2018,9,11]]},"reference":[{"doi-asserted-by":"crossref","unstructured":"Schr\u00f6der, M., Cowie, R., Douglas-Cowie, E., Westerdijk, M., Gielen, S.: Acoustic correlates of emotion dimensions in view of speech synthesis. In: 7th European Conference on Speech Communication and Technology (2001)","key":"3_CR1","DOI":"10.21437\/Eurospeech.2001-34"},{"doi-asserted-by":"crossref","unstructured":"Hamada, Y., Elbarougy, R., Akagi, M.: A method for emotional speech synthesis based on the position of emotional state in Valence-Activation space. In: Asia-Pacific Signal and Information Processing Association, 2014 Annual Summit and Conference (APSIPA), pp. 1\u20137. IEEE Press (2014)","key":"3_CR2","DOI":"10.1109\/APSIPA.2014.7041729"},{"doi-asserted-by":"crossref","unstructured":"Li, X., Akagi, M.: Multilingual speech emotion recognition system based on a three-layer model. In: Interspeech, pp. 3608\u20133612 (2016)","key":"3_CR3","DOI":"10.21437\/Interspeech.2016-645"},{"issue":"3","key":"3_CR4","doi-asserted-by":"publisher","first-page":"614","DOI":"10.1037\/0022-3514.70.3.614","volume":"70","author":"R Banse","year":"1996","unstructured":"Banse, R., Scherer, K.R.: Acoustic profiles in vocal emotion expression. J. Pers. Soc. Psychol. 70(3), 614\u2013636 (1996)","journal-title":"J. Pers. Soc. Psychol."},{"issue":"1","key":"3_CR5","doi-asserted-by":"publisher","first-page":"26","DOI":"10.1159\/000091405","volume":"63","author":"M Airas","year":"2006","unstructured":"Airas, M., Alku, P.: Emotions in vowel segments of continuous speech: analysis of the glottal flow using the normalised amplitude quotient. Phonetica 63(1), 26\u201346 (2006)","journal-title":"Phonetica"},{"issue":"1\u20132","key":"3_CR6","doi-asserted-by":"publisher","first-page":"189","DOI":"10.1016\/S0167-6393(02)00082-1","volume":"40","author":"C Gobl","year":"2003","unstructured":"Gobl, C., Chasaide, A.N.: The role of voice quality in communicating emotion, mood and attitude. Speech Commun. 40(1\u20132), 189\u2013212 (2003)","journal-title":"Speech Commun."},{"doi-asserted-by":"crossref","unstructured":"Kitamura, T.: Similarity of effects of emotions on the speech organ configuration with and without speaking. In: Interspeech, pp. 909\u2013912, (2010)","key":"3_CR7","DOI":"10.21437\/Interspeech.2010-309"},{"issue":"1","key":"3_CR8","doi-asserted-by":"crossref","first-page":"620","DOI":"10.1515\/opli-2016-0034","volume":"2","author":"D Erickson","year":"2016","unstructured":"Erickson, D., Zhu, C., Kawahara, S., Suemitsu, A.: Articulation, acoustics and perception of Mandarin Chinese Emotional Speech. Open Linguist. 2(1), 620\u2013635 (2016)","journal-title":"Open Linguist."},{"unstructured":"Fant, G., Liljencrants, J., Lin, Q.-G.: A four-parameter model of glottal flow. in: STL-QPSR 1985, vol. 4, pp. 1\u201313 (1985)","key":"3_CR9"},{"doi-asserted-by":"crossref","unstructured":"Vincent, D., Rosec, O., Chonavel, T.: Estimation of LF glottal source parameters based on an ARX model. In: Interspeech, pp. 333\u2013336 (2005)","key":"3_CR10","DOI":"10.21437\/Interspeech.2005-177"},{"doi-asserted-by":"crossref","unstructured":"Kane, J., Gobl, C.: Evaluation of automatic glottal source analysis. International Conference on Nonlinear Speech Processing, Springer, pp. 1\u20138 (2013)","key":"3_CR11","DOI":"10.1007\/978-3-642-38847-7_1"},{"doi-asserted-by":"crossref","unstructured":"Ohtsuka, T., Kasuya, H.: Aperiodicity control in ARX-based speech analysis-synthesis method. In: Seventh European Conference on Speech Communication and Technology, pp. 2267\u20132270 (2001)","key":"3_CR12","DOI":"10.21437\/Eurospeech.2001-540"},{"doi-asserted-by":"crossref","unstructured":"Kawahara, H., Sakakibara, K.-I., Banno, H., Morise, M., Toda, T., Irino, T.: Aliasing-free implementation of discrete-time glottal source models and their applications to speech synthesis and F0 extractor evaluation. In: Signal and Information Processing Association Annual Summit and Conference (APSIPA), pp. 520\u2013529. IEEE Press (2015)","key":"3_CR13","DOI":"10.1109\/APSIPA.2015.7415325"},{"issue":"1","key":"3_CR14","doi-asserted-by":"publisher","first-page":"20","DOI":"10.1016\/j.csl.2011.03.003","volume":"26","author":"T Drugman","year":"2012","unstructured":"Drugman, T., Bozkurt, B., Dutoit, T.: A comparative study of glottal source estimation techniques. Comput. Speech Lang. 26(1), 20\u201334 (2012)","journal-title":"Comput. Speech Lang."},{"key":"3_CR15","series-title":"Lecture Notes in Computer Science (Lecture Notes in Artificial Intelligence)","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1007\/978-3-642-38847-7_1","volume-title":"Advances in Nonlinear Speech Processing","author":"J Kane","year":"2013","unstructured":"Kane, J., Gobl, C.: Evaluation of automatic glottal source analysis. In: Drugman, T., Dutoit, T. (eds.) NOLISP 2013. LNCS (LNAI), vol. 7911, pp. 1\u20138. Springer, Heidelberg (2013). https:\/\/doi.org\/10.1007\/978-3-642-38847-7_1"},{"issue":"5","key":"3_CR16","doi-asserted-by":"publisher","first-page":"417","DOI":"10.1109\/TAU.1973.1162506","volume":"21","author":"H Wakita","year":"1973","unstructured":"Wakita, H.: Direct estimation of the vocal tract shape by inverse filtering of acoustic speech waveforms. IEEE Trans. Audio Electroacoust. 21(5), 417\u2013427 (1973)","journal-title":"IEEE Trans. Audio Electroacoust."},{"doi-asserted-by":"crossref","unstructured":"Schroder M., Cowie R., Douglas-Cowie E., Westerdijk M., Gielen S.C.: Acoustic correlates of emotion dimensions in view of speech synthesis. In: Proceedings of Interspeech 2001, pp. 87\u201390 (2001)","key":"3_CR17","DOI":"10.21437\/Eurospeech.2001-34"}],"container-title":["Lecture Notes in Computer Science","Studies on Speech Production"],"original-title":[],"link":[{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-00126-1_3","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,9,1]],"date-time":"2022-09-01T01:33:37Z","timestamp":1661996017000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/978-3-030-00126-1_3"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2018]]},"ISBN":["9783030001254","9783030001261"],"references-count":17,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-00126-1_3","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2018]]}}}