{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,8,20]],"date-time":"2025-08-20T12:52:15Z","timestamp":1755694335714,"version":"3.28.0"},"reference-count":37,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,11,5]],"date-time":"2020-11-05T00:00:00Z","timestamp":1604534400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,11,5]],"date-time":"2020-11-05T00:00:00Z","timestamp":1604534400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,11,5]],"date-time":"2020-11-05T00:00:00Z","timestamp":1604534400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,11,5]]},"DOI":"10.1109\/o-cocosda50338.2020.9295032","type":"proceedings-article","created":{"date-parts":[[2020,12,28]],"date-time":"2020-12-28T20:48:58Z","timestamp":1609188538000},"page":"166-171","source":"Crossref","is-referenced-by-count":11,"title":["Improving Valence Prediction in Dimensional Speech Emotion Recognition Using Linguistic Information"],"prefix":"10.1109","author":[{"given":"Bagus Tris","family":"Atmaja","sequence":"first","affiliation":[]},{"given":"Masato","family":"Akagi","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref33","article-title":"Adam: A Method for Stochastic Optimization","author":"kingma","year":"0","journal-title":"3rd Int Conf Learn Represent ICLR 2015 - Conf Track Proc"},{"key":"ref32","first-page":"4482","article-title":"Multitask Learning and Multistage Fusion for Dimensional Audiovisual Emotion Recognition","author":"atmaja","year":"0","journal-title":"ICASSP 2020 &#x2013; 2020 IEEE Int Conf Acoust Speech Signal 
Process"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.2307\/2532051"},{"key":"ref30","first-page":"448","article-title":"Batch normalization: Accelerating deep network training by reducing internal covariate shift","volume":"1","author":"ioffe","year":"0","journal-title":"32nd Int Conf Mach Learn ICML 2015"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-32381-3_16"},{"key":"ref36","first-page":"265","article-title":"Tensorflow: A system for large-scale machine learning","author":"abadi","year":"0","journal-title":"12th USENIX Symp Oper Syst Des Implement (OSDI '16)"},{"journal-title":"Keras","year":"2015","author":"chollet","key":"ref35"},{"key":"ref34","first-page":"26","article-title":"Lecture 6.5-rmsprop: Divide the gradient by a running average of its recent magnitude","volume":"4","author":"tieleman","year":"2012","journal-title":"COURSERA Neural Networks for Machine Learning"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CIP.2012.6232924"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-2466"},{"key":"ref12","first-page":"112","author":"yoon","year":"2018","journal-title":"Multimodal Speech Emotion Recognition Using Audio and Text"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-3201"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/APSIPAASC47483.2019.9023098"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1811"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683190"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1149"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/s10579-008-9076-6"},{"key":"ref19","first-page":"2825","article-title":"Scikit-learn: Machine Learning in Python","volume":"12","author":"pedregosa","year":"2011","journal-title":"J Mach Learn 
Res"},{"key":"ref28","first-page":"1","volume":"10","author":"giannakopoulos","year":"2015","journal-title":"pyAudioAnalysis An Open-Source Python Library for Audio Signal Analysis"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1037\/0022-3514.37.3.345"},{"journal-title":"BERT Pre-training of deep bidirectional transformers for language understanding","year":"2018","author":"devlin","key":"ref27"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/W18-3307"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2017.2736999"},{"key":"ref29","first-page":"68","author":"torres","year":"2018","journal-title":"Emotion Detection from Text"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-2508"},{"key":"ref8","first-page":"843","volume":"9","author":"li","year":"2017","journal-title":"Learning Word Representations for Sentiment Analysis"},{"key":"ref7","first-page":"326","article-title":"Lexicons for Sentiment and Affect Extraction","author":"jurafsky","year":"2017","journal-title":"Speech Lang Process An Introd to Nat Lang Process Comput Linguist Speech Recognit"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2018.2867099"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3136755.3136760"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-1494"},{"journal-title":"Multi-Modal Emotion recognition on IEMOCAP Dataset using Deep Learning","year":"2018","author":"tripathi","key":"ref20"},{"key":"ref22","first-page":"1537","article-title":"Deep Recurrent Neural Networks for Emotion Recognition in Speech","author":"schmitt","year":"2018","journal-title":"DAGA"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2015.2457417"},{"journal-title":"Efficient Estimation of Word Representations in Vector Space","year":"2013","author":"mikolov","key":"ref24"},{"journal-title":"Deep 
Learning","year":"2016","author":"goodfellow","key":"ref23"},{"key":"ref26","first-page":"52","article-title":"Advances in pre-training distributed word representations","author":"mikolov","year":"0","journal-title":"Lr 2018 &#x2013; 11th Int Conf Lang Resour Eval"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/D14-1162"}],"event":{"name":"2020 23rd Conference of the Oriental COCOSDA International Committee for the Co-ordination and Standardisation of Speech Databases and Assessment Techniques (O-COCOSDA)","start":{"date-parts":[[2020,11,5]]},"location":"Yangon, Myanmar","end":{"date-parts":[[2020,11,7]]}},"container-title":["2020 23rd Conference of the Oriental COCOSDA International Committee for the Co-ordination and Standardisation of Speech Databases and Assessment Techniques (O-COCOSDA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9294763\/9294994\/09295032.pdf?arnumber=9295032","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,28]],"date-time":"2022-06-28T21:50:32Z","timestamp":1656453032000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9295032\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,11,5]]},"references-count":37,"URL":"https:\/\/doi.org\/10.1109\/o-cocosda50338.2020.9295032","relation":{},"subject":[],"published":{"date-parts":[[2020,11,5]]}}}