{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,10]],"date-time":"2025-12-10T12:44:39Z","timestamp":1765370679342,"version":"build-2065373602"},"reference-count":52,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2025]]},"DOI":"10.1109\/access.2025.3614020","type":"journal-article","created":{"date-parts":[[2025,9,24]],"date-time":"2025-09-24T17:34:19Z","timestamp":1758735259000},"page":"177509-177519","source":"Crossref","is-referenced-by-count":1,"title":["A Multimodal Deep Network for Music Emotion Recognition Using Audio Chorus and Lyrics"],"prefix":"10.1109","volume":"13","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-2687-3988","authenticated-orcid":false,"given":"Mohammad Ali","family":"Talaghat","sequence":"first","affiliation":[{"name":"Department of Computer Engineering, Shi.C., Islamic Azad University, Shiraz, Iran"}]},{"given":"Elham","family":"Parvinnia","sequence":"additional","affiliation":[{"name":"Department of Computer Engineering, Shi.C., Islamic Azad University, Shiraz, Iran"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1551-5723","authenticated-orcid":false,"given":"Mahdi","family":"Mehrabi","sequence":"additional","affiliation":[{"name":"Department of Computer Engineering, Shi.C., Islamic Azad University, Shiraz, Iran"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0055-4452","authenticated-orcid":false,"given":"Reza","family":"Boostani","sequence":"additional","affiliation":[{"name":"Department of Computer Science Engineering and Information Technology, Shiraz University, Shiraz, Iran"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1525\/mp.2012.30.3.307"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1007\/s11704-021-0569-4"},{"issue":"1","key":"ref3","first-page":"2","article-title":"Transformer-based automatic music mood classification using multi-modal framework","volume":"23","author":"Kumar","year":"2023","journal-title":"J. Comput. Sci. Technol."},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1080\/0929821042000317813"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pone.0173392"},{"key":"ref6","article-title":"Neural machine translation by jointly learning to align and translate","author":"Bahdanau","year":"2014","journal-title":"arXiv:1409.0473"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.3390\/electronics10101163"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/iccst53801.2021.00027"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/taffc.2020.3032373"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1016\/j.jestch.2020.10.009"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/s10844-021-00658-5"},{"key":"ref12","article-title":"Music mood detection based on audio and lyrics with deep neural net","author":"Delbouys","year":"2018","journal-title":"arXiv:1809.07276"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/s11042-019-08192-x"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.3390\/s22031065"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1037\/h0077714"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/tasl.2007.911513"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1145\/2964284.2967286"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1145\/2647868.2654931"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/access.2024.3484470"},{"key":"ref20","first-page":"1","article-title":"Parallel convolutional neural networks for music genre and mood classification","volume-title":"Proc. MIREX","author":"Lidy"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/tmm.2019.2918739"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1007\/s10772-020-09781-0"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.3390\/electronics12040978"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1155\/2020\/4606027"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/iri51335.2021.00068"},{"key":"ref26","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2018","journal-title":"arXiv:1810.04805"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.11591\/eei.v12i1.4231"},{"key":"ref28","first-page":"12345","article-title":"XLNet: Generalized autoregressive pretraining for language","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","volume":"32","author":"Yang"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1007\/s11042-022-14252-6"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.3390\/info15040224"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2019.8851988"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ijcnn48605.2020.9207605"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/tsa.2005.863204"},{"article-title":"Mastering future house music using machine","year":"2021","author":"Hofman","key":"ref34"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9413773"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1007\/978-94-011-4657-9"},{"article-title":"Auditory toolbox","year":"1998","author":"Slaney","key":"ref37"},{"volume-title":"The HTK Book","year":"2009","author":"Young","key":"ref38"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/9780470546475"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1155\/2022\/5732687"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.5555\/3104322.3104425"},{"key":"ref42","first-page":"2332","article-title":"Stacked convolutional and recurrent neural networks for music emotion recognition","volume-title":"Proc. IEEE Int. Conf. Syst.","author":"Malik"},{"key":"ref43","first-page":"1","article-title":"Fast and accurate deep network learning by exponential linear units (ELUs)","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Clevert"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.5555\/3045118.3045167"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1016\/j.specom.2022.02.006"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/d14-1181"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2020.01.006"},{"key":"ref49","article-title":"Sigmoid-weighted linear units for neural network function approximation in reinforcement learning","author":"Elfwing","year":"2017","journal-title":"arXiv:1702.03118"},{"key":"ref50","first-page":"591","article-title":"The million song dataset","volume-title":"Proc. Int. Soc. Music Inf. Retr. Conf.","author":"Bertin-Mahieux"},{"key":"ref51","first-page":"1","article-title":"MERT: Acoustic music understanding model with large-scale self-supervised training","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Li"},{"issue":"1","key":"ref52","first-page":"123","article-title":"Towards unified music emotion recognition across dimensional and categorical models","volume":"7","author":"Kang","year":"2025","journal-title":"J. Music Inf. Retr."}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6287639\/10820123\/11177191.pdf?arnumber=11177191","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,22]],"date-time":"2025-10-22T17:25:41Z","timestamp":1761153941000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11177191\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":52,"URL":"https:\/\/doi.org\/10.1109\/access.2025.3614020","relation":{},"ISSN":["2169-3536"],"issn-type":[{"type":"electronic","value":"2169-3536"}],"subject":[],"published":{"date-parts":[[2025]]}}}