{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,14]],"date-time":"2026-01-14T22:59:05Z","timestamp":1768431545862,"version":"3.49.0"},"reference-count":27,"publisher":"IEEE","license":[{"start":{"date-parts":[[2019,5,1]],"date-time":"2019-05-01T00:00:00Z","timestamp":1556668800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2019,5,1]],"date-time":"2019-05-01T00:00:00Z","timestamp":1556668800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2019,5]]},"DOI":"10.1109\/icassp.2019.8683133","type":"proceedings-article","created":{"date-parts":[[2019,4,17]],"date-time":"2019-04-17T16:01:56Z","timestamp":1555516916000},"page":"3975-3979","source":"Crossref","is-referenced-by-count":22,"title":["Learning Affective Correspondence between Music and Image"],"prefix":"10.1109","author":[{"given":"Gaurav","family":"Verma","sequence":"first","affiliation":[{"name":"Adobe Research, India"}]},{"given":"Eeshan Gunesh","family":"Dhekane","sequence":"additional","affiliation":[{"name":"Mila, Universit&#x00E9; de Montr&#x00E9;al, Canada"}]},{"given":"Tanaya","family":"Guha","sequence":"additional","affiliation":[{"name":"University of Warwick, UK"}]}],"member":"263","reference":[{"key":"ref10","article-title":"Deep learning for content-based, cross-modal retrieval of videos and music","author":"hong","year":"2017"},{"key":"ref11","doi-asserted-by":"crossref","DOI":"10.1007\/978-3-030-01231-1_39","article-title":"Audio-visual scene analysis with self-supervised multisensory features","author":"owens","year":"2018"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00879"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.73"},{"key":"ref14","article-title":"Objects that sound","volume":"3","author":"arandjelovic","year":"2017"},{"key":"ref15","first-page":"308","article-title":"Building a large scale dataset for image emotion recognition: The fine print and the benchmark","author":"you","year":"2016","journal-title":"AAAI"},{"key":"ref16","first-page":"10","article-title":"The million song dataset","volume":"2","author":"bertin-mahieux","year":"2011","journal-title":"ISMIR"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.308"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-015-0816-y"},{"key":"ref19","article-title":"Features and classifiers for the automatic classification of musical audio signals","author":"west","year":"2004","journal-title":"ISMIR"},{"key":"ref4","first-page":"i","article-title":"Hidden markov model-based speech emotion recognition","volume":"1","author":"schuller","year":"2003","journal-title":"Proc Int Conf Multimedia Expo (ICME)"},{"key":"ref27","article-title":"Learning multi-level deep representations for image emotion classification","author":"rao","year":"2016"},{"key":"ref3","first-page":"543","article-title":"Combining modality specific deep neural networks for emotion recognition in video","author":"kahou","year":"2013","journal-title":"Proc Int Conf Multimodal Interact (ICMI)"},{"key":"ref6","first-page":"935","article-title":"Zero-shot learning through cross-modal transfer","author":"socher","year":"2013","journal-title":"Advances in neural information processing 
systems"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TASL.2010.2064164"},{"key":"ref8","first-page":"2822","article-title":"A multi-modal mixture-of-experts model for dynamic emotion prediction in movies","author":"goyal","year":"2016","journal-title":"IEEE Int Conf on Acoustics Speech and Signal Processing (ICASSP)"},{"key":"ref7","first-page":"801","article-title":"Ambient sound provides supervision for visual learning","author":"owens","year":"0"},{"key":"ref2","first-page":"381","article-title":"Robust image sentiment analysis using progressively trained and domain transferred deep networks","author":"you","year":"2015","journal-title":"AAAI"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/2939672.2939812"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2011.941851"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TSA.2005.860344"},{"key":"ref22","first-page":"113","article-title":"Music type classification by spectral contrast feature","volume":"1","author":"jiang","year":"2002","journal-title":"Multimedia and Expo 2002 ICME&#x2019;02 Proceedings 2002 IEEE International Conference on"},{"key":"ref21","article-title":"Adding the affective dimension: a new look in speech analysis and synthesis","author":"scherer","year":"1996","journal-title":"ICSLP"},{"key":"ref24","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2014"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1145\/1178723.1178727"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1145\/2647868.2654930"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1145\/2661806.2661810"}],"event":{"name":"ICASSP 2019 - 2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Brighton, UK","start":{"date-parts":[[2019,5,12]]},"end":{"date-parts":[[2019,5,17]]}},"container-title":["ICASSP 2019 - 2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8671773\/8682151\/08683133.pdf?arnumber=8683133","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,23]],"date-time":"2025-08-23T00:37:43Z","timestamp":1755909463000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8683133\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,5]]},"references-count":27,"URL":"https:\/\/doi.org\/10.1109\/icassp.2019.8683133","relation":{},"subject":[],"published":{"date-parts":[[2019,5]]}}}