{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,11]],"date-time":"2026-03-11T21:20:06Z","timestamp":1773264006033,"version":"3.50.1"},"reference-count":81,"publisher":"Ubiquity Press, Ltd.","issue":"1","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026,3,11]]},"DOI":"10.5334\/tismir.223","type":"journal-article","created":{"date-parts":[[2026,3,11]],"date-time":"2026-03-11T05:19:42Z","timestamp":1773206382000},"page":"66-85","source":"Crossref","is-referenced-by-count":0,"title":["Investigating Auditory\u2013Visual Perception Using Multi-Modal Neural Networks with the SoundActions Dataset"],"prefix":"10.5334","volume":"9","author":[{"ORCID":"https:\/\/orcid.org\/0009-0003-7438-1483","authenticated-orcid":false,"given":"Jinyue","family":"Guo","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0556-0288","authenticated-orcid":false,"given":"Jim","family":"T\u00f8rresen","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6171-8743","authenticated-orcid":false,"given":"Alexander Refsum","family":"Jensenius","sequence":"additional","affiliation":[]}],"member":"3285","reference":[{"key":"key20260311051935_r1","first-page":"451","article-title":"Introducing multimodality","volume-title":"The Oxford handbook of language and society","year":"2016"},{"key":"key20260311051935_r2","first-page":"24206","volume-title":"Advances in Neural Information Processing Systems (NeurIPS)","year":"2021"},{"key":"key20260311051935_r3","first-page":"9758","volume-title":"Advances in Neural Information Processing Systems (NeurIPS)","year":"2020"},{"key":"key20260311051935_r4","first-page":"609","article-title":"Look, listen and learn","year":"2017"},{"key":"key20260311051935_r5","volume-title":"Advances in Neural Information Processing Systems (NeurIPS)","year":"2016"},{"key":"key20260311051935_r6","first-page":"2236","article-title":"Multimodal language analysis in the wild: CMU\u2011MOSEI dataset and interpretable dynamic fusion graph","year":"2018"},{"issue":"2","key":"key20260311051935_r7","doi-asserted-by":"crossref","first-page":"423","DOI":"10.1109\/TPAMI.2018.2798607","article-title":"Multimodal machine learning: A survey and taxonomy","volume":"41","year":"2019","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"issue":"1","key":"key20260311051935_r8","doi-asserted-by":"crossref","first-page":"35","DOI":"10.1017\/S1355771809990240","article-title":"Sonic image and acousmatic listening","volume":"15","year":"2010","journal-title":"Organised Sound"},{"key":"key20260311051935_r9","article-title":"Visual and audio analysis of movies video for emotion detection","volume-title":"Emotional Impact of Movies task MediaEval 2018","year":"2018"},{"key":"key20260311051935_r10","unstructured":"Biewald, L. (2020). Experiment tracking with weights and biases. Software. https:\/\/www.wandb.com."},{"key":"key20260311051935_r11","first-page":"721","article-title":"VGGSound: A large\u2011scale audio\u2011visual dataset","year":"2020"},{"key":"key20260311051935_r12","first-page":"646","article-title":"HTS\u2011AT: A hierarchical token\u2011semantic audio transformer for sound classification and detection","year":"2022"},{"key":"key20260311051935_r13","article-title":"Guide des objets sonores: Pierre Schaeffer et la recherche musicale","volume-title":"Biblioth\u00e8que de recherche musicale. Buchet\/Chastel","year":"1983"},{"key":"key20260311051935_r14","volume-title":"Audio\u2011Vision: Sound on Screen","year":"2019"},{"key":"key20260311051935_r15","first-page":"35","volume-title":"LINES AND POINTS: Horizontal and Vertical Perspectives on Audiovisual Relations","year":"2019"},{"key":"key20260311051935_r16","first-page":"22","volume-title":"The Three Listening Modes","year":"2019"},{"issue":"3","key":"key20260311051935_r17","doi-asserted-by":"crossref","first-page":"37","DOI":"10.1007\/s13735-024-00344-6","article-title":"Multimodal music datasets? Challenges and future goals in music processing","volume":"13","year":"2024","journal-title":"International Journal of Multimedia Information Retrieval"},{"key":"key20260311051935_r18","volume-title":"La musique \u00e9lectroacoustique: analyse morphologique et repr\u00e9sentation analytique [Theses]","year":"2003"},{"key":"key20260311051935_r19","doi-asserted-by":"crossref","first-page":"33","DOI":"10.1007\/s11263-021-01531-2","article-title":"Rescaling egocentric vision: Collection, pipeline and challenges for epic\u2011kitchens\u2011100","volume":"130","year":"2022","journal-title":"International Journal of Computer Vision (IJCV)"},{"key":"key20260311051935_r20","volume-title":"Les unites s\u00e9miotiques temporelles-\u00e9l\u00e9ments nouveaux d\u2019analyse musicale","year":"1996"},{"key":"key20260311051935_r21","volume-title":"MediaEval","year":"2019"},{"key":"key20260311051935_r22","first-page":"248","article-title":"ImageNet: A large\u2011scale hierarchical image database","year":"2009"},{"key":"key20260311051935_r23","first-page":"56075","volume-title":"Advances in Neural Information Processing Systems (NeurIPS)","year":"2023"},{"key":"key20260311051935_r24","author":"Falcon, W. and The PyTorch Lightning team","year":"2019"},{"key":"key20260311051935_r25","doi-asserted-by":"crossref","first-page":"105151","DOI":"10.1016\/j.engappai.2022.105151","article-title":"Ensemble deep learning: A review","volume":"115","year":"2022","journal-title":"Engineering Applications of Artificial Intelligence"},{"issue":"1","key":"key20260311051935_r26","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1207\/s15326969eco0501_1","article-title":"What in the world do we hear?: An ecological approach to auditory event perception","volume":"5","year":"1993","journal-title":"Ecological Psychology"},{"key":"key20260311051935_r27","first-page":"776","article-title":"Audio Set: An ontology and human\u2011labeled dataset for audio events","year":"2017"},{"key":"key20260311051935_r28","article-title":"Ecrins: An audio\u2011content description environment for sound samples","year":"2002"},{"key":"key20260311051935_r29","article-title":"Towards a unified view of parameter\u2011efficient transfer learning","year":"2022"},{"key":"key20260311051935_r30","first-page":"175","year":"2022"},{"key":"key20260311051935_r31","first-page":"2790","article-title":"Parameter\u2011efficient transfer learning for NLP","year":"2019"},{"issue":"2","key":"key20260311051935_r32","first-page":"3","article-title":"Lora: Low\u2011rank adaptation of large language models","volume":"1","year":"2022","journal-title":"ICLR"},{"key":"key20260311051935_r33","article-title":"Modality competition: What makes joint training of multi\u2011modal network fail in deep learning? (Provably)","year":"2022"},{"key":"key20260311051935_r34","first-page":"1","article-title":"Epic\u2011sounds: A large\u2011scale dataset of actions that sound","year":"2023"},{"key":"key20260311051935_r35","volume-title":"Sound Actions: Conceptualizing Musical Instruments","year":"2022"},{"key":"key20260311051935_r36","article-title":"The kinetics human action video dataset","volume-title":"arXiv preprint arXiv:1705.06950","year":"2017"},{"key":"key20260311051935_r37","doi-asserted-by":"crossref","first-page":"106414","DOI":"10.1016\/j.neunet.2024.106414","article-title":"Hydra: Multi\u2011head low\u2011rank adaptation for parameter efficient fine\u2011tuning","volume":"178","year":"2024","journal-title":"Neural Networks"},{"key":"key20260311051935_r38","first-page":"5583","article-title":"Vilt: Vision\u2011and\u2011language transformer without convolution or region supervision","year":"2021"},{"key":"key20260311051935_r39","article-title":"Cooperative learning of audio and video models from self\u2011supervised synchronization","volume":"31","year":"2018","journal-title":"Advances in Neural Information Processing Systems (NeurIPS)"},{"key":"key20260311051935_r40","first-page":"1","article-title":"Low\u2011resource language adaptation with ensemble of PEFT approaches","year":"2024"},{"key":"key20260311051935_r41","first-page":"271","article-title":"Musicological and technological perspectives on computational analysis of electroacoustic music","year":"2024"},{"key":"key20260311051935_r42","first-page":"705","article-title":"PEFT for speech: Unveiling optimal placement, merging strategies, and ensemble techniques","year":"2024"},{"key":"key20260311051935_r43","first-page":"2299","article-title":"Vision transformers are parameter\u2011efficient audio\u2011visual learners","year":"2023"},{"key":"key20260311051935_r44","first-page":"12009","article-title":"Swin transformer v2: Scaling up capacity and resolution","year":"2022"},{"key":"key20260311051935_r45","volume-title":"MediaEval","year":"2019"},{"key":"key20260311051935_r46","article-title":"Active contrastive learning of audio\u2011visual video representations","year":"2021"},{"key":"key20260311051935_r47","article-title":"Piggyback: Adapting a single network to multiple tasks by learning to mask weights","year":"2018"},{"issue":"5588","key":"key20260311051935_r48","doi-asserted-by":"crossref","first-page":"746","DOI":"10.1038\/264746a0","article-title":"Hearing lips and seeing voices","volume":"264","year":"1976","journal-title":"Nature"},{"issue":"3","key":"key20260311051935_r49","doi-asserted-by":"crossref","first-page":"640","DOI":"10.1152\/jn.1986.56.3.640","article-title":"Visual, auditory, and somatosensory convergence on cells in superior colliculus results in multisensory integration","volume":"56","year":"1986","journal-title":"Journal of Neurophysiology"},{"key":"key20260311051935_r50","first-page":"1004","article-title":"Sound ontology for computational auditory scene analysis","year":"1998"},{"key":"key20260311051935_r51","first-page":"689","article-title":"Multimodal deep learning","year":"2011"},{"key":"key20260311051935_r52","first-page":"2405","article-title":"Visually indicated sounds","year":"2016"},{"key":"key20260311051935_r53","volume-title":"Advances in Neural Information Processing Systems (NeurIPS)","year":"2019"},{"issue":"11","key":"key20260311051935_r54","doi-asserted-by":"crossref","first-page":"559","DOI":"10.1080\/14786440109462720","article-title":"LIII. On lines and planes of closest fit to systems of points in space","volume":"2","year":"1901","journal-title":"The London, Edinburgh, and Dublin Philosophical Magazine and Journal of Science"},{"key":"key20260311051935_r55","first-page":"8238","article-title":"Balanced multimodal learning via on\u2011the\u2011fly gradient modulation","year":"2022"},{"key":"key20260311051935_r56","volume-title":"MediaEval","year":"2015"},{"key":"key20260311051935_r57","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","year":"2021"},{"key":"key20260311051935_r58","article-title":"Free\u2011marginal multirater kappa (multirater \u03bafree): An alternative to Fleiss fixed\u2011marginal multirater kappa","year":"2005"},{"key":"key20260311051935_r59","volume-title":"L\u2019analyse des musiques \u00e9lectroacoustiques: mod\u00e8les et propositions","year":"2004"},{"key":"key20260311051935_r60","article-title":"AudioSentibank: Large\u2011scale semantic ontology of acoustic concepts for audio content analysis","volume-title":"arXiv preprint arXiv:1607.03766","year":"2016"},{"key":"key20260311051935_r61","first-page":"1041","article-title":"A dataset and taxonomy for urban sound research","year":"2014"},{"issue":"3","key":"key20260311051935_r62","article-title":"Trait\u00e9 des objets musicaux","volume":"74","year":"1969","journal-title":"Revue de M\u00e9taphysique et de Morale"},{"key":"key20260311051935_r63","volume-title":"Solf\u00e8ge de l\u2019objet sonore","year":"1967"},{"key":"key20260311051935_r64","volume-title":"The soundscape: Our sonic environment and the tuning of the world","year":"1993"},{"key":"key20260311051935_r65","article-title":"A taxonomy of multimodal interaction in the human information processing system","volume-title":"Technical report, NICI institute, Nijmegen","year":"1995"},{"key":"key20260311051935_r66","doi-asserted-by":"crossref","first-page":"61","DOI":"10.1007\/978-1-349-18492-7_5","volume-title":"The Language of Electroacoustic Music","year":"1986"},{"issue":"3","key":"key20260311051935_r67","first-page":"3200","article-title":"Human action recognition from various data modalities: A review","volume":"45","year":"2022","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"issue":"2","key":"key20260311051935_r68","doi-asserted-by":"crossref","first-page":"129","DOI":"10.1017\/S1355771807001793","article-title":"Spectromorphological analysis of sound objects: An adaptation of Pierre Schaeffer\u2019s typomorphology","volume":"12","year":"2007","journal-title":"Organised Sound"},{"key":"key20260311051935_r69","volume-title":"Emergent Musical Forms: Aural Explorations","year":"2015"},{"key":"key20260311051935_r70","first-page":"2745","article-title":"Cyclic co\u2011learning of sounding object visual grounding and sound separation","year":"2021"},{"key":"key20260311051935_r71","first-page":"436","volume-title":"Computer Vision (ECCV 2020)","year":"2020"},{"key":"key20260311051935_r72","first-page":"247","article-title":"Audio\u2011visual event localization in unconstrained videos","year":"2018"},{"key":"key20260311051935_r73","first-page":"52559","article-title":"Mmpareto: Boosting multimodal learning with innocent unimodal assistance","year":"2024"},{"key":"key20260311051935_r74","article-title":"Learning in audio\u2011visual context: A review, analysis, and new perspective","volume-title":"arXiv preprint arXiv:2208.09579","year":"2022"},{"key":"key20260311051935_r75","article-title":"Balanced audiovisual dataset for imbalance analysis","volume-title":"arXiv preprint arXiv:2302.10912","year":"2023"},{"key":"key20260311051935_r76","first-page":"19989","article-title":"Cross\u2011modal background suppression for audio\u2011visual event localization","year":"2022"},{"key":"key20260311051935_r77","article-title":"Parameter\u2011efficient fine\u2011tuning methods for pretrained language models: A critical review and assessment","volume-title":"arXiv preprint arXiv:2312.12148","year":"2023"},{"key":"key20260311051935_r78","article-title":"Filip: Fine\u2011grained interactive language\u2011image pre\u2011training","volume-title":"arXiv preprint arXiv:2111.07783","year":"2021"},{"issue":"11","key":"key20260311051935_r79","doi-asserted-by":"crossref","first-page":"65","DOI":"10.1109\/35.41402","article-title":"Integration of acoustic and visual speech signals using neural networks","volume":"27","year":"1989","journal-title":"IEEE Communications Magazine"},{"key":"key20260311051935_r80","first-page":"16375","article-title":"MERLOT reserve: Neural script knowledge through vision and language and sound","year":"2022"},{"key":"key20260311051935_r81","first-page":"386","article-title":"Audio\u2013visual segmentation","year":"2022"}],"container-title":["Transactions of the International Society for Music Information Retrieval"],"original-title":[],"language":"en","deposited":{"date-parts":[[2026,3,11]],"date-time":"2026-03-11T05:19:48Z","timestamp":1773206388000},"score":1,"resource":{"primary":{"URL":"https:\/\/transactions.ismir.net\/articles\/10.5334\/tismir.223\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026]]},"references-count":81,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2026,3,11]]}},"alternative-id":["10.5334\/tismir.223"],"URL":"https:\/\/doi.org\/10.5334\/tismir.223","relation":{},"ISSN":["2514-3298"],"issn-type":[{"value":"2514-3298","type":"print"}],"subject":[],"published":{"date-parts":[[2026]]}}}
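
The record above follows the standard Crossref REST API work envelope (status / message-type / message). Below is a minimal Python sketch of how such a record can be fetched and summarized; it assumes the public api.crossref.org endpoint and the third-party requests package, neither of which is part of the record itself. Only the DOI and field names are taken from the record above.

# Minimal sketch: fetch and summarize a Crossref work record like the one above.
# Assumes the public Crossref REST API and the third-party `requests` package.
import requests

DOI = "10.5334/tismir.223"  # DOI taken from the record above

resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]  # same envelope shape as the record above

# "title" and "container-title" are lists of strings; "author" is a list of
# objects with "given"/"family" name parts (and optionally an ORCID).
title = work["title"][0]
authors = ", ".join(
    f'{a.get("given", "")} {a.get("family", "")}'.strip()
    for a in work.get("author", [])
)

print(title)
print(authors)
print(f'{work["container-title"][0]} {work["volume"]}'
      f'({work["journal-issue"]["issue"]}), pp. {work["page"]}')
print("References deposited:", work["references-count"])

Run against the record above, this would print the article title, the three authors, the journal/volume/issue/pages line, and the deposited reference count (81).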