{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,22]],"date-time":"2026-04-22T19:16:50Z","timestamp":1776885410880,"version":"3.51.2"},"reference-count":44,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,10,12]],"date-time":"2025-10-12T00:00:00Z","timestamp":1760227200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,12]],"date-time":"2025-10-12T00:00:00Z","timestamp":1760227200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100017090","name":"Sony","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100017090","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,10,12]]},"DOI":"10.1109\/waspaa66052.2025.11230973","type":"proceedings-article","created":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T18:46:47Z","timestamp":1763146007000},"page":"1-5","source":"Crossref","is-referenced-by-count":1,"title":["Learning Perceptually Relevant Temporal Envelope Morphing"],"prefix":"10.1109","author":[{"given":"Satvik","family":"Dixit","sequence":"first","affiliation":[{"name":"Carnegie Mellon University"}]},{"given":"Sungjoon","family":"Park","sequence":"additional","affiliation":[{"name":"Carnegie Mellon University"}]},{"given":"Chris","family":"Donahue","sequence":"additional","affiliation":[{"name":"Carnegie Mellon University"}]},{"given":"Laurie M.","family":"Heller","sequence":"additional","affiliation":[{"name":"Carnegie Mellon University"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2011.5946365"},{"key":"ref2","article-title":"Sound morphing by audio descriptors and parameter interpolation","volume-title":"Proceedings of the 19th International Conference on Digital Audio Effects (DAFx-16)","author":"Kazazis"},{"key":"ref3","article-title":"A method of morphing spectral envelopes of the singing voice for use with backing vocals","volume-title":"Proceedings of the International Computer Music Conference (ICMC)","author":"Roddy"},{"key":"ref4","article-title":"Soundmorpher: Perceptually-uniform sound morphing with diffusion model","author":"Niu","year":"2024"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1080\/25742442.2022.2143201"},{"key":"ref6","article-title":"Extended convolution techniques for cross-synthesis","volume-title":"International Conference on Computer Music","author":"Donahue"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-70210-6_31"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.1996.543292"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2005-791"},{"key":"ref10","article-title":"Audioldm: Text-to-audio generation with latent diffusion models","author":"Liu","year":"2023"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2024.3399607"},{"key":"ref12","article-title":"Audiogen: Textually guided audio generation","author":"Kreuk","year":"2022"},{"key":"ref13","article-title":"Audiobox: Unified audio generation with natural language prompts","author":"Vyas","year":"2023"},{"key":"ref14","first-page":"13 916","article-title":"Make-an-audio: Text-to-audio generation with prompt-enhanced diffusion models","volume-title":"International Conference on Machine Learning","author":"Huang"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1145\/3581783.3612348"},{"key":"ref16","article-title":"Tangoflux: Super fast and faithful text to audio generation with flow matching and clap-ranked preference optimization","author":"Hung","year":"2024"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2025-1137"},{"key":"ref18","article-title":"Fast timing-conditioned latent audio diffusion","volume-title":"Forty-first International Conference on Machine Learning","author":"Evans"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49660.2025.10888461"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49660.2025.10890164"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1163\/9789004658820"},{"key":"ref22","article-title":"Perceptual organization of sound: A conceptual framework","author":"McAdams","year":"1999","journal-title":"Psychological and Physiological Advances in Hearing"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.21236\/ADA164453"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1142\/S0218001493000339"},{"key":"ref25","article-title":"Recent advances in musique concr\u00e8te at carl","volume-title":"Proceedings of the 1985 International Computer Music Conference (ICMC)","author":"Dolson"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.2307\/3680788"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/MMUL.2003.1195160"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1121\/1.408434"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1016\/j.neuron.2011.06.032"},{"key":"ref30","article-title":"Vision language models are few-shot audio spectrogram classifiers","volume-title":"Audio Imagination: NeurIPS 2024 Workshop AI-Driven Speech, Music, and Sound Generation","author":"Dixit"},{"key":"ref31","article-title":"A formal evaluation framework for sound morphing","volume-title":"ICMC","author":"Caetano"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1038\/416087a"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1121\/2.0001581"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N19-1011"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2016-1447"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1037\/h0046162"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1080\/10106049.2023.2197516"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-540-74048-3_4"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/icassp49660.2025.10889879"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9052990"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447380"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49660.2025.10888184"},{"key":"ref43","article-title":"Stable-v2a: Synthesis of synchronized sound effects with temporal and semantic controls","author":"Gramaccioni","year":"2024"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/taslpro.2025.3597477"}],"event":{"name":"2025 IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (WASPAA)","location":"Tahoe City, CA, USA","start":{"date-parts":[[2025,10,12]]},"end":{"date-parts":[[2025,10,15]]}},"container-title":["2025 IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (WASPAA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11230875\/11230917\/11230973.pdf?arnumber=11230973","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:58:54Z","timestamp":1763193534000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11230973\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,12]]},"references-count":44,"URL":"https:\/\/doi.org\/10.1109\/waspaa66052.2025.11230973","relation":{},"subject":[],"published":{"date-parts":[[2025,10,12]]}}}