{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,30]],"date-time":"2024-10-30T00:53:33Z","timestamp":1730249613996,"version":"3.28.0"},"reference-count":30,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,7,18]],"date-time":"2022-07-18T00:00:00Z","timestamp":1658102400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,7,18]],"date-time":"2022-07-18T00:00:00Z","timestamp":1658102400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,7,18]]},"DOI":"10.1109\/icme52920.2022.9859957","type":"proceedings-article","created":{"date-parts":[[2022,8,26]],"date-time":"2022-08-26T15:45:18Z","timestamp":1661528718000},"page":"1-6","source":"Crossref","is-referenced-by-count":5,"title":["Multi-Scale Temporal-Frequency Attention for Music Source Separation"],"prefix":"10.1109","author":[{"given":"Lianwu","family":"Chen","sequence":"first","affiliation":[{"name":"Kuaishou Technology Co.,Beijing,China"}]},{"given":"Xiguang","family":"Zheng","sequence":"additional","affiliation":[{"name":"Kuaishou Technology Co.,Beijing,China"}]},{"given":"Chen","family":"Zhang","sequence":"additional","affiliation":[{"name":"Kuaishou Technology Co.,Beijing,China"}]},{"given":"Liang","family":"Guo","sequence":"additional","affiliation":[{"name":"Kuaishou Technology Co.,Beijing,China"}]},{"given":"Bing","family":"Yu","sequence":"additional","affiliation":[{"name":"Kuaishou Technology Co.,Beijing,China"}]}],"member":"263","reference":[{"key":"ref30","article-title":"Hybrid spectrogram and waveform source separation","author":"d\u00e9fossez","year":"0","journal-title":"Proceedings of the ISMIR 2021 Workshop on Music Source Separation"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/655"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/IWAENC.2018.8521383"},{"key":"ref12","article-title":"D3net: Densely connected multidilated densenet for music source separation","author":"takahashi","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref13","article-title":"Music source separation in the wave-form domain","author":"d\u00e9fossez","year":"2019","journal-title":"ArXiv Preprint"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.21105\/joss.02154"},{"key":"ref15","article-title":"Decoupling magnitude and phase estimation with deep resunet for music source separation","author":"kong","year":"0","journal-title":"Proceedings of the ISMIR 2021 Workshop on Music Source Separation"},{"key":"ref16","first-page":"323","article-title":"Singing voice separation with deep u-net convolutional networks","author":"jansson","year":"0","journal-title":"Proceedings of the International Society for Music Information Retrieval Conference (ISMIR)"},{"key":"ref17","article-title":"Wave-u-net: A multi-scale neural network for end-to-end audio source separation","author":"stoller","year":"0","journal-title":"International Society for Music Information Retrieval Conference (ISMIR)"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/WASPAA.2017.8169987"},{"key":"ref19","article-title":"Investigating u-nets with various intermediate blocks for spectrogram-based singing voice separation","author":"choi","year":"0","journal-title":"21st International Society for Music Information Retrieval Conference"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.3389\/frsip.2021.808395"},{"key":"ref4","doi-asserted-by":"crossref","first-page":"788","DOI":"10.1038\/44565","article-title":"Learning the parts of objects by non-negative matrix factorization","volume":"401","author":"lee","year":"1999","journal-title":"Nature"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TSA.2005.858005"},{"key":"ref3","first-page":"293","article-title":"The 2018 signal separation evaluation campaign","author":"st\u00f6ter","year":"0","journal-title":"International Conference on Latent Variable Analysis and Signal Separation"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178348"},{"key":"ref29","article-title":"Kuielab-mdx-net: A two-stream neural network for music demixing","author":"kim","year":"0","journal-title":"Proceedings of the MDX Workshop"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1016\/j.sigpro.2007.01.011"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7952158"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/EUSIPCO.2016.7760548"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICDSP.2011.6004991"},{"key":"ref9","doi-asserted-by":"crossref","first-page":"1667","DOI":"10.21105\/joss.01667","article-title":"Open-unmix-a reference implementation for music source separation","volume":"4","author":"st\u00f6ter","year":"2019","journal-title":"Journal of Open Source Software"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TASL.2012.2213249"},{"key":"ref20","first-page":"625","article-title":"State of the art report: Audio-based music structure analysis","author":"paulus","year":"2010","journal-title":"ISMIR Utrecht"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ISCSLP49672.2021.9362081"},{"key":"ref21","article-title":"Voice and accompaniment separation in music using self-attention convolutional neural network","author":"liu","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2555"},{"key":"ref23","first-page":"14549","article-title":"Interactive speech and noise modeling for speech enhancement","author":"zheng","year":"0","journal-title":"AAAI"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683555"},{"key":"ref25","first-page":"5998","article-title":"Attention is all you need","author":"ashish","year":"2017","journal-title":"Advances in Neural Information Processing Systems (NIPS)"}],"event":{"name":"2022 IEEE International Conference on Multimedia and Expo (ICME)","start":{"date-parts":[[2022,7,18]]},"location":"Taipei, Taiwan","end":{"date-parts":[[2022,7,22]]}},"container-title":["2022 IEEE International Conference on Multimedia and Expo (ICME)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9859562\/9858923\/09859957.pdf?arnumber=9859957","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,19]],"date-time":"2024-01-19T13:40:52Z","timestamp":1705671652000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9859957\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,7,18]]},"references-count":30,"URL":"https:\/\/doi.org\/10.1109\/icme52920.2022.9859957","relation":{},"subject":[],"published":{"date-parts":[[2022,7,18]]}}}