{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,14]],"date-time":"2026-02-14T10:28:16Z","timestamp":1771064896577,"version":"3.50.1"},"reference-count":40,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,8,23]],"date-time":"2021-08-23T00:00:00Z","timestamp":1629676800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,8,23]],"date-time":"2021-08-23T00:00:00Z","timestamp":1629676800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100000182","name":"US Army Medical Research and Materiel Command","doi-asserted-by":"publisher","award":["W81XWH-17-C-0238"],"award-info":[{"award-number":["W81XWH-17-C-0238"]}],"id":[{"id":"10.13039\/100000182","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,8,23]]},"DOI":"10.23919\/eusipco54536.2021.9616282","type":"proceedings-article","created":{"date-parts":[[2021,12,8]],"date-time":"2021-12-08T21:55:53Z","timestamp":1639000553000},"page":"486-490","source":"Crossref","is-referenced-by-count":12,"title":["Speaker-Aware Speech Enhancement with Self-Attention"],"prefix":"10.23919","author":[{"given":"Ju","family":"Lin","sequence":"first","affiliation":[]},{"given":"Adriaan J.","family":"Van Wijngaarden","sequence":"additional","affiliation":[]},{"given":"Melissa C.","family":"Smith","sequence":"additional","affiliation":[]},{"given":"Kuang-Ching","family":"Wang","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","first-page":"626","article-title":"SDR - half-baked or well done?","author":"le roux","year":"0","journal-title":"Proc IEEE Int'l Conf Acoustics Speech and Signal Proc"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/TASL.2011.2114881"},{"key":"ref33","first-page":"1","article-title":"Fast and accurate deep network learning by exponential linear units (ELUs)","author":"clevert","year":"2016","journal-title":"Proc Int'l Conf on Learning Representations"},{"key":"ref32","first-page":"448","article-title":"Batch normalization: Accelerating deep network training by reducing internal covariate shift","author":"ioffe","year":"2015","journal-title":"Proc Int'l Conf on Machine Learning"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1929"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462665"},{"key":"ref37","first-page":"863","article-title":"Perceptual objective listening quality prediction","year":"2018","journal-title":"ITU Recommendations"},{"key":"ref36","first-page":"862","article-title":"Perceptual evaluation of speech quality (PESQ): An objective method for end-to-end speech quality assessment of narrow-band telephone networks and speech codecs","year":"2001","journal-title":"ITU Recommendations"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2001.941023"},{"key":"ref34","first-page":"1","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2015","journal-title":"Proc Int'l Conf on Learning Representations"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1946"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/TASL.2007.911054"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/WASPAA.2019.8937186"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053591"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2020.2995273"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053214"},{"key":"ref15","author":"pandey","year":"2020","journal-title":"Dense CNN with self-attention for time-domain speech enhancement"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2952"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3125143"},{"key":"ref18","first-page":"5998","article-title":"Attention is all you need","author":"vaswani","year":"2017","journal-title":"Proc Advances in Neural Information Proc Sys"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TETCI.2017.2784878"},{"key":"ref28","first-page":"2579","article-title":"Visualizing data using t-SNE","volume":"9","author":"van der maaten","year":"2008","journal-title":"J Machine Learning Res"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2014.2364452"},{"key":"ref27","first-page":"146","article-title":"Investi-gating RNN-based speech enhancement methods for noise-robust Text-to-Speech","author":"valentini-botinhao","year":"2016","journal-title":"Proc ISCA Speech Synthesis Workshop"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2013-130"},{"key":"ref6","doi-asserted-by":"crossref","first-page":"3642","DOI":"10.21437\/Interspeech.2017-1428","article-title":"SEGAN: Speech enhancement generative adversarial network","author":"pascual","year":"2017","journal-title":"Proc INTERSPEECH"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1121\/1.4806631"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1405"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2954"},{"key":"ref7","first-page":"2031","article-title":"MetricGAN: Generative adversarial networks based black-box metric scores optimization for speech enhancement","author":"fu","year":"2019","journal-title":"Proc Int'l Conf on Machine Learning"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2012-6"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683799"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1201\/b14529"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1777"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2108"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1400"},{"key":"ref24","doi-asserted-by":"crossref","first-page":"800","DOI":"10.1109\/JSTSP.2019.2922820","article-title":"SpeakerBeam: Speaker aware neural network for target speaker extraction in speech mixtures","volume":"13","author":"\u017emol\u00edkov\u00e1","year":"2019","journal-title":"IEEE J Sel Topics Signal Process"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1101"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1697"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054311"}],"event":{"name":"2021 29th European Signal Processing Conference (EUSIPCO)","location":"Dublin, Ireland","start":{"date-parts":[[2021,8,23]]},"end":{"date-parts":[[2021,8,27]]}},"container-title":["2021 29th European Signal Processing Conference (EUSIPCO)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9615915\/9615917\/09616282.pdf?arnumber=9616282","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,3,21]],"date-time":"2022-03-21T20:58:07Z","timestamp":1647896287000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9616282\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,8,23]]},"references-count":40,"URL":"https:\/\/doi.org\/10.23919\/eusipco54536.2021.9616282","relation":{},"subject":[],"published":{"date-parts":[[2021,8,23]]}}}