{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,24]],"date-time":"2026-04-24T14:52:30Z","timestamp":1777042350403,"version":"3.51.4"},"reference-count":29,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,5,23]]},"DOI":"10.1109\/icassp43922.2022.9747578","type":"proceedings-article","created":{"date-parts":[[2022,4,27]],"date-time":"2022-04-27T19:50:34Z","timestamp":1651089034000},"page":"9281-9285","source":"Crossref","is-referenced-by-count":97,"title":["FRCRN: Boosting Feature Representation Using Frequency Recurrence for Monaural Speech Enhancement"],"prefix":"10.1109","author":[{"given":"Shengkui","family":"Zhao","sequence":"first","affiliation":[{"name":"Alibaba Group"}]},{"given":"Bin","family":"Ma","sequence":"additional","affiliation":[{"name":"Alibaba Group"}]},{"given":"Karn N.","family":"Watcharasupat","sequence":"additional","affiliation":[{"name":"Nanyang Technological University (NTU),School of Electrical and Electronic Engineering,Singapore"}]},{"given":"Woon-Seng","family":"Gan","sequence":"additional","affiliation":[{"name":"Nanyang Technological University (NTU),School of Electrical and Electronic Engineering,Singapore"}]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2015.2512042"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-3038"},{"key":"ref12","first-page":"146","article-title":"Investigating RNN-based speech enhancement methods for noise-robust Text-to-Speech","author":"valentini-botinhao","year":"2016","journal-title":"Proc SSW"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9747230"},{"key":"ref14","article-title":"Deep complex networks","author":"trabelsi","year":"2018","journal-title":"International Conference on Learning Representations"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i05.6489"},{"key":"ref16","article-title":"Csr-i (wsj0) complete ldc93s6a","volume":"83","author":"garofolo","year":"1993","journal-title":"Web Download Philadelphia Linguistic Data Consortium"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2631"},{"key":"ref18","article-title":"FullSubnet: A full-band and sub-band fusion model for real-time single-channel speech enhancement","author":"hao","year":"2020"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414852"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1405"},{"key":"ref28","article-title":"Dnsmos: A non-intrusive perceptual objective speech quality metric to evaluate noise suppressors","author":"reddy","year":"2020","journal-title":"ICASSP"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-1465"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TASL.2007.911054"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2537"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462155"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-343"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1482"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414569"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461861"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2016-224"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461404"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3079813"},{"key":"ref22","doi-asserted-by":"crossref","first-page":"3642","DOI":"10.21437\/Interspeech.2017-1428","article-title":"Segan: Speech enhancement generative adversarial network","author":"pascual","year":"2017","journal-title":"Proc INTERSPEECH"},{"key":"ref21","article-title":"Glance and Gaze: A collaborative learning framework for single-channel speech enhancement","author":"li","year":"2021"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2409"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2143"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9413555"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-599"}],"event":{"name":"ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Singapore, Singapore","start":{"date-parts":[[2022,5,23]]},"end":{"date-parts":[[2022,5,27]]}},"container-title":["ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9745891\/9746004\/09747578.pdf?arnumber=9747578","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,8,22]],"date-time":"2022-08-22T20:10:01Z","timestamp":1661199001000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9747578\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,5,23]]},"references-count":29,"URL":"https:\/\/doi.org\/10.1109\/icassp43922.2022.9747578","relation":{},"subject":[],"published":{"date-parts":[[2022,5,23]]}}}