{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T01:17:37Z","timestamp":1740100657982,"version":"3.37.3"},"reference-count":25,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100000001","name":"National Science Foundation","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,5,23]]},"DOI":"10.1109\/icassp43922.2022.9747003","type":"proceedings-article","created":{"date-parts":[[2022,4,27]],"date-time":"2022-04-27T19:50:34Z","timestamp":1651089034000},"page":"8592-8596","source":"Crossref","is-referenced-by-count":9,"title":["Importantaug: A Data Augmentation Agent for Speech"],"prefix":"10.1109","author":[{"given":"Viet Anh","family":"Trinh","sequence":"first","affiliation":[{"name":"CUNY,The Graduate Center,New York,USA"}]},{"given":"Hassan","family":"Salami Kavaki","sequence":"additional","affiliation":[{"name":"CUNY,The Graduate Center,New York,USA"}]},{"given":"Michael I","family":"Mandel","sequence":"additional","affiliation":[{"name":"CUNY,The Graduate Center,New York,USA"}]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2007.383267"},{"article-title":"Salgan: Visual saliency prediction with generative adversarial networks","year":"2017","author":"pan","key":"ref11"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1145\/3131275"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-1168"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2883"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-2377"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2020.3040545"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00111"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1721"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2637"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054130"},{"article-title":"Deep speech: Scaling up end-to-end speech recognition","year":"2014","author":"hannun","key":"ref3"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/34.730558"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2680"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.620"},{"key":"ref7","first-page":"545","article-title":"Graph-based visual saliency","author":"harel","year":"2006","journal-title":"Advances in neural information processing systems"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-1510"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.513"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2015-711"},{"article-title":"Speech commands: A dataset for limited-vocabulary speech recognition","year":"2018","author":"warden","key":"ref20"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2015-685"},{"article-title":"Musan: A music, speech, and noise corpus","year":"2015","author":"snyder","key":"ref21"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1058"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.195"},{"key":"ref25","first-page":"972","article-title":"Self-normalizing neural networks","author":"klambauer","year":"2017","journal-title":"Advances in neural information processing systems"}],"event":{"name":"ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","start":{"date-parts":[[2022,5,23]]},"location":"Singapore, Singapore","end":{"date-parts":[[2022,5,27]]}},"container-title":["ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9745891\/9746004\/09747003.pdf?arnumber=9747003","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,8,22]],"date-time":"2022-08-22T20:14:24Z","timestamp":1661199264000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9747003\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,5,23]]},"references-count":25,"URL":"https:\/\/doi.org\/10.1109\/icassp43922.2022.9747003","relation":{},"subject":[],"published":{"date-parts":[[2022,5,23]]}}}