{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,31]],"date-time":"2026-03-31T20:58:18Z","timestamp":1774990698695,"version":"3.50.1"},"reference-count":155,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"2","license":[{"start":{"date-parts":[[2019,5,1]],"date-time":"2019-05-01T00:00:00Z","timestamp":1556668800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2019,5,1]],"date-time":"2019-05-01T00:00:00Z","timestamp":1556668800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2019,5,1]],"date-time":"2019-05-01T00:00:00Z","timestamp":1556668800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100007175","name":"Conseil National de la Recherche Scientifique","doi-asserted-by":"publisher","award":["INS2I 2018"],"award-info":[{"award-number":["INS2I 2018"]}],"id":[{"id":"10.13039\/501100007175","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE J. Sel. Top. Signal Process."],"published-print":{"date-parts":[[2019,5]]},"DOI":"10.1109\/jstsp.2019.2908700","type":"journal-article","created":{"date-parts":[[2019,4,1]],"date-time":"2019-04-01T18:51:20Z","timestamp":1554144680000},"page":"206-219","source":"Crossref","is-referenced-by-count":672,"title":["Deep Learning for Audio Signal Processing"],"prefix":"10.1109","volume":"13","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-0053-215X","authenticated-orcid":false,"given":"Hendrik","family":"Purwins","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6711-3603","authenticated-orcid":false,"given":"Bo","family":"Li","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4604-9729","authenticated-orcid":false,"given":"Tuomas","family":"Virtanen","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3862-6888","authenticated-orcid":false,"given":"Jan","family":"Schluter","sequence":"additional","affiliation":[]},{"given":"Shuo-Yiin","family":"Chang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4126-6556","authenticated-orcid":false,"given":"Tara","family":"Sainath","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","article-title":"Listen, attend and spell","author":"chan","year":"2015"},{"key":"ref38","article-title":"Sequence transduction with recurrent neural networks","author":"graves","year":"2012","journal-title":"ICML Rep Learning Workshop"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-540-74690-4_56"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU.2015.7404793"},{"key":"ref31","article-title":"Efficient Neural Audio Synthesis","author":"kalchbrenner","year":"0"},{"key":"ref30","article-title":"SampleRNN: An unconditional end-to-end neural audio generation model","author":"mehri","year":"2016"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178838"},{"key":"ref36","first-page":"338","article-title":"Long short-term memory recurrent neural network architectures for large scale acoustic modeling","author":"sak","year":"0","journal-title":"Proc 
INTERSPEECH"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2016-84"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2016.7472617"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1016\/0364-0213(90)90002-E"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1017\/CBO9780511800474"},{"key":"ref29","article-title":"A critical review of recurrent neural networks for sequence learning","author":"lipton","year":"2015"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178847"},{"key":"ref22","article-title":"Wavenet: A generative model for raw audio","author":"oord","year":"2016"},{"key":"ref21","first-page":"1","article-title":"Learning the speech front-end with raw waveform CLDNNs","author":"sainath","year":"0","journal-title":"Proc INTERSPEECH"},{"key":"ref24","first-page":"289","author":"holschneider","year":"1989","journal-title":"Wavelets Time-Frequency Methods and Phase Space"},{"key":"ref23","author":"goodfellow","year":"2016","journal-title":"Deep Learning"},{"key":"ref101","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2000.859408"},{"key":"ref26","article-title":"Multi-scale context aggregation by dilated convolutions","author":"yu","year":"2015"},{"key":"ref100","first-page":"417","article-title":"Boundary detection in music structure analysis using convolutional neural networks","author":"ullrich","year":"0","journal-title":"Proc Int Soc Music Inf Retrieval Conf"},{"key":"ref25","article-title":"Deeplab: Semantic image segmentation with deep convolutional nets, atrous convolution, and fully connected CRFs","author":"chen","year":"2016"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-233"},{"key":"ref51","first-page":"2672","article-title":"Generative adversarial nets","author":"goodfellow","year":"0","journal-title":"Proc 27th Int Conf Neural Inf Process Syst"},{"key":"ref154","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2015.2512042"},{"key":"ref153","first-page":"234","article-title":"U-net: Convolutional networks for biomedical image segmentation","author":"ronneberger","year":"0","journal-title":"inICMIC"},{"key":"ref155","doi-asserted-by":"publisher","DOI":"10.1109\/SLT.2018.8639610"},{"key":"ref150","first-page":"44","article-title":"Learning to pinpoint singing voice from weakly labeled examples","author":"schl\u00fcter","year":"0","journal-title":"Proc Int Soc Music Inf Retrieval Conf"},{"key":"ref152","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2014.2359159"},{"key":"ref151","first-page":"537","article-title":"Local interpretable model-agnostic explanations for music content analysis","author":"mishra","year":"0","journal-title":"Proc Int Soc Music Inf Retrieval Conf"},{"key":"ref146","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","author":"devlin","year":"2018"},{"key":"ref147","doi-asserted-by":"publisher","DOI":"10.1016\/j.jpdc.2008.05.014"},{"key":"ref148","doi-asserted-by":"publisher","DOI":"10.1145\/3079856.3080246"},{"key":"ref149","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU.2015.7404853"},{"key":"ref59","first-page":"214","article-title":"Wasserstein generative adversarial networks","author":"arjovsky","year":"0","journal-title":"Proc 34th Int Conf Mach Learn"},{"key":"ref58","article-title":"Soft-DTW: A differentiable loss function for time-series","author":"cuturi","year":"0","journal-title":"Proc 34th Int Conf Mach 
Learn"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU.2017.8268927"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-1620"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462581"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-1428"},{"key":"ref53","article-title":"TimbreTron: A WaveNet(CycleGAN(CQT(Audio))) Pipeline for Musical Timbre Transfer","author":"huang","year":"0","journal-title":"Proc ICLR"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461671"},{"key":"ref40","first-page":"577","article-title":"Attention-based models for speech recognition","author":"chorowski","year":"0","journal-title":"Proc 28th Int Conf Neural Inf Process Syst"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1145\/3065386"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2012.2205597"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1989.1.4.541"},{"key":"ref5","first-page":"39","article-title":"Deep belief networks for phone recognition","author":"mohamed","year":"0","journal-title":"Proc NIPS Workshop Deep Learn Speech Recognit Related Appl"},{"key":"ref8","article-title":"AudioSet: A large-scale dataset of manually annotated audio events","year":"2019"},{"key":"ref49","first-page":"5998","article-title":"Attention is all you need","author":"vaswani","year":"0","journal-title":"Proc 31st Int Conf Neural Inf Process Syst"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1997.9.8.1735"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2012.6288863"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7953069"},{"key":"ref45","first-page":"ii-1764","article-title":"Towards end-to-end speech recognition with recurrent neural networks","author":"graves","year":"0","journal-title":"Proc 31st Int Conf Mach Learn"},{"key":"ref48","article-title":"Neural machine translation by jointly learning to align and translate","author":"bahdanau","year":"0","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref47","first-page":"34","article-title":"An end-to-end framework for audio-to-score music transcription on monophonic excerpts","author":"romn","year":"0","journal-title":"Proc Int Soc for Music Inf Retrieval Conf"},{"key":"ref42","article-title":"Very deep convolutional networks for end-to-end speech recognition","author":"zhang","year":"2016"},{"key":"ref41","article-title":"Neural speech recognizer: Acoustic-to-word LSTM model for large vocabulary speech recognition","author":"soltau","year":"2016"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1145\/1143844.1143891"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2016.7472641"},{"key":"ref127","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2016-173"},{"key":"ref126","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2013.2291240"},{"key":"ref125","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-540-49127-9_43"},{"key":"ref124","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2016.7472778"},{"key":"ref73","first-page":"3586","article-title":"Audio augmentation for speech recognition","author":"ko","year":"0","journal-title":"Proc INTERSPEECH"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU.2013.6707748"},{"key":"ref129","doi-asserted-by":"crossref","first-page":"7","DOI":"10.1109\/TASLP.2014.2364452","article-title":"A regression approach to speech enhancement 
based on deep neural networks","volume":"23","author":"xu","year":"2015","journal-title":"IEEE Transactions on Audio Speech and Language Processing"},{"key":"ref71","article-title":"Vocal tract length perturbation (VTLP) improves speech recognition","author":"jaitly","year":"0","journal-title":"Proc ICML Workshop Deep Learn Audio Speech Language Process"},{"key":"ref128","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2014.6853900"},{"key":"ref70","article-title":"Bytes are all you need: End-to-end multilingual speech recognition and synthesis with bytes","author":"li","year":"2018"},{"key":"ref76","first-page":"121","article-title":"Exploring data augmentation for improved singing voice detection with neural networks","author":"schl\u00fcter","year":"0","journal-title":"Proc Int Soc Music Inf Retrieval Conf"},{"key":"ref130","article-title":"Supervised speech separation based on deep learning: An overview","author":"wang","year":"2017"},{"key":"ref77","first-page":"248","article-title":"A software framework for musical data augmentation","author":"mcfee","year":"0","journal-title":"Proc Int Soc for Music Inf Retrieval Conf"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-1510"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1109\/ICMLA.2012.220"},{"key":"ref133","first-page":"436","article-title":"Speech enhancement based on deep denoising autoencoder","author":"lu","year":"0","journal-title":"Proc INTERSPEECH"},{"key":"ref134","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2016-211"},{"key":"ref131","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2014.2329237"},{"key":"ref78","article-title":"Learning from between-class examples for deep sound recognition","author":"tokozume","year":"0","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref132","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2013.6639038"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.1155\/2007\/43745"},{"key":"ref136","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-1452"},{"key":"ref135","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-22482-4_11"},{"key":"ref138","article-title":"Parallel WaveNet: Fast high-fidelity speech synthesis","author":"van den oord","year":"2018"},{"key":"ref137","article-title":"Synthesizing audio with generative adversarial networks","author":"donahue","year":"2018"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1145\/3301275.3302288"},{"key":"ref139","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1528"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461368"},{"key":"ref61","doi-asserted-by":"crossref","first-page":"236","DOI":"10.1109\/TASSP.1984.1164317","article-title":"Signal Estimation from Modified Short-Time Fourier Transform","volume":"32","author":"daniel","year":"1984","journal-title":"IEEE Trans Acoust Speech Signal Process"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/GlobalSIP.2016.7905999"},{"key":"ref64","article-title":"Deep complex networks","author":"trabelsi","year":"2017"},{"key":"ref140","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461921"},{"key":"ref65","article-title":"ImageNet","year":"2019"},{"key":"ref141","first-page":"1068","article-title":"Neural audio synthesis of musical notes with wavenet autoencoders","author":"engel","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref66","article-title":"Linguistic Data 
Consortium","year":"2019"},{"key":"ref142","article-title":"Deep learning techniques for music generation &#x2013; A survey","author":"briot","year":"2017"},{"key":"ref67","article-title":"Million Song Dataset","year":"2019"},{"key":"ref143","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2016-1495"},{"key":"ref68","article-title":"Learning features of music from scratch","author":"thickstun","year":"0","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref144","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2016.2607341"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1038\/323533a0"},{"key":"ref69","article-title":"Reference Annotations: The Beatles","year":"2019"},{"key":"ref145","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N18-1202"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1037\/h0042519"},{"key":"ref109","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2016.7472917"},{"key":"ref95","first-page":"589","article-title":"Universal onset detection with bidirectional long short-term memory neural networks","author":"eyben","year":"0","journal-title":"Proc Int Soc Music Inf Retrieval Conf"},{"key":"ref108","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2017.2778423"},{"key":"ref94","article-title":"Deep learning for music","author":"bayle","year":"2018"},{"key":"ref107","first-page":"220","article-title":"Sample-level deep convolutional neural networks for music auto-tagging using raw waveforms","author":"lee","year":"0","journal-title":"Proc Sound and Music Computing Conf"},{"key":"ref93","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/E17-2076"},{"key":"ref106","first-page":"805","article-title":"Automatic tagging using deep convolutional neural networks","author":"choi","year":"0","journal-title":"Proc Int Soc for Music Inf Retrieval Conf"},{"key":"ref92","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2014.6854622"},{"key":"ref105","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2014.6854950"},{"key":"ref91","doi-asserted-by":"publisher","DOI":"10.1109\/SLT.2018.8639585"},{"key":"ref104","article-title":"A single-step approach to musical tempo estimation using a convolutional neural network","author":"schreiber","year":"0","journal-title":"Proc Int Soc for Music Inf Retrieval Conf"},{"key":"ref90","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-284"},{"key":"ref103","first-page":"37","article-title":"Feature learning for chord recognition: The deep chroma extractor","author":"korzeniowski","year":"0","journal-title":"Proc Int Soc for Music Inf Retrieval Conf"},{"key":"ref102","first-page":"188","article-title":"Structured training for large-vocabulary chord recognition","author":"mcfee","year":"0","journal-title":"Proc Int Soc for Music Inf Retrieval Conf"},{"key":"ref111","article-title":"Multi-speaker localization using convolutional neural network trained with noise","author":"chakrabarty","year":"0","journal-title":"Proc NIPS Workshop Mach Learn Audio Process"},{"key":"ref112","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462024"},{"key":"ref110","article-title":"Dense prediction on sequences with time-dilated convolutions for speech recognition","author":"sercu","year":"0","journal-title":"Proc NIPS Workshop End-to-end Learn Speech Audio Process"},{"key":"ref98","first-page":"255","article-title":"Joint beat and downbeat tracking with recurrent neural networks","author":"b\u00f6ck","year":"0","journal-title":"Proc Int Soc Music Inf Retrieval 
Conf"},{"key":"ref99","first-page":"106","article-title":"Analysis of common design choices in deep learning systems for downbeat tracking","author":"fuentes","year":"0","journal-title":"Proc Int Soc Music Inf Retrieval Conf"},{"key":"ref96","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2014.6853980"},{"key":"ref97","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2016.2623565"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.1986.1168654"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TASSP.1980.1163420"},{"key":"ref12","first-page":"612","article-title":"Deep convolutional networks on the pitch spiral for music instrument recognition","author":"lostanlen","year":"0","journal-title":"Proc Int Soc Music Inf Retrieval Conf"},{"key":"ref13","first-page":"63","article-title":"Deep salience representations for $f_0$ estimation in polyphonic music","author":"bittner","year":"0","journal-title":"Proc Int Soc Music Inf Retrieval Conf"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2014.6854953"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU.2013.6707746"},{"key":"ref118","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2015.2468583"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2016.7727634"},{"key":"ref82","first-page":"67","article-title":"Automatic speech recognition&#x2013;a brief history of the technology development","volume":"1","author":"juang","year":"2005"},{"key":"ref117","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2016-211"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2011.5947700"},{"key":"ref81","doi-asserted-by":"publisher","DOI":"10.1109\/TSA.2005.858005"},{"key":"ref18","first-page":"1766","article-title":"Estimating phoneme class conditional probabilities from raw speech signal using convolutional neural networks","author":"palaz","year":"0","journal-title":"Proc INTERSPEECH"},{"key":"ref84","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-4613-1367-0_10"},{"key":"ref119","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462040"},{"key":"ref19","first-page":"890","article-title":"Acoustic modeling with deep neural networks using raw time signal for LVCSR","author":"t\u00fcske","year":"0","journal-title":"Proc INTERSPEECH"},{"key":"ref83","doi-asserted-by":"publisher","DOI":"10.1016\/B978-0-08-051584-7.50037-1"},{"key":"ref114","doi-asserted-by":"publisher","DOI":"10.23919\/EUSIPCO.2018.8553182"},{"key":"ref113","doi-asserted-by":"publisher","DOI":"10.1109\/MLSP.2016.7738817"},{"key":"ref116","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2014.2352935"},{"key":"ref80","doi-asserted-by":"publisher","DOI":"10.3390\/app6060162"},{"key":"ref115","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1223"},{"key":"ref120","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2016-1176"},{"key":"ref89","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462105"},{"key":"ref121","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7952155"},{"key":"ref122","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2016.2580946"},{"key":"ref123","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462603"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-4615-3210-1"},{"key":"ref86","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU.2013.6707749"},{"key":"ref87","first-page":"1209","article-title":"Sequence discriminative distributed training of long short-term memory recurrent 
neural networks","author":"sak","year":"0","journal-title":"Proc INTERSPEECH"},{"key":"ref88","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178838"}],"container-title":["IEEE Journal of Selected Topics in Signal Processing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/4200690\/8717740\/08678825.pdf?arnumber=8678825","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,7,13]],"date-time":"2022-07-13T21:08:15Z","timestamp":1657746495000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8678825\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,5]]},"references-count":155,"journal-issue":{"issue":"2"},"URL":"https:\/\/doi.org\/10.1109\/jstsp.2019.2908700","relation":{},"ISSN":["1932-4553","1941-0484"],"issn-type":[{"value":"1932-4553","type":"print"},{"value":"1941-0484","type":"electronic"}],"subject":[],"published":{"date-parts":[[2019,5]]}}}
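The record above is the standard Crossref work envelope for this article's DOI. As a minimal sketch (standard-library Python only, assuming the public Crossref REST API endpoint api.crossref.org/works/{DOI} is reachable), the snippet below fetches the same record and reads the core bibliographic fields shown above; the live response is updated over time, so values such as "is-referenced-by-count" may differ from this snapshot.

import json
import urllib.request

# DOI taken from the record above; the Crossref endpoint returns the same
# envelope structure ("status", "message", ...).
DOI = "10.1109/jstsp.2019.2908700"
URL = f"https://api.crossref.org/works/{DOI}"

with urllib.request.urlopen(URL) as resp:
    work = json.load(resp)["message"]

# Read a few of the core bibliographic fields present in the record above.
authors = ", ".join(
    f"{a.get('given', '')} {a.get('family', '')}".strip()
    for a in work.get("author", [])
)
print(work["title"][0])                 # Deep Learning for Audio Signal Processing
print(authors)
print(work["container-title"][0],
      f"vol. {work.get('volume')}, no. {work['journal-issue']['issue']},",
      f"pp. {work.get('page')},",
      work["issued"]["date-parts"][0][0])
print("Cited by:", work.get("is-referenced-by-count"))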