{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,13]],"date-time":"2026-01-13T14:12:35Z","timestamp":1768313555886,"version":"3.49.0"},"publisher-location":"Cham","reference-count":19,"publisher":"Springer International Publishing","isbn-type":[{"value":"9783319535463","type":"print"},{"value":"9783319535470","type":"electronic"}],"license":[{"start":{"date-parts":[[2017,1,1]],"date-time":"2017-01-01T00:00:00Z","timestamp":1483228800000},"content-version":"unspecified","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2017]]},"DOI":"10.1007\/978-3-319-53547-0_25","type":"book-chapter","created":{"date-parts":[[2017,2,14]],"date-time":"2017-02-14T04:54:19Z","timestamp":1487048059000},"page":"258-266","source":"Crossref","is-referenced-by-count":119,"title":["Monoaural Audio Source Separation Using Deep Convolutional Neural Networks"],"prefix":"10.1007","author":[{"given":"Pritish","family":"Chandna","sequence":"first","affiliation":[]},{"given":"Marius","family":"Miron","sequence":"additional","affiliation":[]},{"given":"Jordi","family":"Janer","sequence":"additional","affiliation":[]},{"given":"Emilia","family":"G\u00f3mez","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2017,2,15]]},"reference":[{"issue":"10","key":"25_CR1","doi-asserted-by":"crossref","first-page":"1533","DOI":"10.1109\/TASLP.2014.2339736","volume":"22","author":"O Abdel-Hamid","year":"2014","unstructured":"Abdel-Hamid, O., Mohamed, A.R., Jiang, H., Deng, L., Penn, G., Yu, D.: Convolutional neural networks for speech recognition. IEEE\/ACM Trans. Audio Speech Lang. Process. 22(10), 1533\u20131545 (2014)","journal-title":"IEEE\/ACM Trans. Audio Speech Lang. Process."},{"key":"25_CR2","doi-asserted-by":"crossref","unstructured":"Chandna, P.: Audio source separation using deep neural networks, Master Thesis, Universitat Pompeu Fabra (2016)","DOI":"10.1007\/978-3-319-53547-0_25"},{"key":"25_CR3","unstructured":"Dong, C., Loy, C.C., He, K., Tang, X.: Image super-resolution using deep convolutional networks. CoRR, abs\/1501.00092 (2015)"},{"key":"25_CR4","unstructured":"Durrieu, J., Ozerov, A., F\u00e9votte, C.: Main instrument separation from stereophonic audio signals using a source\/filter model. In: 17th European Signal Processing Conference (2009)"},{"key":"25_CR5","unstructured":"G\u00f3mez, E., Ca\u00f1adas, F., Salamon, J., Bonada, J., Vera, P., Caba\u00f1as, P.: Predominant fundamental frequency estimation vs singing voice separation for the automatic transcription of accompanied flamenco singing. In: 13th International Society for Music Information Retrieval Conference (ISMIR 2012) (2012)"},{"key":"25_CR6","unstructured":"Han, Y., Lee, K.: Acoustic scene classification using convolutional neural network and multiple-width frequency-delta data augmentation (2016)"},{"key":"25_CR7","unstructured":"Hidalgo, J.: Low latency audio source separation for speech enhancement in cochlear implants, Master Thesis, Universitat Pompeu Fabra (2012)"},{"key":"25_CR8","doi-asserted-by":"crossref","unstructured":"Huang, P.-S., Kim, M., Hasegawa-Johnson, M., Smaragdis, P.: Deep learning for monaural speech separation. In: Acoustics, Speech and Signal Processing (ICASSP), pp. 1562\u20131566 (2014)","DOI":"10.1109\/ICASSP.2014.6853860"},{"issue":"4","key":"25_CR9","doi-asserted-by":"crossref","first-page":"2379","DOI":"10.1121\/1.2839887","volume":"123","author":"K Kokkinakis","year":"2008","unstructured":"Kokkinakis, K., Loizou, P.C.: Using blind source separation techniques to improve speech recognition in bilateral cochlear implant patients. J. Acoust. Soc. Am. 123(4), 2379\u20132390 (2008)","journal-title":"J. Acoust. Soc. Am."},{"key":"25_CR10","unstructured":"Krizhevsky, A., Sutskever, I., Hinton, G.E.: ImageNet classification with deep convolutional neural networks. In: Advances in Neural Information Processing Systems, pp. 1097\u20131105 (2012)"},{"key":"25_CR11","doi-asserted-by":"crossref","unstructured":"Noh, H., Hong, S., Han, B.: Learning deconvolution network for semantic segmentation. CoRR, abs\/1505.04366 (2015)","DOI":"10.1109\/ICCV.2015.178"},{"key":"25_CR12","doi-asserted-by":"crossref","unstructured":"Nugraha, A.A., Liutkus, A., Vincent, E.: Multichannel audio source separation with deep neural networks. Technical report (2016)","DOI":"10.1109\/EUSIPCO.2016.7760548"},{"key":"25_CR13","doi-asserted-by":"crossref","first-page":"395","DOI":"10.1007\/978-3-642-55016-4_14","volume-title":"Blind Source Separation","author":"Z Rafii","year":"2014","unstructured":"Rafii, Z., Liutkus, A., Pardo, B.: REPET for background\/foreground separation in audio. In: Naik, G.R., Wang, W. (eds.) Blind Source Separation, pp. 395\u2013411. Springer, Heidelberg (2014)"},{"key":"25_CR14","doi-asserted-by":"crossref","unstructured":"Sainath, T.N., Kingsbury, B., Ramabhadran, B.: Auto-encoder bottleneck features using deep belief networks. In: 2012 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 4153\u20134156. IEEE (2012)","DOI":"10.1109\/ICASSP.2012.6288833"},{"key":"25_CR15","unstructured":"Simpson, A.J.R.: Probabilistic Binary-Mask Cocktail-Party Source Separation in a Convolutional Deep Neural Network (2015)"},{"key":"25_CR16","doi-asserted-by":"crossref","unstructured":"Uhlich, S., Giron, F., Mitsufuji, Y.: Deep neural network based instrument extraction from music. In: 2015 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 2135\u20132139. IEEE (2015)","DOI":"10.1109\/ICASSP.2015.7178348"},{"issue":"4","key":"25_CR17","doi-asserted-by":"crossref","first-page":"1462","DOI":"10.1109\/TSA.2005.858005","volume":"14","author":"E Vincent","year":"2006","unstructured":"Vincent, E., Gribonval, R., Fevotte, C.: Performance measurement in blind audio source separation. IEEE Trans. Audio Speech Lang. Process. 14(4), 1462\u20131469 (2006)","journal-title":"IEEE Trans. Audio Speech Lang. Process."},{"issue":"12","key":"25_CR18","doi-asserted-by":"crossref","first-page":"1849","DOI":"10.1109\/TASLP.2014.2352935","volume":"22","author":"Y Wang","year":"2014","unstructured":"Wang, Y., Narayanan, A., Wang, D.: On training targets for supervised speech separation. IEEE\/ACM Trans. Audio Speech Lang. Process. 22(12), 1849\u20131858 (2014)","journal-title":"IEEE\/ACM Trans. Audio Speech Lang. Process."},{"key":"25_CR19","unstructured":"Zeiler, M.D.: Adadelta: an adaptive learning rate method. arXiv preprint arXiv:1212.5701"}],"container-title":["Lecture Notes in Computer Science","Latent Variable Analysis and Signal Separation"],"original-title":[],"link":[{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-319-53547-0_25","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2019,9,18]],"date-time":"2019-09-18T13:07:23Z","timestamp":1568812043000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/978-3-319-53547-0_25"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2017]]},"ISBN":["9783319535463","9783319535470"],"references-count":19,"URL":"https:\/\/doi.org\/10.1007\/978-3-319-53547-0_25","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2017]]}}}