{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,14]],"date-time":"2026-03-14T22:20:08Z","timestamp":1773526808203,"version":"3.50.1"},"reference-count":37,"publisher":"Springer Science and Business Media LLC","issue":"11","license":[{"start":{"date-parts":[[2021,9,16]],"date-time":"2021-09-16T00:00:00Z","timestamp":1631750400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2021,9,16]],"date-time":"2021-09-16T00:00:00Z","timestamp":1631750400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["Grant No. 61801471"],"award-info":[{"award-number":["Grant No. 61801471"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004739","name":"Youth Innovation Promotion Association of the Chinese Academy of Sciences","doi-asserted-by":"publisher","award":["2021022"],"award-info":[{"award-number":["2021022"]}],"id":[{"id":"10.13039\/501100004739","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/n\/a","name":"the development fund for Shanghai talents","doi-asserted-by":"publisher","award":["2020011"],"award-info":[{"award-number":["2020011"]}],"id":[{"id":"10.13039\/n\/a","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["J Sign Process Syst"],"published-print":{"date-parts":[[2021,11]]},"DOI":"10.1007\/s11265-021-01702-x","type":"journal-article","created":{"date-parts":[[2021,9,16]],"date-time":"2021-09-16T02:03:31Z","timestamp":1631757811000},"page":"1287-1299","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":8,"title":["Environmental Sound 
Classification Based on Stacked Concatenated DNN using Aggregated Features"],"prefix":"10.1007","volume":"93","author":[{"given":"Chengwei","family":"Liu","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8727-4228","authenticated-orcid":false,"given":"Feng","family":"Hong","sequence":"additional","affiliation":[]},{"given":"Haihong","family":"Feng","sequence":"additional","affiliation":[]},{"given":"Yushuang","family":"Zhai","sequence":"additional","affiliation":[]},{"given":"Youyuan","family":"Chen","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2021,9,16]]},"reference":[{"key":"1702_CR1","doi-asserted-by":"crossref","unstructured":"Chachada, S., Jay, C. C. (2014). Environmental sound recognition: A survey. Apsipa Transactions on Signal & Information Processing, 3.","DOI":"10.1017\/ATSIP.2014.12"},{"key":"1702_CR2","doi-asserted-by":"crossref","unstructured":"Baum, E., Harper, M., Alicea, R., & Ordonez, C. (2018). Sound Identification for Fire-Fighting Mobile Robots. In 2018 Second IEEE international conference on robotic computing (IRC), pp.79\u201386.","DOI":"10.1109\/IRC.2018.00020"},{"key":"1702_CR3","doi-asserted-by":"publisher","first-page":"207","DOI":"10.1016\/j.apacoust.2016.06.010","volume":"117","author":"C Mydlarz","year":"2017","unstructured":"Mydlarz, C., Salamon, J., & Bello, J. P. (2017). The implementation of low-cost urban acoustic monitoring devices. Applied Acoustics, 117, 207\u2013218.","journal-title":"Applied Acoustics"},{"key":"1702_CR4","doi-asserted-by":"crossref","unstructured":"Fan, X., Sun, T., Chen, W., Fan, Q. (2020). Deep neural network based environment sound classification and its implementation on hearing aid app. 
Measurement, 159(9), 107790.","DOI":"10.1016\/j.measurement.2020.107790"},{"issue":"7","key":"1702_CR5","doi-asserted-by":"publisher","first-page":"1","DOI":"10.3390\/s19071733","volume":"19","author":"Y Su","year":"2019","unstructured":"Su, Y., Zhang, K., Wang, J., & Madani, K. (2019). Environment sound classification using a two-stream CNN based on decision-level fusion. Sensors (Switzerland), 19(7), 1\u201315.","journal-title":"Sensors (Switzerland)"},{"issue":"3","key":"1702_CR6","doi-asserted-by":"publisher","first-page":"16","DOI":"10.1109\/MSP.2014.2326181","volume":"32","author":"D Barchiesi","year":"2014","unstructured":"Barchiesi, D., Giannoulis, D., Stowellm, D., & Plumbleym, M. D. (2014). Acoustic Scene Classification: Classifying environments from the sounds they produce. IEEE Signal Processing Magazine, 32(3), 16\u201334.","journal-title":"IEEE Signal Processing Magazine"},{"key":"1702_CR7","unstructured":"Cheveign\u00e9, D. A. (2008). Computational Auditory Scene Analysis. ISTE."},{"key":"1702_CR8","doi-asserted-by":"crossref","unstructured":"Mesaros, A., Heittola, T. (2017). DCASE 2017 Challenge setup: Tasks, datasets and baseline system. In Detection and Classification of Acoustic Scenes & Events 2017.","DOI":"10.1007\/978-3-319-63450-0_6"},{"issue":"3","key":"1702_CR9","doi-asserted-by":"publisher","first-page":"16","DOI":"10.1109\/MSP.2014.2326181","volume":"32","author":"D Barchiesi","year":"2015","unstructured":"Barchiesi, D., Giannoulis, D. D., Stowell, D., & Plumbley, M. D. (2015). Acoustic Scene Classification: Classifying environments from the sounds they produce. IEEE Signal Processing Magazine, 32(3), 16\u201334.","journal-title":"IEEE Signal Processing Magazine"},{"key":"1702_CR10","doi-asserted-by":"crossref","unstructured":"Piczak, K. J. (2015). Environmental sound classification with convolutional neural networks. In 2015 IEEE 25th international workshop on machine learning for signal processing (MLSP), pp. 
1\u20136.","DOI":"10.1109\/MLSP.2015.7324337"},{"key":"1702_CR11","doi-asserted-by":"crossref","unstructured":"Su, Y., Zhang, K., Wang, J., Zhou, D., Madani, K. (2020). Performance analysis of multiple aggregated acoustic features for environment sound classification. Applied Acoustics,\u00a0158, 107050.","DOI":"10.1016\/j.apacoust.2019.107050"},{"key":"1702_CR12","volume-title":"Acoustic scene recognition with deep learning","author":"W Dai","year":"2014","unstructured":"Dai, W. (2014). Acoustic scene recognition with deep learning. Carnegie Mellon University, Pittsburg, Pennsylvania, USA."},{"key":"1702_CR13","doi-asserted-by":"crossref","unstructured":"Bountourakis, V., Vrysis, L., Papanikolaou, G. (2015). Machine learning algorithms for environmental sound recognition: Towards soundscape semantics. In Proceedings of the Audio Mostly 2015 on Interaction With Sound (AM '15), pp.1\u20137.","DOI":"10.1145\/2814895.2814905"},{"key":"1702_CR14","doi-asserted-by":"publisher","DOI":"10.7551\/mitpress\/1486.001.0001","volume-title":"Auditory Scene Analysis","author":"AS Bregman","year":"1990","unstructured":"Bregman, A. S. (1990). Auditory Scene Analysis. MIT Press."},{"issue":"18","key":"1702_CR15","doi-asserted-by":"publisher","first-page":"3885","DOI":"10.3390\/app9183885","volume":"9","author":"BD Silva","year":"2019","unstructured":"Silva, B. D., Happi, A. W., Braeken, A., & Touhafi, A. (2019). Evaluation of classical Machine Learning techniques towards urban sound recognition on embedded systems. Applied Sciences, 9(18), 3885.","journal-title":"Applied Sciences"},{"issue":"6","key":"1702_CR16","doi-asserted-by":"publisher","first-page":"1253","DOI":"10.1109\/TASLP.2017.2690561","volume":"25","author":"A Rakotomamonjy","year":"2017","unstructured":"Rakotomamonjy, A. (2017). Supervised representation learning for audio scene classification. 
IEEE\/ACM Transactions on Audio, Speech, and Language Processing, 25(6), 1253\u20131265.","journal-title":"IEEE\/ACM Transactions on Audio, Speech, and Language Processing"},{"key":"1702_CR17","doi-asserted-by":"crossref","unstructured":"Ahmad, S., Agrawal, S., Joshi, S., Taran, S. (2020). Environmental sound classification using optimum allocation sampling based empirical mode decomposition. Physica A: Statistical Mechanics and its Applications, 537, 122613.","DOI":"10.1016\/j.physa.2019.122613"},{"key":"1702_CR18","doi-asserted-by":"publisher","first-page":"52","DOI":"10.1016\/j.eswa.2019.06.040","volume":"136","author":"S Abdoli","year":"2019","unstructured":"Abdoli, S., Cardinal, P., & Lameiras, K. A. (2019). End-to-end environmental sound classification using a 1D convolutional neural network. Expert Systems with Applications, 136, 52\u2013263.","journal-title":"Expert Systems with Applications"},{"key":"1702_CR19","doi-asserted-by":"publisher","first-page":"123","DOI":"10.1016\/j.apacoust.2018.12.019","volume":"148","author":"Y Chen","year":"2019","unstructured":"Chen, Y., Guo, Q., Liang, X., Wang, J., & Qian, Y. (2019). Environmental sound classification with dilated convolutions. Applied Acoustics, 148, 123\u2013132.","journal-title":"Applied Acoustics"},{"key":"1702_CR20","doi-asserted-by":"crossref","unstructured":"Huang, Z., Liu, C., Fei, H. (2020). Urban sound classification based on 2-order dense convolutional network using dual features. Applied Acoustics,\u00a0164, 107243.","DOI":"10.1016\/j.apacoust.2020.107243"},{"key":"1702_CR21","doi-asserted-by":"publisher","first-page":"896","DOI":"10.1016\/j.neucom.2020.08.069","volume":"453","author":"Z Zhang","year":"2021","unstructured":"Zhang, Z., Xu, S., Zhang, S., Qiao, T., & Cao, S. (2021). Attention based convolutional recurrent neural network for environmental sound classification. 
Neurocomputing, 453, 896\u2013903.","journal-title":"Neurocomputing"},{"key":"1702_CR22","doi-asserted-by":"crossref","unstructured":"Parascandolo, G., Huttunen, H., Virtanen, T. (2020). Recurrent Neural Networks for Polyphonic Sound Event Detection in Real Life Recordings.\u00a0In 2016 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 6440\u20136444.","DOI":"10.1109\/ICASSP.2016.7472917"},{"key":"1702_CR23","doi-asserted-by":"publisher","first-page":"66529","DOI":"10.1109\/ACCESS.2020.2984903","volume":"8","author":"F Demir","year":"2020","unstructured":"Demir, F., Abdullah, D. A., & Sengur, A. (2020). A New Deep CNN Model for Environmental Sound Classification. IEEE Access, 8, 66529\u201366537.","journal-title":"IEEE Access"},{"key":"1702_CR24","doi-asserted-by":"publisher","first-page":"279","DOI":"10.1109\/LSP.2017.2657381","volume":"24","author":"J Salamon","year":"2017","unstructured":"Salamon, J., & Bello, J. P. (2017). Deep Convolutional Neural Networks and Data Augmentation for Environmental Sound Classification. IEEE Signal Processing Letters, 24, 279\u2013283.","journal-title":"IEEE Signal Processing Letters"},{"key":"1702_CR25","doi-asserted-by":"crossref","unstructured":"Lu, R., Duan, Z., Zhang, C. (2017). Metric learning based data augmentation for environmental sound classification. In 2017 IEEE Workshop on Applications of Signal Processing to Audio and Acoustics, pp. 1\u20135.","DOI":"10.1109\/WASPAA.2017.8169983"},{"key":"1702_CR26","doi-asserted-by":"crossref","unstructured":"Mushtaq, Z., & Su, S. F. (2020). Environmental sound classification using a regularized deep convolutional neural network with data augmentation. Applied Acoustics, 167, 107389.","DOI":"10.1016\/j.apacoust.2020.107389"},{"key":"1702_CR27","unstructured":"Mun, S., Park, S., Han, D. K., Ko, H. (2017). Generative adversarial network based acoustic scene training set augmentation and selection using SVM hyper-plane. 
Work Detect Classified Acoustic Scenes Events, pp. 93\u201397."},{"key":"1702_CR28","doi-asserted-by":"crossref","unstructured":"Mun, S., Shon, S., Kim, W. et al. (2017). Deep Neural Network based learning and transferring mid-level audio features for acoustic scene classification. In 2017 IEEE International Conference Acoustic Speech, Signal Processing (ICASSP), pp. 796\u2013800.","DOI":"10.1109\/ICASSP.2017.7952265"},{"issue":"7","key":"1702_CR29","doi-asserted-by":"publisher","first-page":"1152","DOI":"10.3390\/app8071152","volume":"87","author":"S Li","year":"2018","unstructured":"Li, S., Yao, Y., Hu, J., & Liu, G. (2018). An ensemble stacked convolutional neural network model for environmental event sound recognition. Applied Sciences, 87(7), 1152.","journal-title":"Applied Sciences"},{"key":"1702_CR30","doi-asserted-by":"crossref","unstructured":"Li, X., Chebiyyam, V., Kirchhoff, K. (2019). Multi-stream network with temporal attention for environmental sound classification. In Proceeding Annual Conference International Speech Communication Association INTERSPEECH, pp. 3604\u20133608.","DOI":"10.21437\/Interspeech.2019-3019"},{"key":"1702_CR31","doi-asserted-by":"crossref","unstructured":"Piczak, K. J. (2015). ESC: Dataset for environmental sound classification. In Proceedings of the 23rd ACM international conference on Multimedia, pp. 1015\u20131018.","DOI":"10.1145\/2733373.2806390"},{"key":"1702_CR32","doi-asserted-by":"crossref","unstructured":"McFee, B., Raffel, C. et al. (2015). librosa: Audio and Music Signal Analysis in Python. In\u00a0Proceedings of the 14th Python in Science Conference, vol. 8, pp.18\u201325.","DOI":"10.25080\/Majora-7b98e3ed-003"},{"issue":"5786","key":"1702_CR33","doi-asserted-by":"publisher","first-page":"504","DOI":"10.1126\/science.1127647","volume":"313","author":"GE Hinton","year":"2006","unstructured":"Hinton, G. E., Salakhutdinov, R. R., & Code, M. (2006). Reducing the Dimensionality of Data with Neural Networks. 
Science, 313(5786), 504\u2013507.","journal-title":"Science"},{"key":"1702_CR34","doi-asserted-by":"publisher","first-page":"356","DOI":"10.1007\/978-3-030-03335-4_31","volume":"11257","author":"Z Zhang","year":"2018","unstructured":"Zhang, Z., Xu, S., Cao, S., & Zhang, S. (2018). Deep convolutional neural network with mixup for environmental sound classification. Lecture Notes in Computer Science, 11257, 356\u2013367.","journal-title":"Lecture Notes in Computer Science"},{"key":"1702_CR35","doi-asserted-by":"crossref","unstructured":"Mushtaq, Z., Su, S. F., Tran, Q. V. (2021). Spectral images based environmental sound classification using CNN with meaningful data augmentation. Applied Acoustics, 172, 107581.","DOI":"10.1016\/j.apacoust.2020.107581"},{"key":"1702_CR36","doi-asserted-by":"crossref","unstructured":"Zhang, X., Zou, Y., Shi, W. (2017). Dilated convolution neural network with LeakyReLU for environmental sound classification. In 2017 22nd International Conference on Digital Signal Processing (DSP), London, UK, pp.1\u20135.","DOI":"10.1109\/ICDSP.2017.8096153"},{"key":"1702_CR37","doi-asserted-by":"crossref","unstructured":"Zhang, K., Cai, Y., Ren, Y., Ye, R., & He, L. (2020). MTF-CRNN: Multiscale Time-Frequency Convolutional Recurrent Neural Network For Sound Event Detection. 
IEEE Access, pp.99, 1\u20131.","DOI":"10.1109\/ACCESS.2020.3015047"}],"container-title":["Journal of Signal Processing Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11265-021-01702-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11265-021-01702-x\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11265-021-01702-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2021,11,9]],"date-time":"2021-11-09T02:08:32Z","timestamp":1636423712000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11265-021-01702-x"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,9,16]]},"references-count":37,"journal-issue":{"issue":"11","published-print":{"date-parts":[[2021,11]]}},"alternative-id":["1702"],"URL":"https:\/\/doi.org\/10.1007\/s11265-021-01702-x","relation":{},"ISSN":["1939-8018","1939-8115"],"issn-type":[{"value":"1939-8018","type":"print"},{"value":"1939-8115","type":"electronic"}],"subject":[],"published":{"date-parts":[[2021,9,16]]},"assertion":[{"value":"18 April 2021","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"22 August 2021","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"1 September 2021","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"16 September 2021","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article 
History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing Interest"}}]}}