{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T18:28:21Z","timestamp":1775068101005,"version":"3.50.1"},"reference-count":50,"publisher":"Springer Science and Business Media LLC","issue":"6","license":[{"start":{"date-parts":[[2023,1,18]],"date-time":"2023-01-18T00:00:00Z","timestamp":1674000000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,18]],"date-time":"2023-01-18T00:00:00Z","timestamp":1674000000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Circuits Syst Signal Process"],"published-print":{"date-parts":[[2023,6]]},"DOI":"10.1007\/s00034-022-02278-y","type":"journal-article","created":{"date-parts":[[2023,1,18]],"date-time":"2023-01-18T09:02:53Z","timestamp":1674032573000},"page":"3464-3484","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":19,"title":["Multiple Predominant Instruments Recognition in Polyphonic Music Using Spectro\/Modgd-gram Fusion"],"prefix":"10.1007","volume":"42","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-2987-6586","authenticated-orcid":false,"given":"C. R.","family":"Lekshmi","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5488-9026","authenticated-orcid":false,"given":"Rajan","family":"Rajeev","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,1,18]]},"reference":[{"key":"2278_CR1","doi-asserted-by":"crossref","unstructured":"M. Airaksinen, L. Juvela, P. Alku, O. Rsnen, Data augmentation strategies for neural network F0 estimation. In: Proceedings of IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP),10-15 Brighton, UK, (2019)","DOI":"10.1109\/ICASSP.2019.8683041"},{"key":"2278_CR2","doi-asserted-by":"crossref","unstructured":"R. Ajayakumar, R. Rajan, Predominant Instrument Recognition in Polyphonic Music Using GMM-DNN Framework. in Proc. of International Conference on Signal Processing and Communications (SPCOM), (2020),1-5","DOI":"10.1109\/SPCOM50965.2020.9179626"},{"key":"2278_CR3","doi-asserted-by":"crossref","unstructured":"G. Atkar, P. Jayaraju, Speech synthesis using generative adversarial network for improving readability of Hindi words to recuperate from dyslexia. Neural Computing and Applications, 1-10 (2021)","DOI":"10.1007\/s00521-021-05695-3"},{"key":"2278_CR4","unstructured":"J.J. Bosch, J. Janer, F. Fuhrmann, P. Herrera, A comparison of sound segregation techniques for predominant instrument recognition in musical audio signals. In: Proceedings of 13th International Society for Music Information Retrieval Conference (ISMIR) 552-564 (2012)"},{"key":"2278_CR5","doi-asserted-by":"crossref","unstructured":"C. Chen, Q. Li, A multimodal music emotion classification method based on multi-feature combined network classifier. Math. Probl. Eng. 2020 (2020)","DOI":"10.1155\/2020\/4606027"},{"issue":"4","key":"2278_CR6","doi-asserted-by":"publisher","first-page":"357","DOI":"10.1109\/TASSP.1980.1163420","volume":"28","author":"S Davis","year":"1980","unstructured":"S. Davis, P. 
Mermelstein, Comparison of parametric representations for monosyllabic word recognition in continuously spoken sentences. IEEE Trans. Acoust. Speech Signal Process. 28(4), 357\u2013366 (1980)","journal-title":"IEEE Trans. Acoust. Speech Signal Process."},{"key":"2278_CR7","unstructured":"A. Diment, P. Rajan, T. Heittola, T. Virtanen, Modified group delay feature for musical instrument recognition. In: Proceedings of 10th International Symposium on Computer Music Multidisciplinary Research (CMMR), Marseille, France, 431-438 (2013)"},{"key":"2278_CR8","doi-asserted-by":"crossref","unstructured":"T.-B. Do, H.-H. Nguyen, T.-T.-N. Nguyen, H. Vu, T.-T.-H. Tran, T.-L. Le, Plant identification using score-based fusion of multi-organ images. In: Proceedings of 9th International Conference on Knowledge and Systems Engineering (KSE), 191-196 (2017)","DOI":"10.1109\/KSE.2017.8119457"},{"key":"2278_CR9","unstructured":"C. Donahue, J.J. McAuley, M. Puckette, Adversarial audio synthesis. In: Proceedings of International Conference on Learning Representations (ICLR), 1-16 (2019)"},{"issue":"1","key":"2278_CR10","doi-asserted-by":"publisher","first-page":"138","DOI":"10.1109\/TASLP.2013.2285484","volume":"22","author":"Z Duan","year":"2013","unstructured":"Z. Duan, J. Han, B. Pardo, Multi-pitch streaming of harmonic sound mixtures. IEEE\/ACM Trans. Audio Speech Language Process. 22(1), 138\u2013150 (2013)","journal-title":"IEEE\/ACM Trans. Audio Speech Language Process."},{"key":"2278_CR11","unstructured":"F. Fuhrmann, P. Herrera, Polyphonic instrument recognition for exploring semantic similarities in music. In: Proceedings of 13th International Conference on Digital Audio Effects DAFx10, pp. 1-8 (2010)"},{"issue":"5","key":"2278_CR12","doi-asserted-by":"publisher","first-page":"829","DOI":"10.1162\/neco_a_01273","volume":"32","author":"J Gao","year":"2020","unstructured":"J. Gao, P. Li, Z. Chen, J. Zhang, A survey on deep learning for multimodal data fusion. Neural Comput. 32(5), 829\u2013864 (2020). https:\/\/doi.org\/10.1162\/neco_a_01273","journal-title":"Neural Comput."},{"key":"2278_CR13","doi-asserted-by":"crossref","unstructured":"D. Ghosal, M.H. Kolekar, Music genre recognition using deep neural networks and transfer learning. In: Proceedings of Interspeech, 2087-2091 (2018)","DOI":"10.21437\/Interspeech.2018-2045"},{"key":"2278_CR14","unstructured":"X. Glorot, Y. Bengio, Understanding the difficulty of training deep feedforward neural networks. In: Proceedings of the thirteenth International conference on artificial intelligence and statistics, 249-256 (2010). JMLR Workshop and Conference Proceedings"},{"key":"2278_CR15","unstructured":"I. Gulrajani, F. Ahmed, M. Arjovsky, V. Dumoulin, A. Courville, Improved training of wasserstein GANs. In: Proceedings of Neural Information Processing System (NIPS) (2017)"},{"key":"2278_CR16","unstructured":"S. Gururani, C. Summers, A. Lerch, Instrument activity detection in polyphonic music using deep neural networks. In: Proceedings of International Society for Music Information Retrieval Conference (ISMIR), 569-576 (2018)"},{"issue":"1","key":"2278_CR17","doi-asserted-by":"publisher","first-page":"208","DOI":"10.1109\/TASLP.2016.2632307","volume":"25","author":"Y Han","year":"2017","unstructured":"Y. Han, J. Kim, K. Lee, Deep convolutional neural networks for predominant instrument recognition in polyphonic music. IEEE\/ACM Trans Audio Speech Language Process. 
25(1), 208\u2013221 (2017)","journal-title":"IEEE\/ACM Trans Audio Speech Language Process."},{"key":"2278_CR18","doi-asserted-by":"crossref","unstructured":"B. Hariharan, P. Arbel\u00e1ez, R. Girshick, J. Malik, Hypercolumns for object segmentation and fine-grained localization. In: Proceedings of the IEEE conference on computer vision and pattern recognition, 447-456 (2015)","DOI":"10.1109\/CVPR.2015.7298642"},{"key":"2278_CR19","unstructured":"T. Heittola, A. Klapuri, T. Virtanen, Musical instrument recognition in polyphonic audio using source-filter model for sound separation. In: Proceedings of International Society of Music Information Retrieval Conference, 327-332 (ISMIR) (2009)"},{"key":"2278_CR20","unstructured":"G.C. Juan, A. Jakob, E. Cano, Jazz solo instrument classification with convolutional neural networks, source separation, and transfer learning. In: Proceedings of International Society for Music Information Retrieval Conference, 577-584,(ISMIR) (2018)"},{"key":"2278_CR21","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1155\/2007\/51979","volume":"2007","author":"T Kitahara","year":"2006","unstructured":"T. Kitahara, M. Goto, K. Komatani, T. Ogata, H.G. Okuno, Instrument identification in polyphonic music: feature weighting to minimize influence of sound overlaps. EURASIP J. Adv. Signal Process. 2007, 1\u201315 (2006)","journal-title":"EURASIP J. Adv. Signal Process."},{"key":"2278_CR22","doi-asserted-by":"crossref","unstructured":"A. Kratimenos, K. Avramidis, C. Garoufis, A. Zlatintsi, P. Maragos, Augmentation methods on monophonic audio for instrument classification in polyphonic music. In: Proceedings of 28th European Signal Processing Conference, 156-160 (2021). IEEE","DOI":"10.23919\/Eusipco47968.2020.9287745"},{"key":"2278_CR23","first-page":"17022","volume":"33","author":"J Kong","year":"2020","unstructured":"J. Kong, J. Kim, J. Bae, Hifi-gan: generative adversarial networks for efficient and high fidelity speech synthesis. Adv. Neural Inf. Process. Syst. 33, 17022\u201317033 (2020)","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"2278_CR24","unstructured":"P. Li, J. Qian, T. Wang, Automatic instrument recognition in polyphonic music using convolutional neural networks. arXiv:1511.05520 (2015)"},{"key":"2278_CR25","doi-asserted-by":"publisher","DOI":"10.3390\/app10093166","author":"C-J Lin","year":"2020","unstructured":"C.-J. Lin, C.-H. Lin, S.-Y. Jeng, Using feature fusion and parameter optimization of dual-input convolutional neural network for face gender recognition. Appl. Sci. (2020). https:\/\/doi.org\/10.3390\/app10093166","journal-title":"Appl. Sci."},{"key":"2278_CR26","doi-asserted-by":"crossref","unstructured":"A. Madhu, S. Kumaraswamy, Data augmentation using generative adversarial network for environmental sound classification. In: Proceedings of 27th European Signal Processing Conference, 1-5 (2019). IEEE","DOI":"10.23919\/EUSIPCO.2019.8902819"},{"key":"2278_CR27","doi-asserted-by":"publisher","unstructured":"B. McFee, C. Raffel, D. Liang, D. Ellis, M. Mcvicar, E. Battenberg, O. Nieto, librosa: Audio and music signal analysis in python, pp. 18-24 (2015). https:\/\/doi.org\/10.25080\/Majora-7b98e3ed-003","DOI":"10.25080\/Majora-7b98e3ed-003"},{"key":"2278_CR28","doi-asserted-by":"publisher","DOI":"10.1016\/j.imu.2021.100779","volume":"27","author":"S Motamed","year":"2021","unstructured":"S. Motamed, P. Rogalla, F. 
Khalvati, Data augmentation using generative adversarial networks (gans) for gan-based detection of pneumonia and covid-19 in chest x-ray images. Inf. Med. Unlock. 27, 100779 (2021)","journal-title":"Inf. Med. Unlock."},{"issue":"5","key":"2278_CR29","doi-asserted-by":"publisher","first-page":"745","DOI":"10.1007\/s12046-011-0045-1","volume":"36","author":"HA Murthy","year":"2011","unstructured":"H.A. Murthy, B. Yegnanarayana, Group delay functions and its applications in speech technology. Sadhana 36(5), 745\u2013782 (2011)","journal-title":"Sadhana"},{"key":"2278_CR30","volume-title":"Discrete Time Signal Processing","author":"AV Oppenheim","year":"1990","unstructured":"A.V. Oppenheim, R.W. Schafer, Discrete Time Signal Processing (Prentice Hall Inc, New Jersey, 1990)"},{"key":"2278_CR31","doi-asserted-by":"crossref","unstructured":"S. Oramas, F. Barbieri, O. Nieto Caballero, X. Serra, Multimodal deep learning for music genre classification. Trans. Int. Soc. Music Inf. 4-21 (2018)","DOI":"10.5334\/tismir.10"},{"key":"2278_CR32","unstructured":"D. O\u2019Shaughnessy, Speech communication: human and machine. Universities press, 1-5 (1987)"},{"key":"2278_CR33","unstructured":"L. Perez, J. Wang, The effectiveness of data augmentation in image classification using deep learning. arXiv:1712.04621 (2017)"},{"key":"2278_CR34","doi-asserted-by":"crossref","unstructured":"J. Pons, O. Slizovskaia, R. Gong, E. Gomez, X. Serra, Timbre analysis of music audio signals with convolutional neural networks. In: Proceedings of 25th European Signal Processing Conference, 2744-2748 (2017). IEEE","DOI":"10.23919\/EUSIPCO.2017.8081710"},{"key":"2278_CR35","doi-asserted-by":"crossref","unstructured":"K. Racharla, V. Kumar, C.B. Jayant, A. Khairkar, P. Harish, Predominant musical instrument classification based on spectral features. In: Proceedings of 7th International Conference on Signal Processing and Integrated Networks (SPIN), 617-622 (2020)","DOI":"10.1109\/SPIN48934.2020.9071125"},{"key":"2278_CR36","doi-asserted-by":"publisher","first-page":"37","DOI":"10.1016\/j.specom.2017.02.004","volume":"89","author":"R Rajan","year":"2017","unstructured":"R. Rajan, H.A. Murthy, Two-pitch tracking in co-channel speech using modified group delay functions. Speech Commun. 89, 37\u201346 (2017)","journal-title":"Speech Commun."},{"key":"2278_CR37","doi-asserted-by":"crossref","unstructured":"R. Rajan, H.A. Murthy, Group delay based melody monopitch extraction from music. In: Proceedings of the IEEE International Conference on Audio, Speech and Signal Processing, 186-190 (2013)","DOI":"10.1109\/ICASSP.2013.6637634"},{"key":"2278_CR38","doi-asserted-by":"crossref","unstructured":"R. Rajan, Estimating pitch in speech and music using modified group delay functions. Ph.D. dissertation, Indian Institute of Technology, Madras (2017)","DOI":"10.1016\/j.specom.2017.02.004"},{"key":"2278_CR39","doi-asserted-by":"publisher","unstructured":"R. Rajan, H.A. Murthy, Music genre classification by fusion of modified group delay and melodic features. In: Proceedings of Twenty-third National Conference on Communications (NCC), 1-6 (2017). https:\/\/doi.org\/10.1109\/NCC.2017.8077056","DOI":"10.1109\/NCC.2017.8077056"},{"key":"2278_CR40","doi-asserted-by":"crossref","unstructured":"R. Rajan, H.A. Murthy, Melodic pitch extraction from music signals using modified group delay functions. In: Proceedings of 2013 National Conference on Communications (NCC), pp. 1-5. 
IEEE, (2013)","DOI":"10.1109\/NCC.2013.6487986"},{"key":"2278_CR41","doi-asserted-by":"publisher","unstructured":"L.C. Reghunath, R. Rajan, Transformer-based ensemble method for multiple predominant instruments recognition in polyphonic music. EURASIP Journal on Audio, Speech, and Music Processing, 11 (2022),1\u201314, Springer. https:\/\/doi.org\/10.1186\/s13636-022-00245-8","DOI":"10.1186\/s13636-022-00245-8"},{"key":"2278_CR42","unstructured":"L.C. Reghunath, R. Rajan, Attention-based predominant instruments recognition in polyphonic music. In: Proceedings of 18th Sound and Music Computing Conference (SMC),(2021),199-206"},{"key":"2278_CR43","doi-asserted-by":"crossref","unstructured":"J. Sebastian, H.A. Murthy, Group delay-based music source separation using deep recurrent neural networks. In: Proceedings of International Conference on Signal Processing and Communications (SPCOM), 1-5 (2016). IEEE","DOI":"10.1109\/SPCOM.2016.7746672"},{"key":"2278_CR44","doi-asserted-by":"publisher","first-page":"0245230","DOI":"10.1371\/journal.pone.0245230","volume":"16","author":"M Seeland","year":"2021","unstructured":"M. Seeland, P. M\u00e4der, Multi-view classification with convolutional neural networks. PLOS ONE 16, 0245230 (2021). https:\/\/doi.org\/10.1371\/journal.pone.0245230","journal-title":"PLOS ONE"},{"key":"2278_CR45","unstructured":"O. Slizovskaia, E. Gomez Gutierrez, G. Haro Ortega, Automatic musical instrument recognition in audiovisual recordings by combining image and audio classification strategies. In: Proceedings of 13th Sound and Music Computing Conference (SMC) 2016, 442-7 (2016)"},{"key":"2278_CR46","unstructured":"M. Sukhavasi, S. Adapa, Music theme recognition using cnn and self-attention. arXiv preprint arXiv:1911.07041 (2019)"},{"key":"2278_CR47","doi-asserted-by":"crossref","unstructured":"M. Uzair, N. Jamil, Effects of hidden layers on the efficiency of neural networks. In: Proceedings of IEEE 23rd International Multitopic Conference (INMIC), 1-6 (2020). IEEE","DOI":"10.1109\/INMIC50486.2020.9318195"},{"key":"2278_CR48","doi-asserted-by":"crossref","unstructured":"W. Yao, A. Moumtzidou, C.O. Dumitru, A. Stelios, I. Gialampoukidis, S. Vrochidis, M. Datcu, I. Kompatsiaris, Early and late fusion of multiple modalities in sentinel imagery and social media retrieval. In: Proceedings of International Conference of Pattern Recognition (ICPR) (2021)","DOI":"10.1007\/978-3-030-68787-8_43"},{"key":"2278_CR49","doi-asserted-by":"publisher","first-page":"852","DOI":"10.1109\/TASLP.2020.2971419","volume":"28","author":"D Yu","year":"2020","unstructured":"D. Yu, H. Duan, J. Fang, B. Zeng, Predominant instrument recognition based on deep neural network with auxiliary classification. IEEE\/ACM Trans. Audio Speech Language Process. 28, 852\u2013861 (2020)","journal-title":"IEEE\/ACM Trans. Audio Speech Language Process."},{"key":"2278_CR50","doi-asserted-by":"crossref","unstructured":"M.D. Zeiler, R. Fergus, T visualizing and understanding convolutional networks. 
In: Proceedings of European conference on computer vision (ECCV), 818-833 (2014)","DOI":"10.1007\/978-3-319-10590-1_53"}],"container-title":["Circuits, Systems, and Signal Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00034-022-02278-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00034-022-02278-y\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00034-022-02278-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,12]],"date-time":"2024-10-12T11:54:41Z","timestamp":1728734081000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00034-022-02278-y"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,1,18]]},"references-count":50,"journal-issue":{"issue":"6","published-print":{"date-parts":[[2023,6]]}},"alternative-id":["2278"],"URL":"https:\/\/doi.org\/10.1007\/s00034-022-02278-y","relation":{},"ISSN":["0278-081X","1531-5878"],"issn-type":[{"value":"0278-081X","type":"print"},{"value":"1531-5878","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,1,18]]},"assertion":[{"value":"11 March 2022","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"19 December 2022","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"20 December 2022","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"18 January 2023","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no competing interests.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing interests"}}]}}
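
The record above is a standard Crossref REST API "work" envelope: a top-level {"status", "message-type", "message"} wrapper whose "message" object carries the bibliographic fields (DOI, title, author, license, reference, citation counts, and so on). A minimal Python sketch for retrieving and unpacking the same record follows; it assumes only the standard library and the public api.crossref.org endpoint, and the mailto contact in the User-Agent header is a placeholder, not a real address.

import json
import urllib.request

# DOI of the work deposited in the record above.
DOI = "10.1007/s00034-022-02278-y"
URL = f"https://api.crossref.org/works/{DOI}"

# Crossref's "polite pool" asks clients to identify themselves; the mailto
# address below is a placeholder to be replaced with a real contact.
request = urllib.request.Request(
    URL, headers={"User-Agent": "metadata-example/0.1 (mailto:you@example.org)"}
)

with urllib.request.urlopen(request) as response:
    envelope = json.load(response)

# The payload mirrors the record above: a status wrapper around "message".
assert envelope["status"] == "ok" and envelope["message-type"] == "work"
work = envelope["message"]

print(work["title"][0])
print(", ".join(f'{a["given"]} {a["family"]}' for a in work["author"]))
print(f'Pages {work["page"]}, vol. {work["volume"]}, issue {work["issue"]}')
print(f'References deposited: {work["reference-count"]}, '
      f'cited by: {work["is-referenced-by-count"]}')

# Each deposited reference carries a "key" plus, where available, a DOI
# and/or an unstructured citation string.
for ref in work.get("reference", [])[:3]:
    print(ref["key"], ref.get("DOI", ref.get("unstructured", ""))[:80])

The same envelope shape applies to any /works/{DOI} lookup, so the sketch generalizes by swapping the DOI constant.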