{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,4,17]],"date-time":"2025-04-17T06:08:46Z","timestamp":1744870126435,"version":"3.37.3"},"reference-count":26,"publisher":"Springer Science and Business Media LLC","issue":"4","license":[{"start":{"date-parts":[[2023,12,1]],"date-time":"2023-12-01T00:00:00Z","timestamp":1701388800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,12,1]],"date-time":"2023-12-01T00:00:00Z","timestamp":1701388800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J Speech Technol"],"published-print":{"date-parts":[[2023,12]]},"DOI":"10.1007\/s10772-023-10075-4","type":"journal-article","created":{"date-parts":[[2023,12,23]],"date-time":"2023-12-23T05:02:05Z","timestamp":1703307725000},"page":"1091-1098","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Gender and age-evolution detection based on audio forensic analysis using light deep neural network"],"prefix":"10.1007","volume":"26","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-9416-9680","authenticated-orcid":false,"given":"Noor D.","family":"AL-Shakarchy","sequence":"first","affiliation":[]},{"given":"Huda","family":"Rageb","sequence":"additional","affiliation":[]},{"given":"Mais Saad","family":"Safoq","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,12,23]]},"reference":[{"key":"10075_CR1","unstructured":"Ahmad, J., Fiaz, M., Kwon, S. I., Sodanil, M., Vo, B., & Baik, S. W. (2016). Gender identification using MFCC for telephone applications-a comparative study. arXiv Prepr. arXiv1601.01577., 2016."},{"key":"10075_CR2","doi-asserted-by":"crossref","unstructured":"Alnuaim, A. A., Zakariah, M., Shashidhar, C., Hatamleh, W. A., Tarazi, H., Shukla, P. K., & Ratna, R. (2022). Speaker gender recognition based on deep neural networks and ResNet50, Wireless Communications and Mobile Computing. Hindawi.","DOI":"10.1155\/2022\/4444388"},{"key":"10075_CR3","unstructured":"Becker, S., Ackermann, M., Lapuschkin, S., M\u00fcller, K. R., & Samek, W. (2018). Interpreting and explaining deep neural networks for classification of audio signals, arXiv Prepr. ArXiv1807.03418, 2018."},{"key":"10075_CR4","unstructured":"Choi, J., Kim, S., Park, W., Yong, S., & Nam, S. (2020). Children\u2019s song dataset for singing voice research, 21th International Society for Music Information Retrieval Conference (ISMIR)."},{"key":"10075_CR5","doi-asserted-by":"crossref","unstructured":"Chung*, A. Z. J. S., Nagrani*, A. (2018). VoxCeleb2: Deep Speaker Recognition, Interspeech.","DOI":"10.21437\/Interspeech.2018-1929"},{"key":"10075_CR6","doi-asserted-by":"publisher","first-page":"351","DOI":"10.1016\/j.apacoust.2019.07.033","volume":"156","author":"F Ertam","year":"2019","unstructured":"Ertam, F. (2019). An effective gender recognition approach using voice data via deeper LSTM networks. Applied Acoustics, 156, 351\u2013358.","journal-title":"Applied Acoustics"},{"key":"10075_CR7","doi-asserted-by":"crossref","unstructured":"Goyal, S., Patage, V. V., & Tiwari, S. (2020). Gender and age group predictions from speech features using multi-layer perceptron model, 2020 IEEE 17th India Council international conference (INDICON) (pp. 1\u20136). IEEE.","DOI":"10.1109\/INDICON49873.2020.9342434"},{"key":"10075_CR8","doi-asserted-by":"crossref","unstructured":"Gupta, P., Goel, S., & Purwar, A. (2018). A stacked technique for gender recognition through voice, 2018 Eleventh international conference on contemporary computing, (IC3) (pp. 1\u20133). IEEE.","DOI":"10.1109\/IC3.2018.8530520"},{"key":"10075_CR9","doi-asserted-by":"crossref","unstructured":"Gupta, Y., Gangwar, K., Singhal, M., & Hemavathi, D. (2022). Gender and age recognition using audio data\u2014artificial neural networks, Soft Computing for Security Applications, 1397, 449\u2013470","DOI":"10.1007\/978-981-16-5301-8_34"},{"key":"10075_CR10","doi-asserted-by":"crossref","unstructured":"Lee, Y. O., Jo, J., & Hwang, J. (2017). Application of deep neural network and generative adversarial network to industrial maintenance: A case study of induction motor fault detection. Proceedings of 2017 IEEE international conference on Big Data (Big Data), Boston, MA, USA, vol. 1\u201314 December, pp. 3248\u20133253, 2017.","DOI":"10.1109\/BigData.2017.8258307"},{"issue":"1","key":"10075_CR11","doi-asserted-by":"publisher","first-page":"492","DOI":"10.3390\/make1010030","volume":"1","author":"IE Livieris","year":"2019","unstructured":"Livieris, I. E., Pintelas, E., & Pintelas, P. (2019). Gender recognition by voice using an improved self-labeled algorithm. Machine Learning and Knowledge Extraction, 1(1), 492\u2013503.","journal-title":"Machine Learning and Knowledge Extraction"},{"key":"10075_CR12","doi-asserted-by":"crossref","unstructured":"Markitantov, M., & Verkholyak, O. (2019). Automatic recognition of speaker age and gender based on deep neural networks, International conference on speech and computer, (pp. 327\u2013336). Springer","DOI":"10.1007\/978-3-030-26061-3_34"},{"issue":"9","key":"10075_CR13","first-page":"1529","volume":"31","author":"S Mavaddati","year":"2018","unstructured":"Mavaddati, S. (2018). Voice-based age and gender recognition using training generative sparse model. International Journal of Engineering, 31(9), 1529\u20131535.","journal-title":"International Journal of Engineering"},{"key":"10075_CR14","doi-asserted-by":"publisher","DOI":"10.1016\/j.apacoust.2020.107823","volume":"175","author":"MM Nasef","year":"2021","unstructured":"Nasef, M. M., Sauber, A. M., & Nabil, M. M. (2021). Voice gender recognition under unconstrained environments using self-attention. Applied Acoustics, 175, 107823.","journal-title":"Applied Acoustics"},{"key":"10075_CR15","doi-asserted-by":"crossref","unstructured":"Pahwa, A., & Aggarwal, G. (2016). Speech feature extraction for gender recognition, International Journal of Images, Grapics and Signal Processing, 9(3), 17\u201325.","DOI":"10.5815\/ijigsp.2016.09.03"},{"key":"10075_CR16","doi-asserted-by":"crossref","unstructured":"Priya, E., Reshma, P. S., Sashaank, S. (2022). Temporal and spectral features based gender recognition from audio signals, 2022 International conference on communication, computing and internet of things  (IC3IoT) (pp. 1\u20135). IEEE.","DOI":"10.1109\/IC3IOT53935.2022.9767929"},{"key":"10075_CR17","doi-asserted-by":"publisher","first-page":"5","DOI":"10.1016\/j.knosys.2016.10.008","volume":"115","author":"Z Qawaqneh","year":"2017","unstructured":"Qawaqneh, Z., Mallouh, A. A., & Barkana, B. D. (2017). Deep neural network framework and transformed MFCCs for speaker\u2019s age and gender classification. Knowledge-Based Systems, 115, 5\u201314.","journal-title":"Knowledge-Based Systems"},{"key":"10075_CR18","doi-asserted-by":"crossref","unstructured":"Ramdinmawii, E., & Mittal, V. K. (2016). Gender identification from speech signal by examining the speech production characteristics, International conference on statistical process control and operations management (ICSPCom), vol. 244\u2013249. 1, 2016.","DOI":"10.1109\/ICSPCom.2016.7980584"},{"key":"10075_CR19","doi-asserted-by":"crossref","unstructured":"Sharma, G., & Mala, S. (2020). Framework for gender recognition using voice, 2020 10th international conference on cloud computing, data science & engineering (Confluence) (pp. 32\u201337).  IEEE.","DOI":"10.1109\/Confluence47617.2020.9058146"},{"key":"10075_CR20","doi-asserted-by":"crossref","unstructured":"Shergill, J. S., Pravin, C., & Ojha, V. (2021). Accent and gender recognition from English language speech and audio using signal processing and deep learning, International conference on Hybrid Intelligent Systems, (HIS 2020) (pp. 62\u201372). Springer.","DOI":"10.1007\/978-3-030-73050-5_7"},{"key":"10075_CR21","doi-asserted-by":"crossref","unstructured":"Susithra, N., Rajalakshmi, K., Ashwath, P., Ajay, B., Rohit, D., & Stewaugh, S. (2022). Speech based emotion recognition and gender identification using FNN and CNN Models, 2022 3rd international conference for emerging technology, (INCET) (pp. 1\u20136).","DOI":"10.1109\/INCET54531.2022.9824908"},{"key":"10075_CR22","doi-asserted-by":"crossref","unstructured":"Wang, Z. (2017). Learning utterance-level representations for speech emotion and age\/gender recognition using deep neural, 2017 IEEE international conference on acoustics, speech and signal processing, (ICASSP) (pp. 5150\u20135154).","DOI":"10.1109\/ICASSP.2017.7953138"},{"key":"10075_CR23","first-page":"1","volume-title":"Soft Computing","author":"G Yasmin","year":"2022","unstructured":"Yasmin, G., Das, A. K., Nayak, J., Vimal, S., & Dutta, S. (2022). A rough set theory and deep learning-based predictive system for gender recognition using audio speech. In A. Di Nola & R. Cerulli (Eds), Soft Computing (pp. 1\u201324). Springer."},{"key":"10075_CR24","doi-asserted-by":"crossref","unstructured":"Yusnita, M. A., Hafiz, A. M., Fadzilah, M. N., Zulhanip, A. Z., & Idris, M. (2017). Automatic gender recognition using linear prediction coefficients and artificial neural network on speech signal. 2017 7th IEEE international conference on control system, computing and Engineering (ICCSCE).","DOI":"10.1109\/ICCSCE.2017.8284437"},{"key":"10075_CR25","doi-asserted-by":"publisher","DOI":"10.4324\/9780429292200","volume-title":"Digital audio forensics fundamentals: From capture to courtroom","author":"J Zjalic","year":"2020","unstructured":"Zjalic, J. (2020). Digital audio forensics fundamentals: From capture to courtroom (1st ed.). Focal Press.","edition":"1"},{"key":"10075_CR26","doi-asserted-by":"crossref","unstructured":"Zvarevashe, K., & Olugbara, O. O. (2018). Gender voice recognition using random forest recursive feature elimination with gradient boosting machines, 2018 international conference on advances in big data, computing and data communication systems, (icABCD 2018) (pp. 1\u20136). IEEE.","DOI":"10.1109\/ICABCD.2018.8465466"}],"container-title":["International Journal of Speech Technology"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10772-023-10075-4.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10772-023-10075-4\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10772-023-10075-4.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,11]],"date-time":"2024-01-11T10:15:55Z","timestamp":1704968155000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10772-023-10075-4"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,12]]},"references-count":26,"journal-issue":{"issue":"4","published-print":{"date-parts":[[2023,12]]}},"alternative-id":["10075"],"URL":"https:\/\/doi.org\/10.1007\/s10772-023-10075-4","relation":{},"ISSN":["1381-2416","1572-8110"],"issn-type":[{"type":"print","value":"1381-2416"},{"type":"electronic","value":"1572-8110"}],"subject":[],"published":{"date-parts":[[2023,12]]},"assertion":[{"value":"5 October 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"20 November 2023","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"23 December 2023","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}]}}