{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,5]],"date-time":"2026-03-05T16:08:01Z","timestamp":1772726881215,"version":"3.50.1"},"publisher-location":"Cham","reference-count":32,"publisher":"Springer International Publishing","isbn-type":[{"value":"9783030657253","type":"print"},{"value":"9783030657260","type":"electronic"}],"license":[{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020]]},"DOI":"10.1007\/978-3-030-65726-0_21","type":"book-chapter","created":{"date-parts":[[2020,12,22]],"date-time":"2020-12-22T15:03:34Z","timestamp":1608649414000},"page":"229-240","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":7,"title":["Audio Interval Retrieval Using Convolutional Neural Networks"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-6917-4234","authenticated-orcid":false,"given":"Ievgeniia","family":"Kuzminykh","sequence":"first","affiliation":[]},{"given":"Dan","family":"Shevchuk","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3866-0672","authenticated-orcid":false,"given":"Stavros","family":"Shiaeles","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1788-547X","authenticated-orcid":false,"given":"Bogdan","family":"Ghita","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2020,12,22]]},"reference":[{"key":"21_CR1","unstructured":"Market Research Report: Fortune Business Insights (2018). https:\/\/www.fortunebusinessinsights.com\/industry-reports\/natural-language-processing-nlp-market-101933. Accessed 26 June 2020"},{"key":"21_CR2","unstructured":"Exclusive: Amazon says 100 million Alexa devices have been sold\u2014what\u2019s next? The verge interview. https:\/\/www.theverge.com\/2019\/1\/4\/18168565\/amazon-alexa-devices-how-many-sold-number-100-million-dave-limp. Accessed 26 June 2020"},{"key":"21_CR3","unstructured":"The Dynata global trends report. Dynata (2019)"},{"key":"21_CR4","unstructured":"Consumer Intelligence Series: Prepare for the voice revolution. PwC report (2019)"},{"key":"21_CR5","doi-asserted-by":"crossref","unstructured":"Kim, K., Heo, M., Choi, S., Zhang, B.: DeepStory: video story QA by deep embedded memory networks. In: Proceedings of the 26th International Joint Conference on Artificial Intelligence (IJCAI), Melbourne, Australia, pp. 2016\u20132022 (2017)","DOI":"10.24963\/ijcai.2017\/280"},{"key":"21_CR6","doi-asserted-by":"crossref","unstructured":"Lei, J., Yu, L., Berg, T.L., Bansal, M.: TVR: a large-scale dataset for video-subtitle moment retrieval. arXiv:2001.09099 (2020)","DOI":"10.1007\/978-3-030-58589-1_27"},{"issue":"1","key":"21_CR7","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1007\/s12046-016-0574-8","volume":"42","author":"N Brindha","year":"2016","unstructured":"Brindha, N., Visalakshi, P.: Bridging semantic gap between high-level and low-level features in content-based video retrieval using multi-stage ESN\u2013SVM classifier. 
S\u0101dhan\u0101 42(1), 1\u201310 (2016). https:\/\/doi.org\/10.1007\/s12046-016-0574-8","journal-title":"S\u0101dhan\u0101"},{"issue":"2\u20133","key":"21_CR8","doi-asserted-by":"publisher","first-page":"195","DOI":"10.1002\/ima.20150","volume":"18","author":"AF Smeaton","year":"2008","unstructured":"Smeaton, A.F., Wilkins, P., et al.: Content-based video retrieval: three example systems from TRECVid. Int. J. Imaging Syst. Technol. 18(2\u20133), 195\u2013201 (2008)","journal-title":"Int. J. Imaging Syst. Technol."},{"issue":"6","key":"21_CR9","doi-asserted-by":"publisher","first-page":"1406","DOI":"10.1109\/TCSVT.2017.2667710","volume":"28","author":"A Araujo","year":"2018","unstructured":"Araujo, A., Girod, B.: Large-scale video retrieval using image queries. IEEE Trans. Circ. Syst. Video Technol. 28(6), 1406\u20131420 (2018)","journal-title":"IEEE Trans. Circ. Syst. Video Technol."},{"key":"21_CR10","doi-asserted-by":"crossref","unstructured":"Hershey, S., et al.: CNN architectures for large-scale audio classification. In: IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), New Orleans, LA, pp. 131\u2013135 (2017)","DOI":"10.1109\/ICASSP.2017.7952132"},{"key":"21_CR11","doi-asserted-by":"crossref","unstructured":"Gemmeke, J.F., et al.: Audio set: an ontology and human-labeled dataset for audio events. In: IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), New Orleans, LA, pp. 776\u2013780 (2017)","DOI":"10.1109\/ICASSP.2017.7952261"},{"issue":"10","key":"21_CR12","doi-asserted-by":"publisher","first-page":"952","DOI":"10.1002\/int.20508","volume":"26","author":"E Dogan","year":"2011","unstructured":"Dogan, E., Sert, M., Yaz\u0131c\u0131, A.: A flexible and scalable audio information retrieval system for mixed-type audio signals. Int. J. Intell. Syst. 26(10), 952\u2013970 (2011)","journal-title":"Int. J. Intell. Syst."},{"key":"21_CR13","doi-asserted-by":"crossref","unstructured":"Guggenberger, M.: Aurio: audio processing, analysis and retrieval. In: Proceedings of the 23rd ACM International Conference on Multimedia (MM 2015), pp. 705\u2013708 (2015)","DOI":"10.1145\/2733373.2807408"},{"key":"21_CR14","doi-asserted-by":"crossref","unstructured":"Sundaram, S., Narayanan, S.: Audio retrieval by latent perceptual indexing. In: IEEE International Conference on Acoustics, Speech and Signal Processing, Las Vegas, NV, pp. 49\u201352 (2008)","DOI":"10.1109\/ICASSP.2008.4517543"},{"issue":"2","key":"21_CR15","doi-asserted-by":"publisher","first-page":"85","DOI":"10.1016\/j.patrec.2005.07.005","volume":"27","author":"C Wan","year":"2006","unstructured":"Wan, C., Liu, M.: Content-based audio retrieval with relevance feedback. Pattern Recogn. Lett. 27(2), 85\u201392 (2006)","journal-title":"Pattern Recogn. Lett."},{"issue":"1","key":"21_CR16","first-page":"200","volume":"52","author":"K Kim","year":"2006","unstructured":"Kim, K., Kim, S., Jeon, J., Park, K.: Quick audio retrieval using multiple feature vectors. IEEE Trans. Consum. Electron. 52(1), 200\u2013205 (2006)","journal-title":"IEEE Trans. Consum. Electron."},{"issue":"3","key":"21_CR17","doi-asserted-by":"publisher","first-page":"e0194151","DOI":"10.1371\/journal.pone.0194151","volume":"13","author":"KA Qazi","year":"2018","unstructured":"Qazi, K.A., Nawaz, T., Mehmood, Z., Rashid, M., Habib, H.A.: A hybrid technique for speech segregation and classification using a sophisticated deep neural network. 
PLoS ONE 13(3), e0194151 (2018)","journal-title":"PLoS ONE"},{"issue":"1","key":"21_CR18","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1186\/1687-4722-2012-23","volume":"2012","author":"T M\u00e4kinen","year":"2012","unstructured":"M\u00e4kinen, T., Kiranyaz, S., Raitoharju, J., Gabbouj, M.: An evolutionary feature synthesis approach for content-based audio retrieval. EURASIP J. Audio Speech Music Process. 2012(1), 1\u201323 (2012). https:\/\/doi.org\/10.1186\/1687-4722-2012-23","journal-title":"EURASIP J. Audio Speech Music Process."},{"key":"21_CR19","doi-asserted-by":"crossref","unstructured":"Patel, N.P., Patwardhan, M.S.: Identification of most contributing features for audio classification. In: International Conference on Cloud & Ubiquitous Computing & Emerging Technologies, Pune, pp. 219\u2013223 (2013)","DOI":"10.1109\/CUBE.2013.48"},{"issue":"1","key":"21_CR20","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1186\/s13636-018-0138-4","volume":"2018","author":"V Lostanlen","year":"2018","unstructured":"Lostanlen, V., Lafay, G., And\u00e9n, J., Lagrange, M.: Relevance-based quantization of scattering features for unsupervised mining of environmental audio. EURASIP J. Audio Speech Music Process. 2018(1), 1\u201310 (2018). https:\/\/doi.org\/10.1186\/s13636-018-0138-4","journal-title":"EURASIP J. Audio Speech Music Process."},{"issue":"3","key":"21_CR21","doi-asserted-by":"publisher","first-page":"269","DOI":"10.1023\/A:1012491016871","volume":"15","author":"G Lu","year":"2001","unstructured":"Lu, G.: Indexing and retrieval of audio: a survey. Multimed. Tools Appl. 15(3), 269\u2013290 (2001). https:\/\/doi.org\/10.1023\/A:1012491016871","journal-title":"Multimed. Tools Appl."},{"issue":"9","key":"21_CR22","doi-asserted-by":"publisher","first-page":"1939","DOI":"10.1109\/JPROC.2013.2251591","volume":"101","author":"G Richard","year":"2013","unstructured":"Richard, G., Sundaram, S., Narayanan, S.: An overview on perceptually motivated audio indexing and classification. Proc. IEEE 101(9), 1939\u20131954 (2013)","journal-title":"Proc. IEEE"},{"issue":"3","key":"21_CR23","doi-asserted-by":"publisher","first-page":"313","DOI":"10.1007\/s11042-006-0027-1","volume":"30","author":"J Pinquier","year":"2006","unstructured":"Pinquier, J., Andr\u00e9-Obrecht, R.: Audio indexing: primary components retrieval: robust classification in audio documents. Multimed. Tools Appl. 30(3), 313\u2013330 (2006). https:\/\/doi.org\/10.1007\/s11042-006-0027-1","journal-title":"Multimed. Tools Appl."},{"issue":"3","key":"21_CR24","doi-asserted-by":"publisher","first-page":"540","DOI":"10.1109\/TASLP.2015.2389618","volume":"23","author":"I McLoughlin","year":"2015","unstructured":"McLoughlin, I., Zhang, H., Xie, Z., Song, Y., Xiao, W.: Robust sound event classification using deep neural networks. IEEE\/ACM Trans. Audio Speech Lang. Process. 23(3), 540\u2013552 (2015)","journal-title":"IEEE\/ACM Trans. Audio Speech Lang. Process."},{"issue":"2","key":"21_CR25","doi-asserted-by":"publisher","first-page":"101","DOI":"10.1007\/s00530-010-0205-x","volume":"17","author":"L Xie","year":"2011","unstructured":"Xie, L., et al.: Pitch-density-based features and an SVM binary tree approach for multi-class audio classification in broadcast news. Multimed. Syst. 17(2), 101\u2013112 (2011). https:\/\/doi.org\/10.1007\/s00530-010-0205-x","journal-title":"Multimed. Syst."},{"key":"21_CR26","doi-asserted-by":"crossref","unstructured":"Pfeiffer, S., Fischer, S., Effelsberg, W.: Automatic audio content analysis. 
In: Proceedings of the Fourth ACM International Conference on Multimedia (MULTIMEDIA 1996), pp. 21\u201330 (1997)","DOI":"10.1145\/244130.244139"},{"issue":"1","key":"21_CR27","doi-asserted-by":"publisher","first-page":"2","DOI":"10.1007\/s005300050106","volume":"7","author":"J Foote","year":"1999","unstructured":"Foote, J.: An overview of audio information retrieval. Multimed. Syst. 7(1), 2\u201310 (1999). https:\/\/doi.org\/10.1007\/s005300050106","journal-title":"Multimed. Syst."},{"issue":"4","key":"21_CR28","first-page":"193","volume":"9","author":"C Catal","year":"2012","unstructured":"Catal, C.: Performance evaluation metrics for software fault prediction studies. Acta Polytech. Hung. 9(4), 193\u2013206 (2012)","journal-title":"Acta Polytech. Hung."},{"issue":"6","key":"21_CR29","doi-asserted-by":"publisher","first-page":"e0177678","DOI":"10.1371\/journal.pone.0177678","volume":"12","author":"S Boughorbel","year":"2017","unstructured":"Boughorbel, S., Jarray, F., El-Anbari, M.: Optimal classifier for imbalanced data using Matthews Correlation Coefficient metric. PLoS ONE 12(6), e0177678 (2017)","journal-title":"PLoS ONE"},{"key":"21_CR30","unstructured":"Andrew, G., et al.: MobileNets: efficient convolutional neural networks for mobile vision applications. Computer Vision and Pattern Recognition arXiv:1704.04861 (2017)"},{"issue":"6","key":"21_CR31","doi-asserted-by":"publisher","first-page":"1097","DOI":"10.1145\/3065386","volume":"60","author":"A Krizhevsky","year":"2017","unstructured":"Krizhevsky, A., Sutskever, I., Hinton, G.E.: ImageNet classification with deep convolutional neural networks. Commun. ACM 60(6), 1097\u20131105 (2017)","journal-title":"Commun. ACM"},{"key":"21_CR32","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. Computer Vision and Pattern Recognition arXiv:1512.03385 (2015)","DOI":"10.1109\/CVPR.2016.90"}],"container-title":["Lecture Notes in Computer Science","Internet of Things, Smart Spaces, and Next Generation Networks and Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-65726-0_21","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,7]],"date-time":"2024-03-07T17:57:24Z","timestamp":1709834244000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-030-65726-0_21"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020]]},"ISBN":["9783030657253","9783030657260"],"references-count":32,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-65726-0_21","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2020]]},"assertion":[{"value":"22 December 2020","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"NEW2AN","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Next Generation Wired\/Wireless Networking","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"St. 
Petersburg","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Russia","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2020","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"26 August 2020","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"28 August 2020","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"20","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"new2an2020","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/www.new2an.org\/#\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"EDAS","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"225","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"74","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"33% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"4.6","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"The conference was held virtually due to the COVID-19 
pandemic.","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}