{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T17:43:39Z","timestamp":1750355019713},"publisher-location":"Cham","reference-count":21,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783319672199"},{"type":"electronic","value":"9783319672205"}],"license":[{"start":{"date-parts":[[2017,9,2]],"date-time":"2017-09-02T00:00:00Z","timestamp":1504310400000},"content-version":"unspecified","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2018]]},"DOI":"10.1007\/978-3-319-67220-5_3","type":"book-chapter","created":{"date-parts":[[2017,9,1]],"date-time":"2017-09-01T11:19:00Z","timestamp":1504264740000},"page":"27-37","source":"Crossref","is-referenced-by-count":6,"title":["Evaluation of Gated Recurrent Neural Networks in Music Classification Tasks"],"prefix":"10.1007","author":[{"given":"Jan","family":"Jakubik","sequence":"first","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2017,9,2]]},"reference":[{"key":"3_CR1","doi-asserted-by":"crossref","unstructured":"Sundermeyer, M., Schl\u00fcter R., Ney, H.: LSTM Neural Networks for Language Modeling. Interspeech (2012)","DOI":"10.21437\/Interspeech.2012-65"},{"key":"3_CR2","doi-asserted-by":"crossref","unstructured":"Graves, A., Santiago, F., Schmidhuber, J.: Bidirectional LSTM networks for improved phoneme classification and recognition. In: Artificial Neural Networks: Formal Models and Their Applications\u2013ICANN 2005, pp. 753\u2013753 (2015)","DOI":"10.1007\/11550907_126"},{"key":"3_CR3","doi-asserted-by":"crossref","first-page":"1735","DOI":"10.1162\/neco.1997.9.8.1735","volume":"9","author":"J Schmidhuber","year":"1997","unstructured":"Schmidhuber, J., Hochreiter, S.: Long short-term memory. Neural Comput. 9, 1735\u20131780 (1997)","journal-title":"Neural Comput."},{"issue":"3","key":"3_CR4","doi-asserted-by":"crossref","first-page":"226","DOI":"10.1007\/s11633-016-1006-2","volume":"13","author":"G Zhou","year":"2016","unstructured":"Zhou, G., et al.: Minimal gated unit for recurrent neural networks. Int. J. Autom. Comput. 13(3), 226\u2013234 (2016)","journal-title":"Int. J. Autom. Comput."},{"issue":"2","key":"3_CR5","doi-asserted-by":"crossref","first-page":"303","DOI":"10.1109\/TMM.2010.2098858","volume":"13","author":"Z Fu","year":"2011","unstructured":"Fu, Z., et al.: A survey of audio-based music classification and annotation. IEEE Trans. Multimedia 13(2), 303\u2013319 (2011)","journal-title":"IEEE Trans. Multimedia"},{"key":"3_CR6","unstructured":"Sturm, B.: The GTZAN dataset: Its contents, its faults, their effects on evaluation, and its future use (2013). arXiv preprint, arXiv:1306.1461"},{"issue":"5","key":"3_CR7","doi-asserted-by":"crossref","first-page":"293","DOI":"10.1109\/TSA.2002.800560","volume":"10","author":"G Tzanetakis","year":"2002","unstructured":"Tzanetakis, G., Cook, P.: Musical genre classification of audio signals. IEEE Trans. Speech Audio Process. 10(5), 293\u2013302 (2002)","journal-title":"IEEE Trans. Speech Audio Process."},{"key":"3_CR8","doi-asserted-by":"crossref","unstructured":"Sturm, B.: An analysis of the GTZAN music genre dataset. In: Proceedings of the Second International ACM Workshop on Music Information Retrieval with User-Centered and Multimodal Strategies. ACM (2012)","DOI":"10.1145\/2390848.2390851"},{"key":"3_CR9","unstructured":"Kim, Y.E., et al.: Music emotion recognition: a state of the art review. In: Proceedings of the 11th International Conference on Music Information Retrieval (2010)"},{"issue":"3","key":"3_CR10","doi-asserted-by":"crossref","first-page":"40","DOI":"10.1145\/2168752.2168754","volume":"3","author":"Y Yang","year":"2012","unstructured":"Yang, Y., Chen, H.H.: Machine recognition of music emotion: a review. ACM Trans. Intell. Syst. Technol. 3(3), 40 (2012)","journal-title":"ACM Trans. Intell. Syst. Technol."},{"key":"3_CR11","unstructured":"Song, Y., Dixon, S., Pearce, M.: Evaluation of musical features for emotion classification. In: Proceedings of the 13th International Conference on Music Information Retrieval (2012)"},{"key":"3_CR12","unstructured":"Hamel, P., Wood, S., Eck, D.: Automatic identification of instrument classes in polyphonic and polyinstrument audio. In: Proceedings of the 10th International Conference on Music Information Retrieval, Kobe, Japan (2009)"},{"key":"3_CR13","unstructured":"Abe\u00dfer, J., Dittmar, C., Schuller, G.: Automatic recognition and parametrization of frequency modulation techniques in bass guitar recordings. In: Audio Engineering Society Conference: 42nd International Conference: Semantic Audio. Audio Engineering Society (2011)"},{"key":"3_CR14","unstructured":"Chung, J., et al.: Empirical evaluation of gated recurrent neural networks on sequence modeling (2014). arXiv preprint, arXiv:1412.3555"},{"key":"3_CR15","doi-asserted-by":"crossref","unstructured":"Greff, K., et al.: LSTM: A search space odyssey. IEEE Trans. Neural Netw. Learn. Syst. (2016)","DOI":"10.1109\/TNNLS.2016.2582924"},{"key":"3_CR16","unstructured":"Jozefowicz, R., Zaremba, W., Sutskever I.: An empirical exploration of recurrent network architectures. In: Proceedings of the 32nd International Conference on Machine Learning (2015)"},{"key":"3_CR17","doi-asserted-by":"crossref","unstructured":"Goller, C., K\u00fcchler, A.: Learning task-dependent distributed representations by backpropagation through structure. Neural Networks (1996)","DOI":"10.1109\/ICNN.1996.548916"},{"key":"3_CR18","unstructured":"Chung, J., Gulcehre, C., Cho, K.H., Bengio, Y.: Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling (2014)"},{"key":"3_CR19","unstructured":"Aljanaki, A., Wiering, F., Veltkamp, R.: Collecting annotations for induced musical emotion via online game with a purpose Emotify. Technical report Series 2014. UU-CS-2014-015 (2014)"},{"key":"3_CR20","unstructured":"Seyerlehner, K., Widmer, G., Schnitzer, D.: From rhythm patterns to perceived tempo. In: Proceedings of the 8th International Conference on Music Information Retrieval (2007)"},{"key":"3_CR21","unstructured":"Theano Development Team: \u201cTheano: A Python framework for fast computation of mathematical expressions\u201d"}],"container-title":["Advances in Intelligent Systems and Computing","Information Systems Architecture and Technology: Proceedings of 38th International Conference on Information Systems Architecture and Technology \u2013 ISAT 2017"],"original-title":[],"link":[{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-319-67220-5_3","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,8,2]],"date-time":"2022-08-02T02:11:54Z","timestamp":1659406314000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/978-3-319-67220-5_3"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2017,9,2]]},"ISBN":["9783319672199","9783319672205"],"references-count":21,"URL":"https:\/\/doi.org\/10.1007\/978-3-319-67220-5_3","relation":{},"ISSN":["2194-5357","2194-5365"],"issn-type":[{"type":"print","value":"2194-5357"},{"type":"electronic","value":"2194-5365"}],"subject":[],"published":{"date-parts":[[2017,9,2]]}}}