{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,2]],"date-time":"2025-11-02T16:45:48Z","timestamp":1762101948358,"version":"3.40.3"},"publisher-location":"Cham","reference-count":40,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783319518107"},{"type":"electronic","value":"9783319518114"}],"license":[{"start":{"date-parts":[[2016,12,31]],"date-time":"2016-12-31T00:00:00Z","timestamp":1483142400000},"content-version":"unspecified","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2017]]},"DOI":"10.1007\/978-3-319-51811-4_30","type":"book-chapter","created":{"date-parts":[[2016,12,30]],"date-time":"2016-12-30T09:22:29Z","timestamp":1483089749000},"page":"365-378","source":"Crossref","is-referenced-by-count":26,"title":["Spatio-Temporal VLAD Encoding for Human Action Recognition in Videos"],"prefix":"10.1007","author":[{"given":"Ionut C.","family":"Duta","sequence":"first","affiliation":[]},{"given":"Bogdan","family":"Ionescu","sequence":"additional","affiliation":[]},{"given":"Kiyoharu","family":"Aizawa","sequence":"additional","affiliation":[]},{"given":"Nicu","family":"Sebe","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2016,12,31]]},"reference":[{"key":"30_CR1","doi-asserted-by":"crossref","unstructured":"Arandjelovi\u0107, R., Zisserman, A.: Three things everyone should know to improve object retrieval. In: CVPR (2012)","DOI":"10.1109\/CVPR.2012.6248018"},{"key":"30_CR2","doi-asserted-by":"crossref","unstructured":"Arandjelovic, R., Zisserman, A.: All about VLAD. In: CVPR (2013)","DOI":"10.1109\/CVPR.2013.207"},
{"key":"30_CR3","doi-asserted-by":"crossref","unstructured":"Ballas, N., Yang, Y., Lan, Z.Z., Delezoide, B., Pr\u00eateux, F., Hauptmann, A.: Space-time robust representation for action recognition. In: ICCV (2013)","DOI":"10.1109\/ICCV.2013.336"},{"key":"30_CR4","doi-asserted-by":"crossref","unstructured":"Bilen, H., Fernando, B., Gavves, E., Vedaldi, A., Gould, S.: Dynamic image networks for action recognition. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.331"},{"key":"30_CR5","doi-asserted-by":"crossref","unstructured":"Dalal, N., Triggs, B.: Histograms of oriented gradients for human detection. In: CVPR (2005)","DOI":"10.1109\/CVPR.2005.177"},{"key":"30_CR6","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"428","DOI":"10.1007\/11744047_33","volume-title":"Computer Vision \u2013 ECCV 2006","author":"N Dalal","year":"2006","unstructured":"Dalal, N., Triggs, B., Schmid, C.: Human detection using oriented histograms of flow and appearance. In: Leonardis, A., Bischof, H., Pinz, A. (eds.) ECCV 2006. LNCS, vol. 3952, pp. 428\u2013441. Springer, Heidelberg (2006). doi: 10.1007\/11744047_33"},{"key":"30_CR7","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: Imagenet: a large-scale hierarchical image database. In: CVPR (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"30_CR8","doi-asserted-by":"crossref","unstructured":"Duta, I.C., Nguyen, T.A., Aizawa, K., Ionescu, B., Sebe, N.: Boosting VLAD with double assignment using deep features for action recognition in videos. In: ICPR (2016)","DOI":"10.1109\/ICPR.2016.7899964"},{"key":"30_CR9","doi-asserted-by":"crossref","unstructured":"Duta, I.C., Uijlings, J.R.R., Nguyen, T.A., Aizawa, K., Hauptmann, A.G., Ionescu, B., Sebe, N.: Histograms of motion gradients for real-time video classification. In: CBMI (2016)","DOI":"10.1109\/CBMI.2016.7500260"},
{"key":"30_CR10","doi-asserted-by":"crossref","unstructured":"Jain, M., J\u00e9gou, H., Bouthemy, P.: Better exploiting motion for better action recognition. In: CVPR (2013)","DOI":"10.1109\/CVPR.2013.330"},{"issue":"9","key":"30_CR11","doi-asserted-by":"crossref","first-page":"1704","DOI":"10.1109\/TPAMI.2011.235","volume":"34","author":"H J\u00e9gou","year":"2012","unstructured":"J\u00e9gou, H., Perronnin, F., Douze, M., Sanchez, J., Perez, P., Schmid, C.: Aggregating local image descriptors into compact codes. TPAMI 34(9), 1704\u20131716 (2012)","journal-title":"TPAMI"},{"key":"30_CR12","doi-asserted-by":"crossref","unstructured":"Karpathy, A., Toderici, G., Shetty, S., Leung, T., Sukthankar, R., Fei-Fei, L.: Large-scale video classification with convolutional neural networks. In: CVPR (2014)","DOI":"10.1109\/CVPR.2014.223"},{"key":"30_CR13","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"256","DOI":"10.1007\/978-3-642-33783-3_19","volume-title":"Computer Vision \u2013 ECCV 2012","author":"O Kliper-Gross","year":"2012","unstructured":"Kliper-Gross, O., Gurovich, Y., Hassner, T., Wolf, L.: Motion interchange patterns for action recognition in unconstrained videos. In: Fitzgibbon, A., Lazebnik, S., Perona, P., Sato, Y., Schmid, C. (eds.) ECCV 2012. LNCS, vol. 7577, pp. 256\u2013269. Springer, Heidelberg (2012). doi: 10.1007\/978-3-642-33783-3_19"},{"key":"30_CR14","doi-asserted-by":"crossref","unstructured":"Krapac, J., Verbeek, J., Jurie, F.: Modeling spatial layout with fisher vectors for image categorization. In: ICCV (2011)","DOI":"10.1109\/ICCV.2011.6126406"},{"key":"30_CR15","doi-asserted-by":"crossref","unstructured":"Kuehne, H., Jhuang, H., Garrote, E., Poggio, T., Serre, T.: HMDB: a large video database for human motion recognition. In: ICCV (2011)","DOI":"10.1109\/ICCV.2011.6126543"},
{"key":"30_CR16","doi-asserted-by":"crossref","unstructured":"Laptev, I., Marsza\u0142ek, M., Schmid, C., Rozenfeld, B.: Learning realistic human actions from movies. In: CVPR (2008)","DOI":"10.1109\/CVPR.2008.4587756"},{"key":"30_CR17","unstructured":"Lazebnik, S., Schmid, C., Ponce, J.: Beyond bags of features: spatial pyramid matching for recognizing natural scene categories. In: CVPR (2006)"},{"key":"30_CR18","doi-asserted-by":"crossref","unstructured":"Mironic\u0103, I., Du\u0163\u0103, I.C., Ionescu, B., Sebe, N.: A modified vector of locally aggregated descriptors approach for fast video classification. Multimedia Tools and Applications (2016, in press)","DOI":"10.1007\/s11042-015-2819-7"},{"key":"30_CR19","doi-asserted-by":"crossref","unstructured":"Oneata, D., Verbeek, J., Schmid, C.: Action and event recognition with fisher vectors on a compact feature set. In: ICCV (2013)","DOI":"10.1109\/ICCV.2013.228"},{"key":"30_CR20","doi-asserted-by":"crossref","unstructured":"Park, E., Han, X., Berg, T.L., Berg, A.C.: Combining multiple sources of knowledge in deep CNNs for action recognition. In: WACV (2016)","DOI":"10.1109\/WACV.2016.7477589"},{"key":"30_CR21","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"660","DOI":"10.1007\/978-3-319-10578-9_43","volume-title":"Computer Vision \u2013 ECCV 2014","author":"X Peng","year":"2014","unstructured":"Peng, X., Wang, L., Qiao, Y., Peng, Q.: Boosting VLAD with supervised dictionary learning and high-order statistics. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8691, pp. 660\u2013674. Springer, Heidelberg (2014). doi: 10.1007\/978-3-319-10578-9_43"},{"key":"30_CR22","unstructured":"Peng, X., Wang, L., Wang, X., Qiao, Y.: Bag of visual words and fusion methods for action recognition: comprehensive study and good practice. arXiv:1405.4506 (2014)"},
{"key":"30_CR23","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"143","DOI":"10.1007\/978-3-642-15561-1_11","volume-title":"Computer Vision \u2013 ECCV 2010","author":"F Perronnin","year":"2010","unstructured":"Perronnin, F., S\u00e1nchez, J., Mensink, T.: Improving the fisher kernel for large-scale image classification. In: Daniilidis, K., Maragos, P., Paragios, N. (eds.) ECCV 2010. LNCS, vol. 6314, pp. 143\u2013156. Springer, Heidelberg (2010). doi: 10.1007\/978-3-642-15561-1_11"},{"issue":"5","key":"30_CR24","doi-asserted-by":"crossref","first-page":"971","DOI":"10.1007\/s00138-012-0450-4","volume":"24","author":"KK Reddy","year":"2013","unstructured":"Reddy, K.K., Shah, M.: Recognizing 50 human action categories of web videos. Mach. Vis. Appl. 24(5), 971\u2013981 (2013)","journal-title":"Mach. Vis. Appl."},{"key":"30_CR25","unstructured":"Simonyan, K., Zisserman, A.: Two-stream convolutional networks for action recognition in videos. In: NIPS (2014)"},{"key":"30_CR26","unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556 (2014)"},{"key":"30_CR27","doi-asserted-by":"crossref","first-page":"1473","DOI":"10.1007\/s00138-012-0449-x","volume":"24","author":"B Solmaz","year":"2013","unstructured":"Solmaz, B., Assari, S.M., Shah, M.: Classifying web videos using a global video descriptor. Mach. Vis. Appl. 24, 1473\u20131485 (2013)","journal-title":"Mach. Vis. Appl."},{"key":"30_CR28","unstructured":"Soomro, K., Zamir, A.R., Shah, M.: UCF101: a dataset of 101 human actions classes from videos in the wild. arXiv preprint arXiv:1212.0402 (2012)"},{"key":"30_CR29","doi-asserted-by":"crossref","unstructured":"Sun, L., Jia, K., Yeung, D.Y., Shi, B.E.: Human action recognition using factorized spatio-temporal convolutional networks. In: ICCV (2015)","DOI":"10.1109\/ICCV.2015.522"},
{"key":"30_CR30","doi-asserted-by":"crossref","unstructured":"Uijlings, J.R.R., Duta, I.C., Rostamzadeh, N., Sebe, N.: Realtime video classification using dense HOF\/HOG. In: ICMR (2014)","DOI":"10.1145\/2578726.2578744"},{"key":"30_CR31","doi-asserted-by":"crossref","first-page":"33","DOI":"10.1007\/s13735-014-0069-5","volume":"4","author":"JRR Uijlings","year":"2015","unstructured":"Uijlings, J.R.R., Duta, I.C., Sangineto, E., Sebe, N.: Video classification with densely extracted HOG\/HOF\/MBH features: an evaluation of the accuracy\/computational efficiency trade-off. Int. J.Multimed. Info. Retr. 4, 33\u201344 (2015)","journal-title":"Int. J. Multimed. Info. Retr."},{"issue":"1","key":"30_CR32","doi-asserted-by":"crossref","first-page":"60","DOI":"10.1007\/s11263-012-0594-8","volume":"103","author":"H Wang","year":"2013","unstructured":"Wang, H., Kl\u00e4ser, A., Schmid, C., Liu, C.L.: Dense trajectories and motion boundary descriptors for action recognition. IJCV 103(1), 60\u201379 (2013)","journal-title":"IJCV"},{"key":"30_CR33","doi-asserted-by":"crossref","first-page":"219","DOI":"10.1007\/s11263-015-0846-5","volume":"119","author":"H Wang","year":"2015","unstructured":"Wang, H., Oneata, D., Verbeek, J., Schmid, C.: A robust and efficient video representation for action recognition. IJCV 119, 219\u2013238 (2015)","journal-title":"IJCV"},{"key":"30_CR34","doi-asserted-by":"crossref","unstructured":"Wang, H., Schmid, C.: Action recognition with improved trajectories. In: ICCV (2013)","DOI":"10.1109\/ICCV.2013.441"},{"key":"30_CR35","unstructured":"Wang, H., Schmid, C.: LEAR-INRIA submission for the THUMOS workshop. In: ICCV Workshop (2013)"},{"key":"30_CR36","doi-asserted-by":"crossref","unstructured":"Wang, H., Ullah, M.M., Klaser, A., Laptev, I., Schmid, C.: Evaluation of local spatio-temporal features for action recognition. In: BMVC (2009)","DOI":"10.5244\/C.23.124"},
{"key":"30_CR37","unstructured":"Wang, L., Xiong, Y., Wang, Z., Qiao, Y.: Towards good practices for very deep two-stream convnets. arXiv preprint arxiv:1507.02159 (2015)"},{"key":"30_CR38","doi-asserted-by":"crossref","unstructured":"Yue-Hei Ng, J., Hausknecht, M., Vijayanarasimhan, S., Vinyals, O., Monga, R., Toderici, G.: Beyond short snippets: deep networks for video classification. In: CVPR (2015)","DOI":"10.1109\/CVPR.2015.7299101"},{"key":"30_CR39","doi-asserted-by":"crossref","unstructured":"Zach, C., Pock, T., Bischof, H.: A duality based approach for realtime TV-L 1 optical flow. In: Pattern Recognition (2007)","DOI":"10.1007\/978-3-540-74936-3_22"},{"key":"30_CR40","doi-asserted-by":"crossref","unstructured":"Zhu, J., Wang, B., Yang, X., Zhang, W., Tu, Z.: Action recognition with actons. In: ICCV (2013)","DOI":"10.1109\/ICCV.2013.442"}],"container-title":["Lecture Notes in Computer Science","MultiMedia Modeling"],"original-title":[],"link":[{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-319-51811-4_30","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2019,9,16]],"date-time":"2019-09-16T22:41:07Z","timestamp":1568673667000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/978-3-319-51811-4_30"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2016,12,31]]},"ISBN":["9783319518107","9783319518114"],"references-count":40,"URL":"https:\/\/doi.org\/10.1007\/978-3-319-51811-4_30","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2016,12,31]]}}}