{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,27]],"date-time":"2025-06-27T08:44:24Z","timestamp":1751013864784,"version":"3.37.3"},"publisher-location":"Cham","reference-count":28,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783030057152"},{"type":"electronic","value":"9783030057169"}],"license":[{"start":{"date-parts":[[2018,12,11]],"date-time":"2018-12-11T00:00:00Z","timestamp":1544486400000},"content-version":"tdm","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2019]]},"DOI":"10.1007\/978-3-030-05716-9_30","type":"book-chapter","created":{"date-parts":[[2018,12,10]],"date-time":"2018-12-10T14:17:32Z","timestamp":1544451452000},"page":"365-376","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":14,"title":["Action Recognition Using Visual Attention with Reinforcement Learning"],"prefix":"10.1007","author":[{"given":"Hongyang","family":"Li","sequence":"first","affiliation":[]},{"given":"Jun","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Ruimin","family":"Hu","sequence":"additional","affiliation":[]},{"given":"Mei","family":"Yu","sequence":"additional","affiliation":[]},{"given":"Huafeng","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Zengmin","family":"Xu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2018,12,11]]},"reference":[{"key":"30_CR1","unstructured":"Ba, J., Mnih, V., Kavukcuoglu, K.: Multiple object recognition with visual attention. In: ICLR (2015)"},{"key":"30_CR2","doi-asserted-by":"crossref","unstructured":"Carreira, J., Zisserman, A.: Quo vadis, action recognition? A new model and the kinetics dataset. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.502"},{"key":"30_CR3","doi-asserted-by":"crossref","unstructured":"Donahue, J., et al.: Long-term recurrent convolutional networks for visual recognition and description. In: CVPR (2015)","DOI":"10.21236\/ADA623249"},{"key":"30_CR4","doi-asserted-by":"crossref","unstructured":"Feichtenhofer, C., Pinz, A., Wildes, R.: Spatiotemporal residual networks for video action recognition. In: NIPS (2016)","DOI":"10.1109\/CVPR.2017.787"},{"key":"30_CR5","doi-asserted-by":"crossref","unstructured":"Feichtenhofer, C., Pinz, A., Zisserman, A.: Convolutional two-stream network fusion for video action recognition. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.213"},{"key":"30_CR6","unstructured":"Girdhar, R., Ramanan, D.: Attentional pooling for action recognition. In: NIPS (2017)"},{"key":"30_CR7","doi-asserted-by":"crossref","unstructured":"Girdhar, R., Ramanan, D., Gupta, A., Sivic, J., Russell, B.: ActionVLAD: learning spatio-temporal aggregation for action classification. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.337"},{"key":"30_CR8","doi-asserted-by":"crossref","first-page":"364","DOI":"10.1016\/j.sigpro.2017.06.001","volume":"143","author":"Y Ji","year":"2018","unstructured":"Ji, Y., Yang, Y., Xu, X., Shen, H.T.: One-shot learning based pattern transition map for action early recognition. Signal Process. 143, 364\u2013370 (2018)","journal-title":"Signal Process."},{"key":"30_CR9","doi-asserted-by":"crossref","unstructured":"Karpathy, A., Toderici, G., Shetty, S., Leung, T., Sukthankar, R., Fei-Fei, L.: Large-scale video classification with convolutional neural networks. In: CVPR (2014)","DOI":"10.1109\/CVPR.2014.223"},{"key":"30_CR10","unstructured":"Krizhevsky, A., Sutskever, I., Hinton, G.E.: Imagenet classification with deep convolutional neural networks. In: NIPS (2012)"},{"key":"30_CR11","doi-asserted-by":"crossref","unstructured":"Kuehne, H., Jhuang, H., Garrote, E., Poggio, T., Serre, T.: HMDB: a large video database for human motion recognition. In: ICCV (2011)","DOI":"10.1109\/ICCV.2011.6126543"},{"key":"30_CR12","doi-asserted-by":"crossref","first-page":"41","DOI":"10.1016\/j.cviu.2017.10.011","volume":"166","author":"Z Li","year":"2018","unstructured":"Li, Z., Gavrilyuk, K., Gavves, E., Jain, M., Snoek, C.G.: VideoLSTM convolves, attends and flows for action recognition. Comput. Vis. Image Underst. 166, 41\u201350 (2018)","journal-title":"Comput. Vis. Image Underst."},{"key":"30_CR13","doi-asserted-by":"crossref","unstructured":"Long, X., Gan, C., de Melo, G., Wu, J., Liu, X., Wen, S.: Attention clusters: purely attention based local feature integration for video classification. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00817"},{"key":"30_CR14","unstructured":"Mnih, V., Heess, N., Graves, A., Kavukcuoglu, K.: Recurrent models of visual attention. In: NIPS (2014)"},{"key":"30_CR15","unstructured":"Ng, J.Y.H., Hausknecht, M., Vijayanarasimhan, S., Vinyals, O., Monga, R., Toderici, G.: Beyond short snippets: deep networks for video classification. In: CVPR (2015)"},{"key":"30_CR16","unstructured":"Sharma, S., Kiros, R., Salakhutdinov, R.: Action recognition using visual attention. In: ICLR (2016)"},{"key":"30_CR17","unstructured":"Simonyan, K., Zisserman, A.: Two-stream convolutional networks for action recognition in videos. In: NIPS (2014)"},{"key":"30_CR18","unstructured":"Soomro, K., Zamir, A.R., Shah, M.: UCF101: a dataset of 101 human actions classes from videos in the wild. arXiv preprint arXiv:1212.0402 (2012)"},{"key":"30_CR19","unstructured":"Srivastava, N., Mansimov, E., Salakhudinov, R.: Unsupervised learning of video representations using LSTMs. In: ICML (2015)"},{"key":"30_CR20","doi-asserted-by":"crossref","unstructured":"Wang, H., Schmid, C.: Action recognition with improved trajectories. In: ICCV (2013)","DOI":"10.1109\/ICCV.2013.441"},{"key":"30_CR21","doi-asserted-by":"crossref","unstructured":"Wang, K., Wang, X., Lin, L., Wang, M., Zuo, W.: 3D human activity recognition with reconfigurable convolutional neural networks (2014)","DOI":"10.1145\/2647868.2654912"},{"key":"30_CR22","doi-asserted-by":"crossref","unstructured":"Wang, L., Qiao, Y., Tang, X.: Action recognition with trajectory-pooled deep convolutional descriptors. In: CVPR (2015)","DOI":"10.1109\/CVPR.2015.7299059"},{"key":"30_CR23","doi-asserted-by":"crossref","unstructured":"Wang, L., Li, W., Li, W., Van Gool, L.: Appearance-and-relation networks for video classification. In: The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018)","DOI":"10.1109\/CVPR.2018.00155"},{"key":"30_CR24","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"20","DOI":"10.1007\/978-3-319-46484-8_2","volume-title":"Computer Vision \u2013 ECCV 2016","author":"L Wang","year":"2016","unstructured":"Wang, L., et al.: Temporal segment networks: towards good practices for deep action recognition. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9912, pp. 20\u201336. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46484-8_2"},{"key":"30_CR25","unstructured":"Xu, K., et al.: Show, attend and tell: neural image caption generation with visual attention. In: ICML (2015)"},{"key":"30_CR26","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"187","DOI":"10.1007\/978-3-319-27671-7_16","volume-title":"MultiMedia Modeling","author":"Z Xu","year":"2016","unstructured":"Xu, Z., Hu, R., Chen, J., Chen, H., Li, H.: Global contrast based salient region boundary sampling for action recognition. In: Tian, Q., Sebe, N., Qi, G.-J., Huet, B., Hong, R., Liu, X. (eds.) MMM 2016. LNCS, vol. 9516, pp. 187\u2013198. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-27671-7_16"},{"key":"30_CR27","doi-asserted-by":"crossref","unstructured":"Yeung, S., Russakovsky, O., Mori, G., Fei-Fei, L.: End-to-end learning of action detection from frame glimpses in videos. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.293"},{"key":"30_CR28","doi-asserted-by":"crossref","first-page":"137","DOI":"10.1016\/j.sigpro.2017.12.008","volume":"145","author":"M Zhang","year":"2018","unstructured":"Zhang, M., Yang, Y., Ji, Y., Xie, N., Shen, F.: Recurrent attention network using spatial-temporal relations for action recognition. Signal Process. 145, 137\u2013145 (2018)","journal-title":"Signal Process."}],"container-title":["Lecture Notes in Computer Science","MultiMedia Modeling"],"original-title":[],"link":[{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-05716-9_30","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2019,11,6]],"date-time":"2019-11-06T23:52:09Z","timestamp":1573084329000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/978-3-030-05716-9_30"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2018,12,11]]},"ISBN":["9783030057152","9783030057169"],"references-count":28,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-05716-9_30","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2018,12,11]]},"assertion":[{"value":"MMM","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Multimedia Modeling","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Thessaloniki","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Greece","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2019","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"8 January 2019","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"11 January 2019","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"25","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"mmm2019","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/mmm2019.iti.gr\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double blind for full papers and workshop papers, single blind for other paper types","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information"}},{"value":"EasyChair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information"}},{"value":"204","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information"}},{"value":"96","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information"}},{"value":"47% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information"}},{"value":"2.67","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information"}},{"value":"3","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information"}},{"value":"6 demonstration papers, 5 industry papers, 6 workshop papers, and 6 Video Browser Showdown papers were also accepted.","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information"}}]}}