{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,25]],"date-time":"2025-03-25T22:40:41Z","timestamp":1742942441821,"version":"3.40.3"},"publisher-location":"Cham","reference-count":30,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783030339036"},{"type":"electronic","value":"9783030339043"}],"license":[{"start":{"date-parts":[[2019,1,1]],"date-time":"2019-01-01T00:00:00Z","timestamp":1546300800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2019,1,1]],"date-time":"2019-01-01T00:00:00Z","timestamp":1546300800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2019]]},"DOI":"10.1007\/978-3-030-33904-3_69","type":"book-chapter","created":{"date-parts":[[2019,10,25]],"date-time":"2019-10-25T22:40:05Z","timestamp":1572043205000},"page":"729-739","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["A Novel Scheme for Training Two-Stream CNNs for Action Recognition"],"prefix":"10.1007","author":[{"given":"Reinier","family":"Oves Garc\u00eda","sequence":"first","affiliation":[]},{"given":"Eduardo F.","family":"Morales","sequence":"additional","affiliation":[]},{"given":"L. Enrique","family":"Sucar","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2019,10,22]]},"reference":[{"key":"69_CR1","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"29","DOI":"10.1007\/978-3-642-25446-8_4","volume-title":"Human Behavior Understanding","author":"M Baccouche","year":"2011","unstructured":"Baccouche, M., Mamalet, F., Wolf, C., Garcia, C., Baskurt, A.: Sequential deep learning for human action recognition. In: Salah, A.A., Lepri, B. (eds.) HBU 2011. LNCS, vol. 7065, pp. 29\u201339. Springer, Heidelberg (2011). https:\/\/doi.org\/10.1007\/978-3-642-25446-8_4"},{"key":"69_CR2","doi-asserted-by":"crossref","unstructured":"Carreira, J., Zisserman, A.: Quo vadis, action recognition? a new model and the kinetics dataset. In: 2017 IEEE Conference on CVPR, pp. 4724\u20134733. IEEE (2017)","DOI":"10.1109\/CVPR.2017.502"},{"key":"69_CR3","doi-asserted-by":"crossref","unstructured":"Choutas, V., Weinzaepfel, P., Revaud, J., Schmid, C.: Potion: pose motion representation for action recognition. In: CVPR. pp. 7024\u20137033 (2018)","DOI":"10.1109\/CVPR.2018.00734"},{"key":"69_CR4","doi-asserted-by":"crossref","unstructured":"Cruz, C., Sucar, L.E., Morales, E.F.: Real-time face recognition for human-robot interaction. In: 2008 8th IEEE International Conference on Automatic Face & Gesture Recognition, pp. 1\u20136. IEEE (2008)","DOI":"10.1109\/AFGR.2008.4813386"},{"key":"69_CR5","unstructured":"Diba, A., et al.: Temporal 3d convnets: new architecture and transfer learning for video classification. arXiv preprint arXiv:1711.08200 (2017)"},{"key":"69_CR6","unstructured":"Diba, A., Pazandeh, A.M., Van Gool, L.: Efficient two-stream motion and appearance 3d cnns for video classification. arXiv preprint arXiv:1608.08851 (2016)"},{"key":"69_CR7","doi-asserted-by":"crossref","unstructured":"Donahue, J., et al.: Long-term recurrent convolutional networks for visual recognition and description. In: CVPR, pp. 2625\u20132634 (2015)","DOI":"10.1109\/CVPR.2015.7298878"},{"key":"69_CR8","unstructured":"Du, Y., Wang, W., Wang, L.: Hierarchical recurrent neural network for skeleton based action recognition. In: CVPR, pp. 1110\u20131118 (2015)"},{"key":"69_CR9","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"363","DOI":"10.1007\/3-540-45103-X_50","volume-title":"Image Analysis","author":"G Farneb\u00e4ck","year":"2003","unstructured":"Farneb\u00e4ck, G.: Two-frame motion estimation based on polynomial expansion. In: Bigun, J., Gustavsson, T. (eds.) SCIA 2003. LNCS, vol. 2749, pp. 363\u2013370. Springer, Heidelberg (2003). https:\/\/doi.org\/10.1007\/3-540-45103-X_50"},{"key":"69_CR10","doi-asserted-by":"crossref","unstructured":"Hara, K., Kataoka, H., Satoh, Y.: Can spatiotemporal 3d cnns retrace the history of 2d cnns and imagenet? In: CVPR. pp. 6546\u20136555 (2018)","DOI":"10.1109\/CVPR.2018.00685"},{"key":"69_CR11","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"69_CR12","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"630","DOI":"10.1007\/978-3-319-46493-0_38","volume-title":"Computer Vision \u2013 ECCV 2016","author":"K He","year":"2016","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Identity mappings in deep residual networks. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9908, pp. 630\u2013645. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46493-0_38"},{"key":"69_CR13","doi-asserted-by":"publisher","first-page":"4","DOI":"10.1016\/j.imavis.2017.01.010","volume":"60","author":"S Herath","year":"2017","unstructured":"Herath, S., Harandi, M., Porikli, F.: Going deeper into action recognition: a survey. Image Vis. Comput. 60, 4\u201321 (2017)","journal-title":"Image Vis. Comput."},{"key":"69_CR14","unstructured":"Ioffe, S., Szegedy, C.: Batch normalization: Accelerating deep network training by reducing internal covariate shift. arXiv preprint arXiv:1502.03167 (2015)"},{"issue":"1","key":"69_CR15","doi-asserted-by":"publisher","first-page":"221","DOI":"10.1109\/TPAMI.2012.59","volume":"35","author":"S Ji","year":"2013","unstructured":"Ji, S., Xu, W., Yang, M., Yu, K.: 3d convolutional neural networks for human action recognition. IEEE Trans. Pattern Anal. Mach. Intell. 35(1), 221\u2013231 (2013)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"69_CR16","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"816","DOI":"10.1007\/978-3-319-46487-9_50","volume-title":"Computer Vision \u2013 ECCV 2016","author":"J Liu","year":"2016","unstructured":"Liu, J., Shahroudy, A., Xu, D., Wang, G.: Spatio-temporal LSTM with trust gates for 3D human action recognition. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9907, pp. 816\u2013833. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46487-9_50"},{"key":"69_CR17","doi-asserted-by":"crossref","unstructured":"Ma, S., Sigal, L., Sclaroff, S.: Learning activity progression in lstms for activity detection and early detection. In: CVPR, pp. 1942\u20131950 (2016)","DOI":"10.1109\/CVPR.2016.214"},{"key":"69_CR18","doi-asserted-by":"publisher","first-page":"158","DOI":"10.1016\/j.patcog.2017.05.025","volume":"71","author":"L Nanni","year":"2017","unstructured":"Nanni, L., Ghidoni, S., Brahnam, S.: Handcrafted vs. non-handcrafted features for computer vision classification. Pattern Recogn. 71, 158\u2013172 (2017)","journal-title":"Pattern Recogn."},{"key":"69_CR19","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"744","DOI":"10.1007\/978-3-319-46493-0_45","volume-title":"Computer Vision \u2013 ECCV 2016","author":"X Peng","year":"2016","unstructured":"Peng, X., Schmid, C.: Multi-region two-stream R-CNN for action detection. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9908, pp. 744\u2013759. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46493-0_45"},{"key":"69_CR20","doi-asserted-by":"crossref","unstructured":"Saha, S., Singh, G., Sapienza, M., Torr, P.H., Cuzzolin, F.: Deep learning for detecting multiple space-time action tubes in videos. arXiv preprint arXiv:1608.01529 (2016)","DOI":"10.5244\/C.30.58"},{"key":"69_CR21","unstructured":"Simonyan, K., Zisserman, A.: Two-stream convolutional networks for action recognition in videos. In: Advances in Neural Information Processing Systems, pp. 568\u2013576 (2014)"},{"key":"69_CR22","unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556 (2014)"},{"key":"69_CR23","unstructured":"Soomro, K., Zamir, A.R., Shah, M.: Ucf101: A dataset of 101 human actions classes from videos in the wild. arXiv preprint arXiv:1212.0402 (2012)"},{"key":"69_CR24","doi-asserted-by":"crossref","unstructured":"Suter, D.: Motion estimation and vector splines. In: CVPR, vol. 94, pp. 939\u2013942 (1994)","DOI":"10.1109\/CVPR.1994.323929"},{"key":"69_CR25","doi-asserted-by":"crossref","unstructured":"Szegedy, C., Vanhoucke, V., Ioffe, S., Shlens, J., Wojna, Z.: Rethinking the inception architecture for computer vision. In: CVPR, pp. 2818\u20132826 (2016)","DOI":"10.1109\/CVPR.2016.308"},{"key":"69_CR26","unstructured":"Tran, D., Ray, J., Shou, Z., Chang, S.F., Paluri, M.: Convnet architecture search for spatiotemporal feature learning. arXiv preprint arXiv:1708.05038 (2017)"},{"key":"69_CR27","doi-asserted-by":"crossref","unstructured":"Wang, H., Schmid, C.: Action recognition with improved trajectories. In: ICCV, pp. 3551\u20133558 (2013)","DOI":"10.1109\/ICCV.2013.441"},{"key":"69_CR28","doi-asserted-by":"crossref","unstructured":"Wang, Y., Song, J., Wang, L., Van Gool, L., Hilliges, O.: Two-stream sr-cnns for action recognition in videos. In: BMVC (2016)","DOI":"10.5244\/C.30.108"},{"key":"69_CR29","doi-asserted-by":"crossref","unstructured":"Yue-Hei Ng, J., Hausknecht, M., Vijayanarasimhan, S., Vinyals, O., Monga, R., Toderici, G.: Beyond short snippets: deep networks for video classification. In: CVPR, pp. 4694\u20134702 (2015)","DOI":"10.1109\/CVPR.2015.7299101"},{"key":"69_CR30","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"214","DOI":"10.1007\/978-3-540-74936-3_22","volume-title":"Pattern Recognition","author":"C Zach","year":"2007","unstructured":"Zach, C., Pock, T., Bischof, H.: A duality based approach for realtime TV-L1 optical flow. In: Hamprecht, F.A., Schn\u00f6rr, C., J\u00e4hne, B. (eds.) DAGM 2007. LNCS, vol. 4713, pp. 214\u2013223. Springer, Heidelberg (2007). https:\/\/doi.org\/10.1007\/978-3-540-74936-3_22"}],"container-title":["Lecture Notes in Computer Science","Progress in Pattern Recognition, Image Analysis, Computer Vision, and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-33904-3_69","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,12]],"date-time":"2024-03-12T14:13:21Z","timestamp":1710252801000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-030-33904-3_69"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019]]},"ISBN":["9783030339036","9783030339043"],"references-count":30,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-33904-3_69","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2019]]},"assertion":[{"value":"22 October 2019","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"CIARP","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Iberoamerican Congress on Pattern Recognition","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Havana","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Cuba","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2019","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"28 October 2019","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"31 October 2019","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"24","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"ciarp2019","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/ciarp.uci.cu\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"OCS","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"128","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"70","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"55% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"This content has been made available to all.","name":"free","label":"Free to read"}]}}