{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,26]],"date-time":"2025-03-26T10:11:13Z","timestamp":1742983873563,"version":"3.40.3"},"publisher-location":"Cham","reference-count":37,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783030503468"},{"type":"electronic","value":"9783030503475"}],"license":[{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"tdm","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"vor","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020]]},"DOI":"10.1007\/978-3-030-50347-5_18","type":"book-chapter","created":{"date-parts":[[2020,6,18]],"date-time":"2020-06-18T14:04:01Z","timestamp":1592489041000},"page":"196-208","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Two-Stream Framework for Activity Recognition with 2D Human Pose Estimation"],"prefix":"10.1007","author":[{"given":"Wei","family":"Chang","sequence":"first","affiliation":[]},{"given":"Chunyang","family":"Ye","sequence":"additional","affiliation":[]},{"given":"Hui","family":"Zhou","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2020,6,17]]},"reference":[{"unstructured":"Ba, J., Mnih, V., Kavukcuoglu, K.: Multiple object recognition with visual attention. arXiv preprint arXiv:1412.7755 (2014)","key":"18_CR1"},{"doi-asserted-by":"crossref","unstructured":"Cao, Z., Hidalgo, G., Simon, T., Wei, S.E., Sheikh, Y.: OpenPose: realtime multi-person 2D pose estimation using part affinity fields. arXiv preprint arXiv:1812.08008 (2018)","key":"18_CR2","DOI":"10.1109\/CVPR.2017.143"},{"doi-asserted-by":"crossref","unstructured":"Cao, Z., Simon, T., Wei, S.E., Sheikh, Y.: Realtime multi-person 2D pose estimation using part affinity fields. In: CVPR, pp. 7291\u20137299 (2017)","key":"18_CR3","DOI":"10.1109\/CVPR.2017.143"},{"doi-asserted-by":"crossref","unstructured":"Choutas, V., Weinzaepfel, P., Revaud, J., Schmid, C.: Potion: pose motion representation for action recognition. In: CVPR, pp. 7024\u20137033 (2018)","key":"18_CR4","DOI":"10.1109\/CVPR.2018.00734"},{"doi-asserted-by":"crossref","unstructured":"Diba, A., Sharma, V., Van Gool, L.: Deep temporal linear encoding networks. In: CVPR, pp. 2329\u20132338 (2017)","key":"18_CR5","DOI":"10.1109\/CVPR.2017.168"},{"doi-asserted-by":"crossref","unstructured":"Feichtenhofer, C., Pinz, A., Wildes, R.: Spatiotemporal residual networks for video action recognition. In: NIPS, pp. 3468\u20133476 (2016)","key":"18_CR6","DOI":"10.1109\/CVPR.2017.787"},{"doi-asserted-by":"crossref","unstructured":"Feichtenhofer, C., Pinz, A., Zisserman, A.: Convolutional two-stream network fusion for video action recognition. In: CVPR, pp. 1933\u20131941 (2016)","key":"18_CR7","DOI":"10.1109\/CVPR.2016.213"},{"unstructured":"Glorot, X., Bordes, A., Bengio, Y.: Deep sparse rectifier neural networks. In: AISTATS, pp. 315\u2013323 (2011)","key":"18_CR8"},{"doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR, pp. 
770\u2013778 (2016)","key":"18_CR9","DOI":"10.1109\/CVPR.2016.90"},{"doi-asserted-by":"crossref","unstructured":"Huang, G., Liu, Z., Van, L., Weinberger, K.Q.: Densely connected convolutional networks. In: CVPR, pp. 4700\u20134708 (2017)","key":"18_CR10","DOI":"10.1109\/CVPR.2017.243"},{"unstructured":"Ioffe, S., Szegedy, C.: Batch normalization: accelerating deep network training by reducing internal covariate shift. arXiv preprint arXiv:1502.03167 (2015)","key":"18_CR11"},{"unstructured":"Krizhevsky, A., Hinton, G.: Learning multiple layers of features from tiny images, Technical report, Citeseer (2009)","key":"18_CR12"},{"doi-asserted-by":"crossref","unstructured":"Kuehne, H., Jhuang, H., Garrote, E., Poggio, T., Serre, T.: HMDB: a large video database for human motion recognition. In: ICCV, pp. 2556\u20132563. IEEE (2011)","key":"18_CR13","DOI":"10.1109\/ICCV.2011.6126543"},{"unstructured":"Lan, Z., Ming, L., Li, X., Hauptmann, A.G., Raj, B.: Beyond gaussian pyramid: multi-skip feature stacking for action recognition. In: CVPR, pp. 204\u2013212 (2015)","key":"18_CR14"},{"issue":"11","key":"18_CR15","doi-asserted-by":"publisher","first-page":"2278","DOI":"10.1109\/5.726791","volume":"86","author":"Y LeCun","year":"1998","unstructured":"LeCun, Y., Bottou, L., Bengio, Y., Haffner, P., et al.: Gradient-based learning applied to document recognition. Proc. IEEE 86(11), 2278\u20132324 (1998)","journal-title":"Proc. IEEE"},{"key":"18_CR16","first-page":"41","volume":"166","author":"Z Li","year":"2018","unstructured":"Li, Z., Gavrilyuk, K., Gavves, E., Jain, M., Snoek, C.G.: VideoLSTM convolves, attends and flows for action recognition. CVIU 166, 41\u201350 (2018)","journal-title":"CVIU"},{"doi-asserted-by":"crossref","unstructured":"Long, X., et al.: Multimodal keyless attention fusion for video classification. In: AAAI, pp. 1\u20138 (2018)","key":"18_CR17","DOI":"10.1609\/aaai.v32i1.12319"},{"key":"18_CR18","first-page":"76","volume":"71","author":"CY Ma","year":"2019","unstructured":"Ma, C.Y., Chen, M.H., Kira, Z., AlRegib, G.: TS-LSTM and temporal-inception: exploiting spatiotemporal dynamics for activity recognition. SPIC 71, 76\u201387 (2019)","journal-title":"SPIC"},{"key":"18_CR19","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"281","DOI":"10.1007\/978-3-030-12939-2_20","volume-title":"Pattern Recognition","author":"L Sevilla-Lara","year":"2019","unstructured":"Sevilla-Lara, L., Liao, Y., G\u00fcney, F., Jampani, V., Geiger, A., Black, M.J.: On the integration of optical flow and action recognition. In: Brox, T., Bruhn, A., Fritz, M. (eds.) GCPR 2018. LNCS, vol. 11269, pp. 281\u2013297. Springer, Cham (2019). https:\/\/doi.org\/10.1007\/978-3-030-12939-2_20"},{"unstructured":"Sharma, S., Kiros, R., Salakhutdinov, R.: Action recognition using visual attention. arXiv preprint arXiv:1511.04119 (2015)","key":"18_CR20"},{"issue":"1","key":"18_CR21","doi-asserted-by":"publisher","first-page":"221","DOI":"10.1109\/TPAMI.2012.59","volume":"35","author":"J Shuiwang","year":"2013","unstructured":"Shuiwang, J., Ming, Y., Kai, Y.: 3D convolutional neural networks for human action recognition. TPAMI 35(1), 221\u2013231 (2013)","journal-title":"TPAMI"},{"doi-asserted-by":"crossref","unstructured":"Simon, T., Joo, H., Matthews, I., Sheikh, Y.: Hand keypoint detection in single images using multiview bootstrapping. In: CVPR, pp. 
1145\u20131153 (2017)","key":"18_CR22","DOI":"10.1109\/CVPR.2017.494"},{"unstructured":"Simonyan, K., Zisserman, A.: Two-stream convolutional networks for action recognition in videos. In: NIPS, pp. 568\u2013576 (2014)","key":"18_CR23"},{"unstructured":"Soomro, K., Zamir, A.R., Shah, M.: UCF101: a dataset of 101 human actions classes from videos in the wild. arXiv preprint arXiv:1212.0402 (2012)","key":"18_CR24"},{"doi-asserted-by":"crossref","unstructured":"Sun, L., Jia, K., Yeung, D.Y., Shi, B.E.: Human action recognition using factorized spatio-temporal convolutional networks. In: ICCV, pp. 4597\u20134605 (2015)","key":"18_CR25","DOI":"10.1109\/ICCV.2015.522"},{"doi-asserted-by":"crossref","unstructured":"Szegedy, C., et al.: Going deeper with convolutions. In: CVPR, pp. 1\u20139 (2015)","key":"18_CR26","DOI":"10.1109\/CVPR.2015.7298594"},{"doi-asserted-by":"crossref","unstructured":"Szegedy, C., Vanhoucke, V., Ioffe, S., Shlens, J., Wojna, Z.: Rethinking the inception architecture for computer vision. In: CVPR, pp. 2818\u20132826 (2016)","key":"18_CR27","DOI":"10.1109\/CVPR.2016.308"},{"doi-asserted-by":"crossref","unstructured":"Tran, D., Bourdev, L., Fergus, R., Torresani, L., Paluri, M.: Learning spatiotemporal features with 3D convolutional networks. In: ICCV, pp. 4489\u20134497 (2015)","key":"18_CR28","DOI":"10.1109\/ICCV.2015.510"},{"issue":"6","key":"18_CR29","doi-asserted-by":"publisher","first-page":"1510","DOI":"10.1109\/TPAMI.2017.2712608","volume":"40","author":"G Varol","year":"2017","unstructured":"Varol, G., Laptev, I., Schmid, C.: Long-term temporal convolutions for action recognition. TPAMI 40(6), 1510\u20131517 (2017)","journal-title":"TPAMI"},{"doi-asserted-by":"crossref","unstructured":"Wang, H., Schmid, C.: Action recognition with improved trajectories. In: ICCV, pp. 3551\u20133558 (2013)","key":"18_CR30","DOI":"10.1109\/ICCV.2013.441"},{"key":"18_CR31","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"20","DOI":"10.1007\/978-3-319-46484-8_2","volume-title":"Computer Vision \u2013 ECCV 2016","author":"L Wang","year":"2016","unstructured":"Wang, L., et al.: Temporal segment networks: towards good practices for deep action recognition. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9912, pp. 20\u201336. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46484-8_2"},{"issue":"3","key":"18_CR32","doi-asserted-by":"publisher","first-page":"254","DOI":"10.1007\/s11263-015-0859-0","volume":"119","author":"L Wang","year":"2016","unstructured":"Wang, L., Qiao, Y., Tang, X.: MoFAP: a multi-level representation for action recognition. IJCV 119(3), 254\u2013271 (2016)","journal-title":"IJCV"},{"doi-asserted-by":"crossref","unstructured":"Wang, L., Qiao, Y., Tang, X.: Action recognition with trajectory-pooled deep-convolutional descriptors. In: CVPR, pp. 4305\u20134314 (2015)","key":"18_CR33","DOI":"10.1109\/CVPR.2015.7299059"},{"issue":"3\u20134","key":"18_CR34","doi-asserted-by":"publisher","first-page":"229","DOI":"10.1007\/BF00992696","volume":"8","author":"RJ Williams","year":"1992","unstructured":"Williams, R.J.: Simple statistical gradient-following algorithms for connectionist reinforcement learning. Mach. Learn. 8(3\u20134), 229\u2013256 (1992). https:\/\/doi.org\/10.1007\/BF00992696","journal-title":"Mach. Learn."},{"doi-asserted-by":"crossref","unstructured":"Wu, Z., Jiang, Y.G., Wang, X., Ye, H., Xue, X.: Multi-stream multi-class fusion of deep networks for video classification. 
In: ACM Multimedia, pp. 791\u2013800. ACM (2016)","key":"18_CR35","DOI":"10.1145\/2964284.2964328"},{"unstructured":"Xu, K., Ba, J., Kiros, R., Cho, K., Bengio, Y.: Show, attend and tell: neural image caption generation with visual attention. In: ICML, pp. 2048\u20132057 (2015)","key":"18_CR36"},{"doi-asserted-by":"crossref","unstructured":"Zhu, J., Zhu, Z., Zou, W.: End-to-end video-level representation learning for action recognition. In: ICPR, pp. 645\u2013650. IEEE (2018)","key":"18_CR37","DOI":"10.1109\/ICPR.2018.8545710"}],"container-title":["Lecture Notes in Computer Science","Image Analysis and Recognition"],"original-title":[],"language":"en","link":[{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-50347-5_18","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,10,29]],"date-time":"2022-10-29T08:16:34Z","timestamp":1667031394000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/978-3-030-50347-5_18"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020]]},"ISBN":["9783030503468","9783030503475"],"references-count":37,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-50347-5_18","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2020]]},"assertion":[{"value":"17 June 2020","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICIAR","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Image Analysis and Recognition","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"P\u00f3voa de Varzim","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Portugal","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2020","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"24 June 2020","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"26 June 2020","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"iciar2020","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/www.aimiconf.org\/iciar20\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"EasyChair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"123","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"54","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"15","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"44% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2.9","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.8","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Due to the coronavirus pandemic, ICIAR 2020 will be held virtually only.","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}