{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T07:26:45Z","timestamp":1740122805503,"version":"3.37.3"},"reference-count":35,"publisher":"Springer Science and Business Media LLC","issue":"2","license":[{"start":{"date-parts":[[2022,7,1]],"date-time":"2022-07-01T00:00:00Z","timestamp":1656633600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2022,7,1]],"date-time":"2022-07-01T00:00:00Z","timestamp":1656633600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimed Tools Appl"],"published-print":{"date-parts":[[2023,1]]},"DOI":"10.1007\/s11042-022-13316-x","type":"journal-article","created":{"date-parts":[[2022,7,1]],"date-time":"2022-07-01T17:08:59Z","timestamp":1656695339000},"page":"2617-2633","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Improved use of descriptors for early recognition of actions in video"],"prefix":"10.1007","volume":"82","author":[{"given":"Mehrin","family":"Saremi","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7430-542X","authenticated-orcid":false,"given":"Farzin","family":"Yaghmaee","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,7,1]]},"reference":[{"key":"13316_CR1","doi-asserted-by":"publisher","unstructured":"Cao Y, Barrett D, Barbu A, Narayanaswamy S, Yu H, Michaux A, Lin Y, Dickinson S, Siskind J M, Wang S (2013) Recognize human activities from partially observed videos. In: Proceedings of the IEEE computer society conference on computer vision and pattern recognition. 
https:\/\/doi.org\/10.1109\/CVPR.2013.343, pp 2658\u20132665","DOI":"10.1109\/CVPR.2013.343"},{"key":"13316_CR2","doi-asserted-by":"publisher","unstructured":"Dalal N, Triggs B, Schmid C (2006) Human detection using oriented histograms of flow and appearance. In: Lecture notes in computer science (including subseries lecture notes in artificial intelligence and lecture notes in bioinformatics). https:\/\/doi.org\/10.1007\/11744047_33, pp 428\u2013441","DOI":"10.1007\/11744047_33"},{"key":"13316_CR3","doi-asserted-by":"publisher","unstructured":"Doll\u00e1r P, Rabaud V, Cottrell G, Belongie S (2005) Behavior recognition via sparse spatio-temporal features. In: Proceedings - 2nd Joint IEEE international workshop on visual surveillance and performance evaluation of tracking and surveillance, VS-PETS. https:\/\/doi.org\/10.1109\/VSPETS.2005.1570899, vol 2005, pp 65\u201372","DOI":"10.1109\/VSPETS.2005.1570899"},{"key":"13316_CR4","doi-asserted-by":"publisher","unstructured":"Hassan M, Atieh M (2015) Action prediction in smart home based on reinforcement learning. In: Lecture notes in computer science (including subseries lecture notes in artificial intelligence and lecture notes in bioinformatics). https:\/\/doi.org\/10.1007\/978-3-319-14424-5_22, vol 8456. Springer, pp 207\u2013212","DOI":"10.1007\/978-3-319-14424-5_22"},{"key":"13316_CR5","doi-asserted-by":"publisher","unstructured":"Kantorov V, Laptev I (2014) Efficient feature extraction, encoding, and classification for action recognition. In: Proceedings of the IEEE computer society conference on computer vision and pattern recognition. https:\/\/doi.org\/10.1109\/CVPR.2014.332, pp 2593\u20132600","DOI":"10.1109\/CVPR.2014.332"},{"key":"13316_CR6","doi-asserted-by":"publisher","unstructured":"Khan M A, Javed K, Khan S A, Saba T, Habib U, Khan J A, Abbasi A A (2020) Human action recognition using fusion of multiview and deep features: an application to video surveillance. 
Multimedia Tools and Applications, 1\u201327. https:\/\/doi.org\/10.1007\/s11042-020-08806-9","DOI":"10.1007\/s11042-020-08806-9"},{"key":"13316_CR7","doi-asserted-by":"publisher","first-page":"105986","DOI":"10.1016\/j.asoc.2019.105986","volume":"87","author":"MA Khan","year":"2020","unstructured":"Khan M A, Sharif M, Akram T, Raza M, Saba T, Rehman A (2020) Hand-crafted and deep convolutional neural network features fusion and selection strategy: an application to intelligent human action recognition. Appl Soft Comput J 87:105986. https:\/\/doi.org\/10.1016\/j.asoc.2019.105986","journal-title":"Appl Soft Comput J"},{"issue":"9","key":"13316_CR8","doi-asserted-by":"publisher","first-page":"1844","DOI":"10.1109\/TPAMI.2015.2491928","volume":"38","author":"Y Kong","year":"2016","unstructured":"Kong Y, Fu Y (2016) Max-margin action prediction machine. IEEE Trans Pattern Anal Mach Intell 38(9):1844\u20131858. https:\/\/doi.org\/10.1109\/TPAMI.2015.2491928","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"13316_CR9","doi-asserted-by":"publisher","unstructured":"Kong Y, Jia Y, Fu Y (2012) Learning human interaction by interactive phrases. In: Lecture notes in computer science (including subseries lecture notes in artificial intelligence and lecture notes in bioinformatics). https:\/\/doi.org\/10.1007\/978-3-642-33718-5_22, vol 7572 LNCS, pp 300\u2013313","DOI":"10.1007\/978-3-642-33718-5_22"},{"key":"13316_CR10","doi-asserted-by":"publisher","unstructured":"Kong Y, Kit D, Fu Y (2014) A discriminative model with multiple temporal scales for action prediction. In: Lecture notes in computer science (including subseries lecture notes in artificial intelligence and lecture notes in bioinformatics). 
https:\/\/doi.org\/10.1007\/978-3-319-10602-1_39, vol 8693 LNCS, pp 596\u2013611","DOI":"10.1007\/978-3-319-10602-1_39"},{"key":"13316_CR11","doi-asserted-by":"publisher","unstructured":"Kong Y, Tao Z, Fu Y (2017) Deep sequential context networks for action prediction. In: 2017 IEEE Conference on computer vision and pattern recognition (CVPR). https:\/\/doi.org\/10.1109\/CVPR.2017.390, http:\/\/ieeexplore.ieee.org\/document\/8099873\/, pp 3662\u20133670","DOI":"10.1109\/CVPR.2017.390"},{"issue":"5","key":"13316_CR12","doi-asserted-by":"publisher","first-page":"2272","DOI":"10.1109\/TIP.2017.2751145","volume":"27","author":"S Lai","year":"2017","unstructured":"Lai S, Zheng W S, Hu J F, Zhang J (2017) Global-local temporal saliency action prediction. IEEE Trans Image Process 27(5):2272\u20132285. https:\/\/doi.org\/10.1109\/TIP.2017.2751145","journal-title":"IEEE Trans Image Process"},{"key":"13316_CR13","doi-asserted-by":"publisher","unstructured":"Laptev I (2005) On space-time interest points. In: International journal of computer vision. https:\/\/doi.org\/10.1007\/s11263-005-1838-7, vol 64, pp 107\u2013123","DOI":"10.1007\/s11263-005-1838-7"},{"issue":"8","key":"13316_CR14","doi-asserted-by":"publisher","first-page":"1644","DOI":"10.1109\/TPAMI.2013.2297321","volume":"36","author":"K Li","year":"2014","unstructured":"Li K, Fu Y (2014) Prediction of human activity by discovering temporal sequence patterns. IEEE Trans Pattern Anal Mach Intell 36(8):1644\u20131657. https:\/\/doi.org\/10.1109\/TPAMI.2013.2297321","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"13316_CR15","doi-asserted-by":"crossref","unstructured":"Liu J, Shahroudy A, Wang G, Duan L-Y, Kot AC (2018) Ssnet: scale selection network for online 3d action prediction. 
In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 8349\u20138358","DOI":"10.1109\/CVPR.2018.00871"},{"key":"13316_CR16","doi-asserted-by":"publisher","unstructured":"Liu J, Shahroudy A, Wang G, Duan L-Y, Kot Chichung A (2019) Skeleton-based online action prediction using scale selection network. IEEE Trans Pattern Anal Mach Intell, 1\u20131. https:\/\/doi.org\/10.1109\/tpami.2019.2898954","DOI":"10.1109\/tpami.2019.2898954"},{"key":"13316_CR17","doi-asserted-by":"publisher","unstructured":"Ma S, Sigal L, Sclaroff S (2016) Learning activity progression in LSTMs for activity detection and early detection. In: 2016 IEEE Conference on computer vision and pattern recognition (CVPR). https:\/\/doi.org\/10.1109\/CVPR.2016.214, http:\/\/ieeexplore.ieee.org\/document\/7780583\/, pp 1942\u20131950","DOI":"10.1109\/CVPR.2016.214"},{"key":"13316_CR18","unstructured":"Manning C, Raghavan P, Sch\u00fctze H (2010) Introduction to information retrieval. Cambridge University Press"},{"key":"13316_CR19","unstructured":"Rana AJ, Tirupattur P, Duarte K, Demir U, Rawat Y, Shah M (2020) An online system for real-time activity detection in untrimmed surveillance videos Mamshad Nayeem Rizve. Appl Sci 10(1)"},{"key":"13316_CR20","unstructured":"Rasouli A, Kotseruba I, Tsotsos JK (2019) Pedestrian action anticipation using contextual feature fusion in stacked RNNs. In: Proceedings of the 30th British Machine Vision Conference 2019, BMVC 2019"},{"issue":"5","key":"13316_CR21","doi-asserted-by":"publisher","first-page":"971","DOI":"10.1007\/s00138-012-0450-4","volume":"24","author":"KK Reddy","year":"2013","unstructured":"Reddy KK, Shah M (2013) Recognizing 50 human action categories of web videos. Mach Vis Appl 24(5):971\u2013981. 
https:\/\/doi.org\/10.1007\/s00138-012-0450-4","journal-title":"Mach Vis Appl"},{"key":"13316_CR22","doi-asserted-by":"publisher","unstructured":"Rodriguez MD, Ahmed J, Shah M (2008) Action MACH: a spatio-temporal maximum average correlation height filter for action recognition. In: 26th IEEE Conference on computer vision and pattern recognition, CVPR. https:\/\/doi.org\/10.1109\/CVPR.2008.4587727","DOI":"10.1109\/CVPR.2008.4587727"},{"key":"13316_CR23","doi-asserted-by":"publisher","unstructured":"Ryoo MS (2011) Human activity prediction: early recognition of ongoing activities from streaming videos. In: Proceedings of the IEEE international conference on computer vision. https:\/\/doi.org\/10.1109\/ICCV.2011.6126349, pp 1036\u20131043","DOI":"10.1109\/ICCV.2011.6126349"},{"key":"13316_CR24","doi-asserted-by":"publisher","unstructured":"Schuldt C, Laptev I, Caputo B (2004) Recognizing human actions: a local SVM approach. In: Proceedings - international conference on pattern recognition. https:\/\/doi.org\/10.1109\/ICPR.2004.1334462, vol 3, pp 32\u201336","DOI":"10.1109\/ICPR.2004.1334462"},{"key":"13316_CR25","doi-asserted-by":"publisher","unstructured":"Scovanner P, Ali S, Shah M (2007) A 3-dimensional sift descriptor and its application to action recognition. In: Proceedings of the 15th international conference on Multimedia - MULTIMEDIA \u201907. https:\/\/doi.org\/10.1145\/1291233.1291311, http:\/\/portal.acm.org\/citation.cfm?doid=1291233.1291311, p 357","DOI":"10.1145\/1291233.1291311"},{"issue":"1","key":"13316_CR26","doi-asserted-by":"publisher","first-page":"281","DOI":"10.1007\/s10044-019-00789-0","volume":"23","author":"M Sharif","year":"2020","unstructured":"Sharif M, Khan MA, Zahid F, Shah JH, Akram T (2020) Human action recognition: a framework of statistical weighted segmentation and rank correlation-based selection. Pattern Anal Applic 23(1):281\u2013294. 
https:\/\/doi.org\/10.1007\/s10044-019-00789-0","journal-title":"Pattern Anal Applic"},{"key":"13316_CR27","doi-asserted-by":"publisher","first-page":"181","DOI":"10.1007\/978-3-319-09396-3_9","volume":"71","author":"K Soomro","year":"2014","unstructured":"Soomro K, Zamir AR (2014) Action recognition in realistic sports videos. Adv Comput Vis Pattern Recogn 71:181\u2013208. https:\/\/doi.org\/10.1007\/978-3-319-09396-3_9","journal-title":"Adv Comput Vis Pattern Recogn"},{"key":"13316_CR28","doi-asserted-by":"publisher","unstructured":"Tran DP, Nhu NG, Hoang VD (2018) Pedestrian action prediction based on deep features extraction of human posture and traffic scene. In: Lecture notes in computer science (including subseries lecture notes in artificial intelligence and lecture notes in bioinformatics). https:\/\/doi.org\/10.1007\/978-3-319-75420-8_53, https:\/\/link.springer.com\/chapter\/10.1007\/978-3-319-75420-8_53, vol 10752 LNAI. Springer, pp 563\u2013572","DOI":"10.1007\/978-3-319-75420-8_53"},{"key":"13316_CR29","doi-asserted-by":"crossref","unstructured":"Vondrick C, Pirsiavash H, Torralba A (2016) Anticipating visual representations from unlabeled video. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 98\u2013106","DOI":"10.1109\/CVPR.2016.18"},{"key":"13316_CR30","doi-asserted-by":"publisher","first-page":"139","DOI":"10.1016\/j.neucom.2016.11.004","volume":"225","author":"H Wang","year":"2017","unstructured":"Wang H, Yang W, Yuan C, Ling H, Hu W (2017) Human activity prediction using temporally-weighted generalized time warping. Neurocomputing 225:139\u2013147. https:\/\/doi.org\/10.1016\/j.neucom.2016.11.004","journal-title":"Neurocomputing"},{"key":"13316_CR31","doi-asserted-by":"publisher","unstructured":"Wang H, Kl\u00e4ser A, Schmid C, Liu CL (2011) Action recognition by dense trajectories. In: Proceedings of the IEEE computer society conference on computer vision and pattern recognition. 
https:\/\/doi.org\/10.1007\/s11263-012-0594-8, pp 3169\u20133176","DOI":"10.1007\/s11263-012-0594-8"},{"issue":"1","key":"13316_CR32","doi-asserted-by":"publisher","first-page":"60","DOI":"10.1007\/s11263-012-0594-8","volume":"103","author":"H Wang","year":"2013","unstructured":"Wang H, Kl\u00e4ser A, Schmid C, Liu CL (2013) Dense trajectories and motion boundary descriptors for action recognition. Int J Comput Vis 103(1):60\u201379. https:\/\/doi.org\/10.1007\/s11263-012-0594-8","journal-title":"Int J Comput Vis"},{"issue":"3","key":"13316_CR33","doi-asserted-by":"publisher","first-page":"219","DOI":"10.1007\/s11263-015-0846-5","volume":"119","author":"H Wang","year":"2016","unstructured":"Wang H, Oneata D, Verbeek J, Schmid C (2016) A robust and efficient video representation for action recognition. Int J Comput Vis 119(3):219\u2013238. https:\/\/doi.org\/10.1007\/s11263-015-0846-5","journal-title":"Int J Comput Vis"},{"key":"13316_CR34","doi-asserted-by":"publisher","unstructured":"Wang H, Schmid C (2013) Action recognition with improved trajectories. In: Proceedings of the IEEE international conference on computer vision. https:\/\/doi.org\/10.1016\/j.neucom.2016.11.004, pp 3551\u20133558","DOI":"10.1016\/j.neucom.2016.11.004"},{"key":"13316_CR35","doi-asserted-by":"publisher","unstructured":"Wang X, Hu J-F, Lai J-H, Zhang J, Zheng W-S (2019) Progressive teacher-student learning for early action prediction. In: 2019 IEEE\/CVF Conference on computer vision and pattern recognition (CVPR). https:\/\/doi.org\/10.1109\/cvpr.2019.00367. 
Institute of Electrical and Electronics Engineers (IEEE), pp 3551\u20133560","DOI":"10.1109\/cvpr.2019.00367"}],"container-title":["Multimedia Tools and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-022-13316-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11042-022-13316-x\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-022-13316-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,1,3]],"date-time":"2023-01-03T05:32:22Z","timestamp":1672723942000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11042-022-13316-x"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,7,1]]},"references-count":35,"journal-issue":{"issue":"2","published-print":{"date-parts":[[2023,1]]}},"alternative-id":["13316"],"URL":"https:\/\/doi.org\/10.1007\/s11042-022-13316-x","relation":{},"ISSN":["1380-7501","1573-7721"],"issn-type":[{"type":"print","value":"1380-7501"},{"type":"electronic","value":"1573-7721"}],"subject":[],"published":{"date-parts":[[2022,7,1]]},"assertion":[{"value":"9 February 2020","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"8 May 2022","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"30 May 2022","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"1 July 2022","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article 
History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of Interests"}}]}}