{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,1]],"date-time":"2025-03-01T08:40:23Z","timestamp":1740818423192,"version":"3.38.0"},"reference-count":56,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T00:00:00Z","timestamp":1732665600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T00:00:00Z","timestamp":1732665600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62001413","62001413","62001413","62001413","62001413","62001413"],"award-info":[{"award-number":["62001413","62001413","62001413","62001413","62001413","62001413"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100003787","name":"Natural Science Foundation of Hebei Province","doi-asserted-by":"publisher","award":["F2020203064","F2020203064","F2020203064","F2020203064","F2020203064","F2020203064"],"award-info":[{"award-number":["F2020203064","F2020203064","F2020203064","F2020203064","F2020203064","F2020203064"]}],"id":[{"id":"10.13039\/501100003787","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Cogn Comput"],"published-print":{"date-parts":[[2025,2]]},"DOI":"10.1007\/s12559-024-10374-1","type":"journal-article","created":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T19:09:54Z","timestamp":1732734594000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["DmrNet: 
Dual-stream Mutual Information Contraction and Re-discrimination Network for Semi-supervised Temporal Action Detection"],"prefix":"10.1007","volume":"17","author":[{"given":"Qiming","family":"Zhang","sequence":"first","affiliation":[]},{"given":"Zhengping","family":"Hu","sequence":"additional","affiliation":[]},{"given":"Yulu","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Shuai","family":"Bi","sequence":"additional","affiliation":[]},{"given":"Hehao","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Jirui","family":"Di","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,27]]},"reference":[{"key":"10374_CR1","doi-asserted-by":"publisher","first-page":"106","DOI":"10.1016\/j.neucom.2022.12.049","volume":"524","author":"Y Chen","year":"2023","unstructured":"Chen Y, Jiang H, Xiao J, Li D, Gu Q. Temporal action detection with dynamic weights based on curriculum learning. Neurocomputing. 2023;524:106\u201316.","journal-title":"Neurocomputing"},{"key":"10374_CR2","doi-asserted-by":"publisher","first-page":"354","DOI":"10.1016\/j.patcog.2017.10.013","volume":"77","author":"J Gu","year":"2018","unstructured":"Gu J, Wang Z, Kuen J, Ma L, Shahroudy A, Shuai B, et al. Recent advances in convolutional neural networks. Pattern Recogn. 2018;77:354\u201377.","journal-title":"Pattern Recogn"},{"issue":"7553","key":"10374_CR3","doi-asserted-by":"publisher","first-page":"436","DOI":"10.1038\/nature14539","volume":"521","author":"Y LeCun","year":"2015","unstructured":"LeCun Y, Bengio Y, Hinton G. Deep learning. Nature. 2015;521(7553):436\u201344.","journal-title":"Nature"},{"key":"10374_CR4","doi-asserted-by":"publisher","unstructured":"Jhuang H, Gall J, Zuffi S, Schmid C, Black MJ. Towards understanding action recognition. In: proceedings of the IEEE\/CVF international conference on computer vision. 2013. pp. 3192-3199. 
https:\/\/doi.org\/10.1109\/ICCV.2013.396.","DOI":"10.1109\/ICCV.2013.396"},{"key":"10374_CR5","unstructured":"Kay W, Carreira J, Simonyan K, Zhang B, Hillier C, Vijayanarasimhan S, et al. The kinetics human action video dataset. 2017. arXiv preprint https:\/\/arxiv.org\/abs\/1705.06950. Accessed 10 Dec 2023."},{"key":"10374_CR6","unstructured":"Soomro K, Zamir A, Shah M. Ucf101: a dataset of 101 human actions classes from videos in the wild. 2012. arXiv preprint https:\/\/arxiv.org\/abs\/1212.0402.\u00a0Accessed 10 Dec 2023."},{"key":"10374_CR7","doi-asserted-by":"publisher","first-page":"166488","DOI":"10.1109\/ACCESS.2021.3136567","volume":"9","author":"BH Ngo","year":"2021","unstructured":"Ngo BH, Kim JH, Chae YJ, Cho SI. Multi-view collaborative learning for semi-supervised domain adaptation. IEEE Access. 2021;9:166488\u2013501.","journal-title":"IEEE Access"},{"key":"10374_CR8","doi-asserted-by":"publisher","first-page":"128467","DOI":"10.1109\/ACCESS.2021.3110605","volume":"9","author":"BH Ngo","year":"2021","unstructured":"Ngo BH, Park JH, Park SJ, Cho SI. Semi-supervised domain adaptation using explicit class-wise matching for domain-invariant and class-discriminative feature learning. IEEE Access. 2021;9:128467\u201380.","journal-title":"IEEE Access"},{"key":"10374_CR9","doi-asserted-by":"publisher","unstructured":"Gao J, Yang Z, Chen K, Sun C, Nevatia R. TURN TAP: temporal unit regression network for temporal action proposals. In: proceedings of the IEEE\/CVF international conference on computer vision. 2017. pp. 3648\u20133656. https:\/\/doi.org\/10.48550\/arXiv.1703.06189.","DOI":"10.48550\/arXiv.1703.06189"},{"key":"10374_CR10","doi-asserted-by":"publisher","unstructured":"Xu H, Das A, Saenko K. R-C3D: region convolutional 3d network for temporal activity detection. In: proceedings of the IEEE\/CVF international conference on computer vision. 2017. pp. 5794\u20135803. 
https:\/\/doi.org\/10.48550\/arXiv.1703.07814.","DOI":"10.48550\/arXiv.1703.07814"},{"key":"10374_CR11","doi-asserted-by":"publisher","unstructured":"Zhao C, Thabet A K, Ghanem B. Video self-stitching graph network for temporal action localization. In: proceedings of the IEEE\/CVF international conference on computer vision. 2021. pp. 13658\u201313667. https:\/\/doi.org\/10.48550\/arXiv.2011.14598.","DOI":"10.48550\/arXiv.2011.14598"},{"key":"10374_CR12","doi-asserted-by":"publisher","unstructured":"Lin T, Zhao X, Su H, Wang C, Yang M. BSN: boundary sensitive network for temporal action proposal generation. In: European conference on computer vision. 2018. pp. 3\u201321. https:\/\/doi.org\/10.48550\/arXiv.1806.02964.","DOI":"10.48550\/arXiv.1806.02964"},{"key":"10374_CR13","doi-asserted-by":"publisher","unstructured":"Lin T, Liu X, Li X, Ding E, Wen S. BMN: boundary-matching network for temporal action proposal generation. In: Proceedings of the IEEE\/CVF international conference on computer vision. 2019. pp. 3889\u20133898. https:\/\/doi.org\/10.48550\/arXiv.1907.09702.","DOI":"10.48550\/arXiv.1907.09702"},{"key":"10374_CR14","doi-asserted-by":"crossref","unstructured":"Lin C, Li J, Wang Y, Tai Y, Luo D, Cui Z, et al. Fast learning of temporal action proposal via dense boundary generator. In: Proceedings of the 2020\u201334th AAAI conference on artificial intelligence. 2020. pp. 11499\u201311506.","DOI":"10.1609\/aaai.v34i07.6815"},{"key":"10374_CR15","doi-asserted-by":"publisher","unstructured":"Liang Z, Zhai P, Zheng D, Fang Y. Global-aware pyramid network with boundary adjustment for anchor-free temporal action detection. In: proceedings of the 3rd international conference on control, robotics and intelligent system. 2022. pp. 187\u2013193. https:\/\/doi.org\/10.1145\/3562007.3562041.","DOI":"10.1145\/3562007.3562041"},{"key":"10374_CR16","doi-asserted-by":"publisher","unstructured":"Shi H, Chen H, Zhao G. 
Attention-guided boundary refinement on anchor-free temporal action detection. In: Proceedings of the Scandinavian conference on image analysis. 2023. pp. 129\u2013139. https:\/\/doi.org\/10.1007\/978-3-031-31435-3_9.","DOI":"10.1007\/978-3-031-31435-3_9"},{"key":"10374_CR17","doi-asserted-by":"publisher","unstructured":"Laine S, Aila T. Temporal ensembling for semi-supervised learning. In: Proceedings of the 5th international conference on learning representations. 2017. https:\/\/doi.org\/10.48550\/arXiv.1610.02242.","DOI":"10.48550\/arXiv.1610.02242"},{"key":"10374_CR18","doi-asserted-by":"publisher","unstructured":"Tarvainen A, Valpola H. Mean teachers are better role models: weight-averaged consistency targets improve semi-supervised deep learning results. In: proceedings of the annual conference on neural information processing systems. 2017. pp. 1196\u20131205. https:\/\/doi.org\/10.48550\/arXiv.1703.01780.","DOI":"10.48550\/arXiv.1703.01780"},{"key":"10374_CR19","doi-asserted-by":"publisher","unstructured":"Berthelot D, Carlini N, Goodfellow I, Oliver A, Papernot N, Nicolas C. Mixmatch: a holistic approach to semi-supervised learning. In: proceedings of the annual conference on neural information processing systems. 2019. p. 5050\u20135060. https:\/\/doi.org\/10.48550\/arXiv.1905.02249.","DOI":"10.48550\/arXiv.1905.02249"},{"key":"10374_CR20","doi-asserted-by":"publisher","unstructured":"Sajjadi M, Javanmardi M, Tasdizen T. Regularization with stochastic transformations and perturbations for deep semi-supervised learning. In: proceedings of the annual conference on neural information processing systems. 2016. pp. 1171\u20131179. https:\/\/doi.org\/10.48550\/arXiv.1606.04586.","DOI":"10.48550\/arXiv.1606.04586"},{"key":"10374_CR21","unstructured":"Grandvalet Y, Bengio Y. Semi-supervised learning by entropy minimization. In: Proceedings of the 17th international conference on neural information processing systems. 2005. pp. 
529\u2013536."},{"issue":"5","key":"10374_CR22","doi-asserted-by":"publisher","first-page":"5970","DOI":"10.1109\/TPAMI.2022.3208419","volume":"45","author":"Y Jiang","year":"2023","unstructured":"Jiang Y, Li X, Chen Y, He Y, Xu Q, Yang Z, et al. Maxmatch: semi-supervised learning with worst-case consistency. IEEE Trans Pattern Anal Mach Intell. 2023;45(5):5970\u201387.","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"issue":"3","key":"10374_CR23","doi-asserted-by":"publisher","first-page":"626","DOI":"10.1007\/s11263-022-01723-4","volume":"131","author":"Y Fan","year":"2023","unstructured":"Fan Y, Kukleva A, Dai D, Schiele B. Revisiting consistency regularization for semi-supervised learning. Int J Comput Vision. 2023;131(3):626\u201343.","journal-title":"Int J Comput Vision"},{"issue":"22","key":"10374_CR24","doi-asserted-by":"publisher","first-page":"26797","DOI":"10.1007\/s10489-023-04950-5","volume":"53","author":"JH Park","year":"2023","unstructured":"Park JH, Kim JH, Ngo BH, Kwon JE, Cho SI. Adversarial representation teaching with perturbation-agnostic student-teacher structure for semi-supervised learning. Appl Intell. 2023;53(22):26797\u2013809.","journal-title":"Appl Intell"},{"key":"10374_CR25","unstructured":"Lee DH. Pseudo-label: the simple and efficient semi-supervised learning method for deep neural networks. In: Workshop on challenges in  representation learning. 2013. p. 896."},{"key":"10374_CR26","doi-asserted-by":"publisher","unstructured":"Sohn K, Berthelot D, Carlini N, Zhang Z, Carlini N, Cubuk E, et al. Fixmatch: simplifying semi-supervised learning with consistency and confidence. In: proceedings of the 34th conference on neural information processing systems. 2020. pp. 596\u2013608. https:\/\/doi.org\/10.48550\/arXiv.2001.07685.","DOI":"10.48550\/arXiv.2001.07685"},{"key":"10374_CR27","doi-asserted-by":"publisher","unstructured":"Hu S, Liu C, Dutta J, Chang M C, Lyu S, Ramakrishnan N. 
PseudoProp: robust pseudo-label generation for semi-supervised object detection in autonomous driving systems. In: proceedings of the IEEE computer society conference on computer vision and pattern recognition workshops. 2022. pp. 4389\u20134397. https:\/\/doi.org\/10.48550\/arXiv.2203.05983.","DOI":"10.48550\/arXiv.2203.05983"},{"issue":"6","key":"10374_CR28","doi-asserted-by":"publisher","first-page":"1847","DOI":"10.1007\/s10994-022-06208-6","volume":"112","author":"H Chang","year":"2023","unstructured":"Chang H, Xie G, Yu J, Ling Q, Gao F, Yu Y. A viable framework for semi-supervised learning on realistic dataset. Mach Learn. 2023;112(6):1847\u201369.","journal-title":"Mach Learn"},{"key":"10374_CR29","doi-asserted-by":"publisher","unstructured":"Ji J, Cao K, Niebles JC. Learning temporal action proposals with fewer labels. In: proceedings of the IEEE\/CVF international conference on computer vision. 2019. pp. 7073\u20137082. https:\/\/doi.org\/10.48550\/arXiv.1910.01286.","DOI":"10.48550\/arXiv.1910.01286"},{"key":"10374_CR30","doi-asserted-by":"publisher","unstructured":"Wang X, Zhang S, Qing Z, Shao Y, Gao C, Sang N. Self-supervised learning for semi-supervised temporal action proposal. In: proceedings of the IEEE conference on computer vision and pattern recognition. 2021. pp. 1905\u20131914. https:\/\/doi.org\/10.48550\/arXiv.2104.03214.","DOI":"10.48550\/arXiv.2104.03214"},{"key":"10374_CR31","doi-asserted-by":"publisher","unstructured":"Nag S, Zhu X, Song YZ, Xiang T. Semi-supervised temporal action detection with proposal-free masking. In: proceedings of the 17th European conference on computer vision. 2022. pp. 663\u2013680. https:\/\/doi.org\/10.48550\/arXiv.2207.07059.","DOI":"10.48550\/arXiv.2207.07059"},{"key":"10374_CR32","doi-asserted-by":"publisher","first-page":"102434","DOI":"10.1016\/j.displa.2023.102434","volume":"78","author":"D Li","year":"2023","unstructured":"Li D, Yang X, Tang Y, Zhang C, Zhang W, Ma L. 
Active learning with effective scoring functions for semi-supervised temporal action localization. Displays. 2023;78:102434.","journal-title":"Displays"},{"key":"10374_CR33","doi-asserted-by":"publisher","unstructured":"Xia K, Wang L, Zhou SP, Hua G, Tang W. Learning from noisy pseudo labels for semi-supervised temporal action localization. In: proceedings of the 2023 IEEE\/CVF international conference on computer vision. 2023. pp. 10126\u201310135. https:\/\/doi.org\/10.1109\/ICCV51070.2023.00932.","DOI":"10.1109\/ICCV51070.2023.00932"},{"issue":"3","key":"10374_CR34","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1007\/s00138-024-01521-7","volume":"35","author":"S Pehlivan","year":"2024","unstructured":"Pehlivan S, Laaksonen J. Temporal teacher with masked transformers for semi-supervised action proposal generation. Mach Vis Appl. 2024;35(3):1\u201315.","journal-title":"Mach Vis Appl"},{"key":"10374_CR35","doi-asserted-by":"publisher","unstructured":"Carreira J, Zisserman A. Quo vadis, action recognition? A new model and the kinetics dataset. In: proceedings of the IEEE conference on computer vision and pattern recognition. 2017. pp. 4724\u20134733. https:\/\/doi.org\/10.48550\/arXiv.1705.07750.","DOI":"10.48550\/arXiv.1705.07750"},{"key":"10374_CR36","unstructured":"Vaswani A, Shazeer N, Parmar N, Uszkoreit J, Jones L, Gomez AN, et al. Attention is all you need. 2017. arXiv preprint https:\/\/arxiv.org\/abs\/1706.03762"},{"key":"10374_CR37","doi-asserted-by":"publisher","unstructured":"Heilbron F C, Escorcia V, Ghanem B, Niebles J C. ActivityNet: a large-scale video benchmark for human activity understanding. In: proceedings of the IEEE conference on computer vision and pattern recognition. 2015. pp. 961\u2013970. https:\/\/doi.org\/10.1109\/CVPR.2015.7298698.","DOI":"10.1109\/CVPR.2015.7298698"},{"key":"10374_CR38","unstructured":"Jiang Y G, Liu J, Zamir A R, Toderici G, Laptev I, Shah M, et al. 
THUMOS challenge: action recognition with a large number of classes. 2014. Available from: https:\/\/www.crcv.ucf.edu\/THUMOS14\/. Accessed 10 Dec 2023."},{"key":"10374_CR39","doi-asserted-by":"publisher","unstructured":"Li J, Liu X, Zong Z, Zhang W, Zhang M, Song J. Graph attention-based proposal 3D convnets for action detection. In: proceedings of the 34th AAAI conference on artificial intelligence. 2020. pp. 4626\u20134633. https:\/\/doi.org\/10.1609\/aaai.v34i04.5893.","DOI":"10.1609\/aaai.v34i04.5893"},{"issue":"10","key":"10374_CR40","doi-asserted-by":"publisher","first-page":"2723","DOI":"10.1109\/TMM.2019.2959977","volume":"22","author":"P Chen","year":"2020","unstructured":"Chen P, Gan C, Shen G, Huang W, Zeng R, Tan M. Relation attention for temporal action localization. IEEE Trans Multimed. 2020;22(10):2723\u201333.","journal-title":"IEEE Trans Multimed"},{"key":"10374_CR41","doi-asserted-by":"publisher","unstructured":"Gao Z, Le W, Zhang Q, Niu Z, Zheng N, Hua G. Video imprint segmentation for temporal action detection in untrimmed videos. In: Proceedings of the AAAI conference on artificial intelligence. 2019. pp. 8328\u20138335. https:\/\/doi.org\/10.1609\/aaai.v33i01.33018328.","DOI":"10.1609\/aaai.v33i01.33018328"},{"key":"10374_CR42","doi-asserted-by":"publisher","unstructured":"Vaudaux-Ruth G, Chan-Hon-Tong A, Achard C. SALAD: self-assessment learning for action detection. In: Proceedings of the IEEE Winter conference on applications of computer vision. 2021. pp. 1268\u20131277. https:\/\/doi.org\/10.48550\/arXiv.2011.06958.","DOI":"10.48550\/arXiv.2011.06958"},{"key":"10374_CR43","doi-asserted-by":"publisher","unstructured":"Li X, Lin T, Liu X, Gan C, Zuo W, Li C, et al. Deep concept-wise temporal convolutional networks for action localization. In: proceedings of the 28th ACM international conference on multimedia. 2019. pp. 4004\u20134012. 
https:\/\/doi.org\/10.48550\/arXiv.1908.09442.","DOI":"10.48550\/arXiv.1908.09442"},{"key":"10374_CR44","doi-asserted-by":"publisher","unstructured":"Zeng R, Huang W, Tan M, Rong Y, Zhao P, Huang J, et al. Graph convolutional networks for temporal action localization. In: proceedings of the IEEE international conference on computer vision. 2019. pp. 7094\u20137103. https:\/\/doi.org\/10.48550\/arXiv.1909.03252.","DOI":"10.48550\/arXiv.1909.03252"},{"key":"10374_CR45","doi-asserted-by":"publisher","first-page":"503","DOI":"10.1109\/LSP.2021.3061289","volume":"28","author":"B Wang","year":"2021","unstructured":"Wang B, Yang L, Zhao Y. POLO: learning explicit cross-modality fusion for temporal action localization. IEEE Signal Process Lett. 2021;28:503\u20137.","journal-title":"IEEE Signal Process Lett"},{"key":"10374_CR46","unstructured":"Wu J, Sun P, Chen S, Yang J, Qi Z, Ma L, Luo P. Towards high-quality temporal action detection with sparse proposals. 2021. arXiv preprint https:\/\/arxiv.org\/abs\/2109.08847. Accessed 10 Dec 2023."},{"key":"10374_CR47","doi-asserted-by":"publisher","unstructured":"Liu X, Hu Y, Bai S, Ding F, Bai X, Torr P H. Multi-shot temporal event localization: a benchmark. In: proceedings of the IEEE\/CVF conference on computer vision and pattern recognition. 2021. pp. 12596\u201312606. https:\/\/doi.org\/10.48550\/arXiv.2012.09434.","DOI":"10.48550\/arXiv.2012.09434"},{"key":"10374_CR48","doi-asserted-by":"publisher","unstructured":"Xu M, Zhao C, Rojas D S, Thabet A, Bernard G. G-TAD: sub-graph localization for temporal action detection. In: proceedings of the IEEE computer society conference on computer vision and pattern recognition. 2020. pp. 10153\u201310162. https:\/\/doi.org\/10.48550\/arXiv.1911.11462.","DOI":"10.48550\/arXiv.1911.11462"},{"key":"10374_CR49","doi-asserted-by":"publisher","unstructured":"Long F, Yao T, Qiu Z, Tian X, Luo J, Mei T. Gaussian temporal awareness networks for action localization. 
In: proceedings of the IEEE\/CVF conference on computer vision and pattern recognition. 2019. pp. 344\u2013353. https:\/\/doi.org\/10.48550\/arXiv.1909.03877.","DOI":"10.48550\/arXiv.1909.03877"},{"key":"10374_CR50","doi-asserted-by":"publisher","unstructured":"Lin C, Xu C, Luo D, Wang Y, Tai Y, Wang C, et al. Learning salient boundary feature for anchor-free temporal action localization. In: proceedings of the IEEE conference on computer vision and pattern recognition. 2021. pp. 3319\u20133328. https:\/\/doi.org\/10.48550\/arXiv.2103.13137.","DOI":"10.48550\/arXiv.2103.13137"},{"key":"10374_CR51","doi-asserted-by":"publisher","first-page":"16","DOI":"10.1016\/j.neucom.2021.02.085","volume":"444","author":"W Zhang","year":"2021","unstructured":"Zhang W, Wang B, Ma S, Zhang Y, Zhao Y. I2Net: mining intra-video and inter-video attention for temporal action localization. Neurocomputing. 2021;444:16\u201329.","journal-title":"Neurocomputing"},{"key":"10374_CR52","doi-asserted-by":"publisher","first-page":"2103","DOI":"10.1109\/TIP.2020.3044218","volume":"30","author":"R Su","year":"2021","unstructured":"Su R, Xu D, Sheng L, Ouyang W. PCG-TAL: progressive cross-granularity cooperation for temporal action localization. IEEE Trans Image Process. 2021;30:2103\u201313.","journal-title":"IEEE Trans Image Process"},{"key":"10374_CR53","doi-asserted-by":"publisher","unstructured":"Wang Z, Liu Q. Progressive boundary refinement network for temporal action detection. In: proceedings of the 34th AAAI conference on artificial intelligence. 2020. pp. 11612\u201311619. https:\/\/doi.org\/10.1609\/aaai.v34i07.6829.","DOI":"10.1609\/aaai.v34i07.6829"},{"key":"10374_CR54","doi-asserted-by":"publisher","unstructured":"Alwassel H, Giancola S, Ghanem B. TSP: temporally-sensitive pretraining of video encoders for localization tasks. In: proceedings of the IEEE\/CVF international conference on computer vision. 2021. pp. 3173\u20133183. 
https:\/\/doi.org\/10.48550\/arXiv.2011.11479.","DOI":"10.48550\/arXiv.2011.11479"},{"issue":"11","key":"10374_CR55","doi-asserted-by":"publisher","first-page":"16127","DOI":"10.1007\/s11042-022-13962-1","volume":"82","author":"MG Gan","year":"2023","unstructured":"Gan MG, Zhang Y. Improving accuracy of temporal action detection by deep hybrid convolutional network. Multimed Tools Appl. 2023;82(11):16127\u201349.","journal-title":"Multimed Tools Appl"},{"key":"10374_CR56","doi-asserted-by":"publisher","unstructured":"Liu MH, Liu HY, Zhao SR, Ma F, Li ML, Dai ZH, et al. STAN: Spatial-temporal awareness network for temporal action detection. In: proceedings of the ACM international workshop on multimedia content analysis in sports. 2023. pp. 161\u2013165. https:\/\/doi.org\/10.1145\/3606038.3616169.","DOI":"10.1145\/3606038.3616169"}],"container-title":["Cognitive Computation"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s12559-024-10374-1.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s12559-024-10374-1\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s12559-024-10374-1.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,3,1]],"date-time":"2025-03-01T07:34:03Z","timestamp":1740814443000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s12559-024-10374-1"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,27]]},"references-count":56,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2025,2]]}},"alternative-id":["10374"],"URL":"https:\/\/doi.org\/10.1007\/s12559-024-10374-1","relation":{},"ISSN":["1866-9956","1866-9964"],"issn-type":[{"type":"
print","value":"1866-9956"},{"type":"electronic","value":"1866-9964"}],"subject":[],"published":{"date-parts":[[2024,11,27]]},"assertion":[{"value":"18 December 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"14 September 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"27 November 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"This article does not contain any studies that used human participants or animals.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethics Approval"}},{"value":"Informed consent was obtained from all individual participants included in the study.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Informed Consent"}},{"value":"The authors declare no competing interests.","order":4,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing Interests"}}],"article-number":"15"}}