{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,14]],"date-time":"2026-03-14T09:57:04Z","timestamp":1773482224534,"version":"3.50.1"},"reference-count":39,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2025,12,11]],"date-time":"2025-12-11T00:00:00Z","timestamp":1765411200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,12,11]],"date-time":"2025-12-11T00:00:00Z","timestamp":1765411200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J Data Sci Anal"],"published-print":{"date-parts":[[2026,6]]},"DOI":"10.1007\/s41060-025-00962-1","type":"journal-article","created":{"date-parts":[[2025,12,11]],"date-time":"2025-12-11T05:43:32Z","timestamp":1765431812000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Enhancing aerial human action recognition through GAN-boosted ResNeXt architecture with squeeze-and-excitation network"],"prefix":"10.1007","volume":"21","author":[{"given":"Surbhi","family":"Kapoor","sequence":"first","affiliation":[]},{"given":"Akashdeep","family":"Sharma","sequence":"additional","affiliation":[]},{"given":"Amandeep","family":"Verma","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,12,11]]},"reference":[{"key":"962_CR1","doi-asserted-by":"crossref","unstructured":"Oh, S., Hoogs, A., Perera, A., Cuntoor, N., Chen, C.C.: A large-scale benchmark dataset for event recognition in surveillance video. In: Proceedings of 8th IEEE International Conference on Advanced Video and Signal Based Surveillance, 527\u2013528 (2011)","DOI":"10.1109\/AVSS.2011.6027400"},{"key":"962_CR2","doi-asserted-by":"publisher","DOI":"10.1016\/j.jvcir.2024.104298","author":"S Kapoor","year":"2024","unstructured":"Kapoor, S., Sharma, A., Verma, A.: Diving deep into human action recognition in aerial videos: a survey. J. Vis. Commun. Image Represent. (2024). https:\/\/doi.org\/10.1016\/j.jvcir.2024.104298","journal-title":"J. Vis. Commun. Image Represent."},{"key":"962_CR3","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2022.109036","volume":"250","author":"S Umirzakova","year":"2022","unstructured":"Umirzakova, S., Whangbo, T.K.: Detailed feature extraction network-based fine-grained face segmentation. Knowl.-Based Syst. 250, 109036 (2022)","journal-title":"Knowl.-Based Syst."},{"key":"962_CR4","doi-asserted-by":"crossref","unstructured":"Soleimani, A., Nasrabadi, N.M.: Convolutional neural networks for aerial multi-label pedestrian detection. In: Proceedings of 21st International Conference on Information Fusion, pp. 1005\u20131010 (2018)","DOI":"10.23919\/ICIF.2018.8455494"},{"key":"962_CR5","doi-asserted-by":"crossref","unstructured":"Barekatain, M., Marti, M., Shih, H.F.: Okutama-Action: An aerial view video dataset for concurrent human action detection. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 28\u201335 (2017)","DOI":"10.1109\/CVPRW.2017.267"},{"key":"962_CR6","doi-asserted-by":"crossref","unstructured":"Algamdi, A.M., Sanchez, V., Li, C.T.: Dronecaps: recognition of human actions in drone videos using capsule networks with binary volume comparisons. In: Proceedings of the IEEE International Conference on Image Processing, pp. 3174\u20133178 (2020)","DOI":"10.1109\/ICIP40778.2020.9190864"},{"key":"962_CR7","unstructured":"Nishimura, H., Tasaka, K., Kawanishi, Y., H. Murase.: Multiple human tracking using multi-cues including primitive action features. arXiv preprint arXiv: 1909.08171 (2019)"},{"issue":"8","key":"962_CR8","doi-asserted-by":"publisher","first-page":"2226","DOI":"10.1109\/TIP.2006.877407","volume":"15","author":"L Zhang","year":"2006","unstructured":"Zhang, L., Wu, X.: An edge-guided image interpolation algorithm via directional filtering and data fusion. IEEE Trans. Image Process. 15(8), 2226\u20132238 (2006)","journal-title":"IEEE Trans. Image Process."},{"key":"962_CR9","doi-asserted-by":"crossref","unstructured":"Zhang, H., Liu, D., Xiong, Z.: Two-stream action recognition oriented video super-resolution. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 8799\u20138808 (2019)","DOI":"10.1109\/ICCV.2019.00889"},{"key":"962_CR10","first-page":"256","volume":"9249","author":"GJ Burghouts","year":"2014","unstructured":"Burghouts, G.J., Eekeren, A.W.M., Dijk, J.: Focus-of-attention for human activity recognition from UAVs. Electro-Opt. Infrared Syst. Technol. Appl. XI 9249, 256\u2013267 (2014)","journal-title":"Electro-Opt. Infrared Syst. Technol. Appl. XI"},{"key":"962_CR11","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2023.109505","volume":"140","author":"S Kapoor","year":"2023","unstructured":"Kapoor, S., Sharma, A., Verma, A., Singh, S.: Aeriform in-action: a novel dataset for human action recognition in aerial videos. Pattern Recogn. 140, 109505 (2023)","journal-title":"Pattern Recogn."},{"issue":"3","key":"962_CR12","doi-asserted-by":"publisher","DOI":"10.3390\/drones7030148","volume":"7","author":"NA Othman","year":"2023","unstructured":"Othman, N.A., Aydin, I.: Development of a novel lightweight CNN model for classification of human actions in UAV-captured videos. Drones 7(3), 148 (2023)","journal-title":"Drones"},{"issue":"18","key":"962_CR13","doi-asserted-by":"publisher","DOI":"10.3390\/s22187020","volume":"22","author":"T Ahmad","year":"2022","unstructured":"Ahmad, T., Marc, C., Yutaka, M., Prendinger, H.: Detecting human actions in drone images using YoloV5 and stochastic gradient boosting. Sensors 22(18), 7020 (2022)","journal-title":"Sensors"},{"key":"962_CR14","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2019.107140","volume":"100","author":"H Mliki","year":"2020","unstructured":"Mliki, H., Bouhlel, F., Hammami, M.: Human activity recognition from UAV-captured video sequences. Pattern Recogn. 100, 107140 (2020)","journal-title":"Pattern Recogn."},{"key":"962_CR15","doi-asserted-by":"publisher","first-page":"57","DOI":"10.1016\/j.neunet.2022.12.005","volume":"159","author":"S Kumar","year":"2023","unstructured":"Kumar, S., Luthra, A., Pahwa, E., Tiwari, K.: DroneAttention\u202f: Sparse weighted temporal attention for drone-camera based activity recognition. Neural Netw. 159, 57\u201369 (2023)","journal-title":"Neural Netw."},{"key":"962_CR16","doi-asserted-by":"crossref","unstructured":"Peng, H., Razi, A.: Fully autonomous UAV-based action recognition system using aerial imagery. In: Proceedings of International Symposium on Visual Computing, 12509, 276\u2013290 (2020)","DOI":"10.1007\/978-3-030-64556-4_22"},{"key":"962_CR17","doi-asserted-by":"publisher","DOI":"10.1016\/j.cviu.2021.103186","volume":"206","author":"W Sultani","year":"2021","unstructured":"Sultani, W., Shah, M.: Human action recognition in drone videos using a few aerial training examples. Comput. Vis. Image Underst. 206, 103186 (2021)","journal-title":"Comput. Vis. Image Underst."},{"key":"962_CR18","doi-asserted-by":"crossref","unstructured":"Li, T., Liu, J., Zhang, W., Ni, Y., Wang, W., Li, Z.: UAV-human: A large benchmark for human behavior understanding with unmanned aerial vehicles. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 16266\u201316275 (2021)","DOI":"10.1109\/CVPR46437.2021.01600"},{"key":"962_CR19","doi-asserted-by":"crossref","unstructured":"Singh, A., Patil, D., Omkar, S.N.: Eye in the sky: Real-time drone surveillance system (DSS) for violent individuals identification using scatternet hybrid deep learning network. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, 1629\u20131637, (2018)","DOI":"10.1109\/CVPRW.2018.00214"},{"issue":"4","key":"962_CR20","doi-asserted-by":"publisher","first-page":"82","DOI":"10.3390\/drones3040082","volume":"3","author":"AG Perera","year":"2019","unstructured":"Perera, A.G., Law, Y.W., Chahl, J.: Drone-action: an outdoor recorded drone video dataset for action recognition. Drones 3(4), 82 (2019)","journal-title":"Drones"},{"issue":"4","key":"962_CR21","first-page":"567","volume":"20","author":"S Kapoor","year":"2023","unstructured":"Kapoor, S., Sharma, A., Verma, A., Dhull, V., Goyal, C.: A comparative study on deep learning and machine learning models for human action recognition in aerial videos. Int. Arab J. Inf. Technol. 20(4), 567\u2013574 (2023)","journal-title":"Int. Arab J. Inf. Technol."},{"issue":"1","key":"962_CR22","doi-asserted-by":"publisher","first-page":"172","DOI":"10.1109\/TPAMI.2019.2929257","volume":"43","author":"Z Cao","year":"2019","unstructured":"Cao, Z., Gines, H., Simon, T., Wei, S.E., Sheikh, Y.: Openpose: realtime multi-person 2D pose estimation using part affinity fields. IEEE Trans. Pattern Anal. Mach. Intell. 43(1), 172\u2013186 (2019)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"5","key":"962_CR23","doi-asserted-by":"publisher","first-page":"1851","DOI":"10.1007\/s11554-021-01171-2","volume":"18","author":"A Srivastava","year":"2021","unstructured":"Srivastava, A., Badal, T., Garg, A., Vidyarthi, A., Singh, R.: Recognizing human violent action using drone surveillance within real-time proximity. J. Real-Time Image Process. 18(5), 1851\u20131863 (2021)","journal-title":"J. Real-Time Image Process."},{"key":"962_CR24","doi-asserted-by":"crossref","unstructured":"Wang, Z., She, Q., Smolic, A.: Action-Net: Multipath excitation for action recognition. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 13214\u201313223 (2021)","DOI":"10.1109\/CVPR46437.2021.01301"},{"issue":"12","key":"962_CR25","doi-asserted-by":"publisher","first-page":"7774","DOI":"10.1109\/TCSVT.2023.3281671","volume":"33","author":"X Liu","year":"2023","unstructured":"Liu, X., Zhou, S., Lei, T., Jiang, P., Chen, Z., Lu, H.: First-person video domain adaptation with multi-scene cross-site datasets and attention-based methods. IEEE Trans. Circuits Syst. Video Technol. 33(12), 7774\u20137788 (2023)","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"962_CR26","unstructured":"Yang, L., Zhang, R.Y., Li, L., Xie, X.: Simam: A simple, parameter-free attention module for convolutional neural networks. In: International conference on machine learning. p. 11863\u201311874 (2021)"},{"issue":"2","key":"962_CR27","doi-asserted-by":"publisher","first-page":"295","DOI":"10.1109\/TPAMI.2015.2439281","volume":"38","author":"C Dong","year":"2016","unstructured":"Dong, C., Loy, C.C., He, K.M., Tang, X.O.: Image super-resolution using deep convolutional networks. IEEE Trans. Pattern Anal. Mach. Intell. 38(2), 295\u2013307 (2016)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"962_CR28","doi-asserted-by":"crossref","unstructured":"Lai, W.S., Huang, J.B, Ahuja, N., Yang, M.H.: Deep laplacian pyramid networks for fast and accurate super-resolution. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 624\u2013632 (2017)","DOI":"10.1109\/CVPR.2017.618"},{"key":"962_CR29","doi-asserted-by":"crossref","unstructured":"Lim, B., Son, S., Kim, H., Nah, S., Lee, K.M.: Enhanced deep residual networks for single image super-resolution. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, 136\u2013144 (2017)","DOI":"10.1109\/CVPRW.2017.151"},{"key":"962_CR30","doi-asserted-by":"crossref","unstructured":"Kim, J., Lee, J.K., Lee, K.M.: Deeply-recursive convolutional network for image super-resolution. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 1637\u20131645 (2016)","DOI":"10.1109\/CVPR.2016.181"},{"key":"962_CR31","doi-asserted-by":"crossref","unstructured":"Tong, T., Li, G., Liu, X., Gao, Q.: Image super-resolution using dense skip connections. In: Proceedings of the IEEE Conference on Computer Vision, 4799\u20134807 (2017)","DOI":"10.1109\/ICCV.2017.514"},{"key":"962_CR32","doi-asserted-by":"crossref","unstructured":"Ledig, C., Theis, L., Huszar, F., Caballero, J., Cunningham, A., Acosta, A., Aitken, A., Tejani, A., Totz, J., Wang, Z., Shi, W.: Photo-realistic single image super-resolution using a generative adversarial network. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 4681\u20134690 (2017)","DOI":"10.1109\/CVPR.2017.19"},{"key":"962_CR33","doi-asserted-by":"crossref","unstructured":"Wang, X., Yu, K., Wu, S., Gu, J., Liu, Y., Dong, C., Qiao, Y., Loy, C.: ESRGAN: Enhanced super-resolution generative adversarial networks. In: Proceedings of European conference on computer vision workshops (2018)","DOI":"10.1007\/978-3-030-11021-5_5"},{"key":"962_CR34","doi-asserted-by":"crossref","unstructured":"Wang, X., Xie, L., Dong, C., Shan, Y.: Real-ESRGAN: Training real-world blind super-resolution with pure synthetic data. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, 1905\u20131914 (2021)","DOI":"10.1109\/ICCVW54120.2021.00217"},{"key":"962_CR35","first-page":"24261","volume":"34","author":"I Tolstikhin","year":"2021","unstructured":"Tolstikhin, I., Houlsby, N., Kolesnikov, A., Beyer, L., Zhai, X., Unterthiner, T., Yung, J., Steiner, A., Keysers, D., Uszkoreit, J., Lucic, M., Dosovitskiy, A.: MLP-mixer: an all-MLP architecture for vision. Adv. Neural. Inf. Process. Syst. 34, 24261\u201324272 (2021)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"962_CR36","doi-asserted-by":"crossref","unstructured":"Hu, J., Shen, L., Sun, G.: Squeeze-and-excitation networks. In: Proceedings of IEEE Conference on Computer Vision and Pattern Recognition, 7132\u20137141 (2018)","DOI":"10.1109\/CVPR.2018.00745"},{"key":"962_CR37","unstructured":"Nagendran, A., Harper, D., Shah, M.: UCF-ARG dataset, Center for Research in Computer Vision at the University of Central Florida. [Online]. Available: https:\/\/www.crcv.ucf.edu\/data\/UCF-ARG.php. Accessed: 28 Jan 2025"},{"key":"962_CR38","doi-asserted-by":"crossref","unstructured":"Woo, S., Park, J., Lee, J.Y., Kweon, I.S.: Cbam: Convolutional block attention module. In: Proceedings of the European conference on computer vision 3\u201319 (2018)","DOI":"10.1007\/978-3-030-01234-2_1"},{"key":"962_CR39","doi-asserted-by":"crossref","unstructured":"Fu, J., Liu, J., Tian, H., Li, Y., Bao, Y., Fang, Z., Lu, H.: Dual attention network for scene segmentation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 3146\u20133154 (2019)","DOI":"10.1109\/CVPR.2019.00326"}],"container-title":["International Journal of Data Science and Analytics"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s41060-025-00962-1.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s41060-025-00962-1","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s41060-025-00962-1.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,14]],"date-time":"2026-03-14T09:36:22Z","timestamp":1773480982000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s41060-025-00962-1"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,11]]},"references-count":39,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2026,6]]}},"alternative-id":["962"],"URL":"https:\/\/doi.org\/10.1007\/s41060-025-00962-1","relation":{},"ISSN":["2364-415X","2364-4168"],"issn-type":[{"value":"2364-415X","type":"print"},{"value":"2364-4168","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,12,11]]},"assertion":[{"value":"20 January 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"4 August 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"11 December 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no conflict of interests.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interests"}},{"value":"Not applicable.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent for publication"}}],"article-number":"74"}}