{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,27]],"date-time":"2025-03-27T23:30:32Z","timestamp":1743118232288,"version":"3.40.3"},"publisher-location":"Singapore","reference-count":27,"publisher":"Springer Nature Singapore","isbn-type":[{"type":"print","value":"9789819620630"},{"type":"electronic","value":"9789819620647"}],"license":[{"start":{"date-parts":[[2024,12,28]],"date-time":"2024-12-28T00:00:00Z","timestamp":1735344000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,12,28]],"date-time":"2024-12-28T00:00:00Z","timestamp":1735344000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-981-96-2064-7_21","type":"book-chapter","created":{"date-parts":[[2024,12,27]],"date-time":"2024-12-27T19:25:19Z","timestamp":1735327519000},"page":"285-296","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Real-Time Action Detection in\u00a0Volleyball Matches Using DETR Architecture"],"prefix":"10.1007","author":[{"given":"Mu-Jan","family":"Shih","sequence":"first","affiliation":[]},{"given":"Yi-Yu","family":"Hsu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,12,28]]},"reference":[{"key":"21_CR1","unstructured":"Bochkovskiy, A., Wang, C.Y., Liao, H.Y.M.: YOLOv4: optimal speed and accuracy of object detection. arXiv preprint arXiv:2004.10934 (2020)"},{"key":"21_CR2","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"213","DOI":"10.1007\/978-3-030-58452-8_13","volume-title":"Computer Vision \u2013 ECCV 2020","author":"N Carion","year":"2020","unstructured":"Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12346, pp. 213\u2013229. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58452-8_13"},{"key":"21_CR3","doi-asserted-by":"crossref","unstructured":"Carreira, J., Zisserman, A.: Quo Vadis, action recognition? A new model and the kinetics dataset. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6299\u20136308 (2017)","DOI":"10.1109\/CVPR.2017.502"},{"key":"21_CR4","doi-asserted-by":"crossref","unstructured":"Chen, Q., et al.: Group DeTR: fast DeTR training with group-wise one-to-many assignment. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 6633\u20136642 (2023)","DOI":"10.1109\/ICCV51070.2023.00610"},{"key":"21_CR5","doi-asserted-by":"crossref","unstructured":"Feichtenhofer, C.: X3D: expanding architectures for efficient video recognition. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 203\u2013213 (2020)","DOI":"10.1109\/CVPR42600.2020.00028"},{"key":"21_CR6","doi-asserted-by":"crossref","unstructured":"Feichtenhofer, C., Fan, H., Malik, J., He, K.: Slowfast networks for video recognition. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 6202\u20136211 (2019)","DOI":"10.1109\/ICCV.2019.00630"},{"key":"21_CR7","unstructured":"Ge, Z., Liu, S., Wang, F., Li, Z., Sun, J.: YOLOx: exceeding yolo series in 2021. arXiv preprint arXiv:2107.08430 (2021)"},{"key":"21_CR8","doi-asserted-by":"crossref","unstructured":"Girshick, R.: Fast R-CNN. In: Proceedings of the IEEE International Conference on Computer Vision. pp. 1440\u20131448 (2015)","DOI":"10.1109\/ICCV.2015.169"},{"key":"21_CR9","unstructured":"Hao, Z., et al.: Dino : DeTR with improved denoising anchor boxes for end-to-end object detection (2023). https:\/\/openreview.net\/forum?id=3mRwyG5one"},{"key":"21_CR10","doi-asserted-by":"crossref","unstructured":"Ibrahim, M.S., Muralidharan, S., Deng, Z., Vahdat, A., Mori, G.: A hierarchical deep temporal model for group activity recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1971\u20131980 (2016)","DOI":"10.1109\/CVPR.2016.217"},{"key":"21_CR11","unstructured":"Jocher, G., Chaurasia, A., Qiu, J.: Ultralytics YOLO (2023). https:\/\/github.com\/ultralytics\/ultralytics"},{"key":"21_CR12","unstructured":"Li, C., et al.: YOLOv6 v3. 0: a full-scale reloading. arXiv preprint arXiv:2301.05586 (2023)"},{"key":"21_CR13","unstructured":"Li, C., et al.: YOLOv6: a single-stage object detection framework for industrial applications. arXiv preprint arXiv:2209.02976 (2022)"},{"key":"21_CR14","doi-asserted-by":"crossref","unstructured":"Li, F., Zhang, H., Liu, S., Guo, J., Ni, L.M., Zhang, L.: DN-DeTR: accelerate DeTR training by introducing query denoising. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 13619\u201313627 (2022)","DOI":"10.1109\/CVPR52688.2022.01325"},{"key":"21_CR15","doi-asserted-by":"crossref","unstructured":"Li, S., et al.: Groupformer: group activity recognition with clustered spatial-temporal transformer. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 13668\u201313677 (2021)","DOI":"10.1109\/ICCV48922.2021.01341"},{"key":"21_CR16","doi-asserted-by":"crossref","unstructured":"Meng, D., et al.: Conditional DeTR for fast training convergence. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 3651\u20133660 (2021)","DOI":"10.1109\/ICCV48922.2021.00363"},{"key":"21_CR17","unstructured":"Pu, Y., et al.: Rank-DeTR for high quality object detection. In: Advances in Neural Information Processing Systems, vol. 36 (2024)"},{"key":"21_CR18","doi-asserted-by":"crossref","unstructured":"Redmon, J., Farhadi, A.: YOLO9000: better, faster, stronger. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 7263\u20137271 (2017)","DOI":"10.1109\/CVPR.2017.690"},{"key":"21_CR19","unstructured":"Redmon, J., Farhadi, A.: YOLOv3: an incremental improvement. arXiv preprint arXiv:1804.02767 (2018)"},{"key":"21_CR20","unstructured":"Simonyan, K., Zisserman, A.: Two-stream convolutional networks for action recognition in videos. In: Advances in Neural Information Processing Systems, vol. 27 (2014)"},{"key":"21_CR21","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"19","DOI":"10.1007\/978-3-031-19772-7_2","volume-title":"Computer Vision - ECCV 2022","author":"M Tamura","year":"2022","unstructured":"Tamura, M., Vishwakarma, R., Vennelakanti, R.: Hunting group clues with transformers for social group activity recognition. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13664, pp. 19\u201335. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19772-7_2"},{"key":"21_CR22","unstructured":"Veli\u010dkovi\u0107, P., Cucurull, G., Casanova, A., Romero, A., Lio, P., Bengio, Y.: Graph attention networks. arXiv preprint arXiv:1710.10903 (2017)"},{"key":"21_CR23","doi-asserted-by":"crossref","unstructured":"Wang, C.Y., Bochkovskiy, A., Liao, H.Y.M.: YOLOv7: trainable bag-of-freebies sets new state-of-the-art for real-time object detectors. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 7464\u20137475 (2023)","DOI":"10.1109\/CVPR52729.2023.00721"},{"issue":"11","key":"21_CR24","doi-asserted-by":"publisher","first-page":"2740","DOI":"10.1109\/TPAMI.2018.2868668","volume":"41","author":"L Wang","year":"2018","unstructured":"Wang, L., et al.: Temporal segment networks for action recognition in videos. IEEE Trans. Pattern Anal. Mach. Intell. 41(11), 2740\u20132755 (2018)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"21_CR25","unstructured":"Xizhou, Z., Weijie, S., Lewei, L., Bin, L., Xiaogang, W., Jifeng, D.: Deformable DeTR: deformable transformers for end-to-end object detection (2021). https:\/\/openreview.net\/forum?id=gZ9hCDWe6ke"},{"key":"21_CR26","doi-asserted-by":"crossref","unstructured":"Yuan, H., Ni, D., Wang, M.: Spatio-temporal dynamic inference network for group activity recognition. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 7476\u20137485 (2021)","DOI":"10.1109\/ICCV48922.2021.00738"},{"key":"21_CR27","doi-asserted-by":"crossref","unstructured":"Zhao, Y., et al.: DeTRs beat YOLOs on real-time object detection. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 16965\u201316974 (2024)","DOI":"10.1109\/CVPR52733.2024.01605"}],"container-title":["Lecture Notes in Computer Science","MultiMedia Modeling"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-96-2064-7_21","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,27]],"date-time":"2024-12-27T20:04:06Z","timestamp":1735329846000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-96-2064-7_21"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,28]]},"ISBN":["9789819620630","9789819620647"],"references-count":27,"URL":"https:\/\/doi.org\/10.1007\/978-981-96-2064-7_21","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024,12,28]]},"assertion":[{"value":"28 December 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"MMM","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Multimedia Modeling","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Nara","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Japan","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"9 January 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"11 January 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"31","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"mmm2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/mmm2025.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}