{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T12:08:21Z","timestamp":1730203701700,"version":"3.28.0"},"reference-count":31,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,1,6]],"date-time":"2024-01-06T00:00:00Z","timestamp":1704499200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,1,6]],"date-time":"2024-01-06T00:00:00Z","timestamp":1704499200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,1,6]]},"DOI":"10.1109\/ccnc51664.2024.10454650","type":"proceedings-article","created":{"date-parts":[[2024,3,18]],"date-time":"2024-03-18T14:53:49Z","timestamp":1710773629000},"page":"37-43","source":"Crossref","is-referenced-by-count":0,"title":["SwitchingNet: Edge-Assisted Model Switching for Accurate Video Recognition Over Best-Effort Networks"],"prefix":"10.1109","author":[{"given":"Florian","family":"Beye","sequence":"first","affiliation":[{"name":"NEC Corporation,Kawasaki,Japan"}]},{"given":"Yasunori","family":"Babazaki","sequence":"additional","affiliation":[{"name":"NEC Corporation,Kawasaki,Japan"}]},{"given":"Ryuhei","family":"Ando","sequence":"additional","affiliation":[{"name":"NEC Corporation,Kawasaki,Japan"}]},{"given":"Takashi","family":"Oshiba","sequence":"additional","affiliation":[{"name":"NEC Corporation,Kawasaki,Japan"}]},{"given":"Koichi","family":"Nihei","sequence":"additional","affiliation":[{"name":"NEC Corporation,Kawasaki,Japan"}]},{"given":"Katsuhiko","family":"Takahashi","sequence":"additional","affiliation":[{"name":"NEC Corporation,Kawasaki,Japan"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Yolov4: Optimal speed and accuracy of object detection","volume":"abs\/2004.10934","author":"Bochkovskiy","year":"2020","journal-title":"CoRR"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2019.2925910"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.compag.2020.105300"},{"key":"ref4","article-title":"A comprehensive study of deep video action recognition","volume":"abs\/2012.06567","author":"Zhu","year":"2020","journal-title":"CoRR"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00630"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00710"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/COMST.2022.3218527"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2012.2221191"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICPR48806.2021.9412455"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2022.3204755"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/s11042-019-07948-9"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/IWCMC48107.2020.9148347"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2013.6638007"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462653"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICPR.2018.8546064"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP.2019.8803275"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP46576.2022.9897530"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1145\/3394171.3413968"},{"key":"ref19","article-title":"A coding framework and benchmark towards compressed video understanding","volume":"abs\/2202.02813","author":"Tian","year":"2022","journal-title":"CoRR"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CCNC51644.2023.10059877"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2020.3044015"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3548249"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00631"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00136"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICC42927.2021.9500973"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/GLOCOM.2013.6831293"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP42928.2021.9506217"},{"key":"ref28","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","volume-title":"Proceedings of The 33rd International Conference on Machine Learning","volume":"48","author":"Mnih"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1406.3269"},{"volume-title":"x265, the free h.265\/hevc encoder","year":"2021","key":"ref30"},{"key":"ref31","first-page":"8024","article-title":"Pytorch: An imperative style, high-performance deep learning library","volume-title":"Advances in Neural Information Processing Systems 32","author":"Paszke"}],"event":{"name":"2024 IEEE 21st Consumer Communications & Networking Conference (CCNC)","start":{"date-parts":[[2024,1,6]]},"location":"Las Vegas, NV, USA","end":{"date-parts":[[2024,1,9]]}},"container-title":["2024 IEEE 21st Consumer Communications &amp; Networking Conference (CCNC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10454139\/10454627\/10454650.pdf?arnumber=10454650","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,26]],"date-time":"2024-03-26T09:33:39Z","timestamp":1711445619000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10454650\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,1,6]]},"references-count":31,"URL":"https:\/\/doi.org\/10.1109\/ccnc51664.2024.10454650","relation":{},"subject":[],"published":{"date-parts":[[2024,1,6]]}}}