{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,20]],"date-time":"2025-12-20T22:09:33Z","timestamp":1766268573289,"version":"3.44.0"},"publisher-location":"New York, NY, USA","reference-count":70,"publisher":"ACM","license":[{"start":{"date-parts":[[2023,10,26]],"date-time":"2023-10-26T00:00:00Z","timestamp":1698278400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2023,10,26]]},"DOI":"10.1145\/3581783.3612023","type":"proceedings-article","created":{"date-parts":[[2023,10,27]],"date-time":"2023-10-27T07:27:30Z","timestamp":1698391650000},"page":"1098-1107","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":14,"title":["Capturing Co-existing Distortions in User-Generated Content for No-reference Video Quality Assessment"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-3681-2196","authenticated-orcid":false,"given":"Kun","family":"Yuan","sequence":"first","affiliation":[{"name":"Kuaishou Technology, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7194-8198","authenticated-orcid":false,"given":"Zishang","family":"Kong","sequence":"additional","affiliation":[{"name":"Peking University, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3617-2184","authenticated-orcid":false,"given":"Chuanchuan","family":"Zheng","sequence":"additional","affiliation":[{"name":"Kuaishou Technology, Peking, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8173-9290","authenticated-orcid":false,"given":"Ming","family":"Sun","sequence":"additional","affiliation":[{"name":"Kuaishou Technology, Peking, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3368-9206","authenticated-orcid":false,"given":"Xing","family":"Wen","sequence":"additional","affiliation":[{"name":"Kuaishou Technology, Peking, China"}]}],"member":"320","published-online":{"date-parts":[[2023,10,27]]},"reference":[{"key":"e_1_3_2_1_1_1","first-page":"113","article-title":"Fast and reliable structure-oriented video noise estimation","volume":"15","author":"Amer Aishy","year":"2005","unstructured":"Aishy Amer and Eric Dubois. 2005. Fast and reliable structure-oriented video noise estimation. IEEE TCSVT, Vol. 15, 1 (2005), 113--118.","journal-title":"IEEE TCSVT"},{"volume-title":"ViViT: A Video Vision Transformer","author":"Arnab Anurag","key":"e_1_3_2_1_2_1","unstructured":"Anurag Arnab, Mostafa Dehghani, Georg Heigold, Chen Sun, Mario Lucic, and Cordelia Schmid. 2021. ViViT: A Video Vision Transformer. In ICCV. IEEE, 6816--6826."},{"key":"e_1_3_2_1_3_1","volume-title":"ICML","volume":"139","author":"Bertasius Gedas","year":"2021","unstructured":"Gedas Bertasius, Heng Wang, and Lorenzo Torresani. 2021. Is Space-Time Attention All You Need for Video Understanding?. In ICML, Vol. 139. PMLR, 813--824."},{"volume-title":"A deep neural network for image quality assessment","author":"Bosse Sebastian","key":"e_1_3_2_1_4_1","unstructured":"Sebastian Bosse, Dominique Maniry, Thomas Wiegand, and Wojciech Samek. 2016. A deep neural network for image quality assessment. In ICIP. IEEE, 3773--3777."},{"volume-title":"Action Recognition? A New Model and the Kinetics Dataset","author":"Carreira Jo\u00e3o","key":"e_1_3_2_1_5_1","unstructured":"Jo\u00e3o Carreira and Andrew Zisserman. 2017. Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset. In CVPR. IEEE Computer Society, 4724--4733."},{"volume-title":"Deep Perceptual Preprocessing for Video Coding","author":"Chadha Aaron","key":"e_1_3_2_1_6_1","unstructured":"Aaron Chadha and Yiannis Andreopoulos. 2021. 
Deep Perceptual Preprocessing for Video Coding. In CVPR. Computer Vision Foundation \/ IEEE, 14852--14861."},{"key":"e_1_3_2_1_7_1","volume-title":"Afroz Mohiuddin, Lukasz Kaiser, David Benjamin Belanger, Lucy J. Colwell, and Adrian Weller.","author":"Choromanski Krzysztof Marcin","year":"2021","unstructured":"Krzysztof Marcin Choromanski, Valerii Likhosherstov, David Dohan, Xingyou Song, Andreea Gane, Tam\u00e1s Sarl\u00f3s, Peter Hawkins, Jared Quincy Davis, Afroz Mohiuddin, Lukasz Kaiser, David Benjamin Belanger, Lucy J. Colwell, and Adrian Weller. 2021. Rethinking Attention with Performers. In ICLR."},{"key":"e_1_3_2_1_8_1","unstructured":"MMAction2 Contributors. 2020. OpenMMLab's Next Generation Video Understanding Toolbox and Benchmark. https:\/\/github.com\/open-mmlab\/mmaction2."},{"volume-title":"ImageNet: A large-scale hierarchical image database","author":"Deng Jia","key":"e_1_3_2_1_9_1","unstructured":"Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. 2009. ImageNet: A large-scale hierarchical image database. In CVPR. IEEE Computer Society, 248--255."},{"key":"e_1_3_2_1_10_1","unstructured":"Alexey Dosovitskiy Lucas Beyer Alexander Kolesnikov Dirk Weissenborn Xiaohua Zhai Thomas Unterthiner Mostafa Dehghani Matthias Minderer Georg Heigold Sylvain Gelly Jakob Uszkoreit and Neil Houlsby. 2021. An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale. In ICLR."},{"key":"e_1_3_2_1_11_1","volume-title":"Bovik","author":"Ebenezer Joshua Peter","year":"2020","unstructured":"Joshua Peter Ebenezer, Zaixi Shang, Yongjun Wu, Hai Wei, and Alan C. Bovik. 2020. No-Reference Video Quality Assessment Using Space-Time Chips. In MMSP. IEEE, 1--6."},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"crossref","unstructured":"Hossein Talebi Esfandarani and Peyman Milanfar. 2018. Learned perceptual image enhancement. In ICCP. 
1--13.","DOI":"10.1109\/ICCPHOT.2018.8368474"},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"crossref","unstructured":"Haoqi Fan Bo Xiong Karttikeya Mangalam Yanghao Li Zhicheng Yan Jitendra Malik and Christoph Feichtenhofer. 2021. Multiscale Vision Transformers. In ICCV. 6824--6835.","DOI":"10.1109\/ICCV48922.2021.00675"},{"volume-title":"Perceptual Quality Assessment of Smartphone Photography","author":"Fang Yuming","key":"e_1_3_2_1_14_1","unstructured":"Yuming Fang, Hanwei Zhu, Yan Zeng, Kede Ma, and Zhou Wang. 2020. Perceptual Quality Assessment of Smartphone Photography. In CVPR. Computer Vision Foundation \/ IEEE, 3674--3683."},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"crossref","unstructured":"Christoph Feichtenhofer Haoqi Fan Jitendra Malik and Kaiming He. 2019. SlowFast Networks for Video Recognition. In ICCV. 6201--6210.","DOI":"10.1109\/ICCV.2019.00630"},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2017.01.054"},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2017.2768542"},{"volume-title":"No-Reference Image Quality Assessment with Reinforcement Recursive List-Wise Ranking","author":"Gu Jie","key":"e_1_3_2_1_18_1","unstructured":"Jie Gu, Gaofeng Meng, Cheng Da, Shiming Xiang, and Chunhong Pan. 2019. No-Reference Image Quality Assessment with Reinforcement Recursive List-Wise Ranking. In AAAI. AAAI Press, 8336--8343."},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.1145\/1618452.1618490"},{"key":"e_1_3_2_1_20_1","volume-title":"Shujun Li, and Dietmar Saupe.","author":"Hosu Vlad","year":"2017","unstructured":"Vlad Hosu, Franz Hahn, Mohsen Jenadeleh, Hanhe Lin, Hui Men, Tam\u00e1s Szir\u00e1nyi, Shujun Li, and Dietmar Saupe. 2017. The Konstanz natural video database (KoNViD-1k). In QoMEX. IEEE, 1--6."},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"crossref","unstructured":"Justin Johnson Alexandre Alahi and Li Fei-Fei. 2016. 
Perceptual Losses for Real-Time Style Transfer and Super-Resolution. In ECCV. 694--711.","DOI":"10.1007\/978-3-319-46475-6_43"},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.1090\/conm\/026\/737400"},{"key":"e_1_3_2_1_23_1","volume-title":"Doermann","author":"Kang Le","year":"2014","unstructured":"Le Kang, Peng Ye, Yi Li, and David S. Doermann. 2014. Convolutional Neural Networks for No-Reference Image Quality Assessment. In CVPR. IEEE Computer Society, 1733--1740."},{"key":"e_1_3_2_1_24_1","volume-title":"Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natsev, Mustafa Suleyman, and Andrew Zisserman.","author":"Kay Will","year":"2017","unstructured":"Will Kay, Jo\u00e3o Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natsev, Mustafa Suleyman, and Andrew Zisserman. 2017. The Kinetics Human Action Video Dataset. CoRR, Vol. abs\/1705.06950 (2017)."},{"key":"e_1_3_2_1_25_1","volume-title":"MUSIQ: Multi-scale Image Quality Transformer. In ICCV. 5128--5137.","author":"Ke Junjie","year":"2021","unstructured":"Junjie Ke, Qifei Wang, Yilin Wang, Peyman Milanfar, and Feng Yang. 2021. MUSIQ: Multi-scale Image Quality Transformer. In ICCV. 5128--5137."},{"key":"e_1_3_2_1_26_1","doi-asserted-by":"crossref","unstructured":"Jongyoo Kim and Sanghoon Lee. 2017. Deep Learning of Human Visual Sensitivity in Image Quality Assessment Framework. In CVPR. 1969--1977.","DOI":"10.1109\/CVPR.2017.213"},{"key":"e_1_3_2_1_27_1","first-page":"224","article-title":"Deep Video Quality Assessor: From Spatio-Temporal Visual Sensitivity to a Convolutional Neural Aggregation Network","volume":"11205","author":"Kim Woojae","year":"2018","unstructured":"Woojae Kim, Jongyoo Kim, Sewoong Ahn, Jinwoo Kim, and Sanghoon Lee. 2018. Deep Video Quality Assessor: From Spatio-Temporal Visual Sensitivity to a Convolutional Neural Aggregation Network. 
In ECCV, Vol. 11205. 224--241.","journal-title":"ECCV"},{"key":"e_1_3_2_1_28_1","volume-title":"Blindly Assess Quality of In-the-Wild Videos via Quality-aware Pre-training and Motion Perception. CoRR","author":"Li Bowen","year":"2021","unstructured":"Bowen Li, Weixia Zhang, Meng Tian, Guangtao Zhai, and Xianpei Wang. 2021b. Blindly Assess Quality of In-the-Wild Videos via Quality-aware Pre-training and Motion Perception. CoRR, Vol. abs\/2108.08505 (2021)."},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"crossref","unstructured":"Dingquan Li Tingting Jiang and Ming Jiang. 2019b. Quality Assessment of In-the-Wild Videos. In ACM Multimedia. ACM 2351--2359.","DOI":"10.1145\/3343031.3351028"},{"key":"e_1_3_2_1_30_1","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-020-01408-w"},{"volume-title":"No-reference image quality assessment with deep convolutional neural networks","author":"Li Yuming","key":"e_1_3_2_1_31_1","unstructured":"Yuming Li, Lai-Man Po, Litong Feng, and Fang Yuan. 2016. No-reference image quality assessment with deep convolutional neural networks. In DSP. IEEE, 685--689."},{"volume-title":"ICIAR (1) (Lecture Notes in Computer Science","author":"Li Zhuoran","key":"e_1_3_2_1_32_1","unstructured":"Zhuoran Li, Zhengfang Duanmu, Wentao Liu, and Zhou Wang. 2019a. AVC, HEVC, VP9, AVS2 or AV1? - A Comparative Study of State-of-the-Art Video Encoders on 4K Videos. In ICIAR (1) (Lecture Notes in Computer Science, Vol. 11662). Springer, 162--173."},{"key":"e_1_3_2_1_33_1","doi-asserted-by":"crossref","unstructured":"Min Liu Guangtao Zhai Zhenyu Zhang Yuntao Sun Ke Gu and Xiaokang Yang. 2014. Blind image quality assessment for noise. In BMSB. 1--5.","DOI":"10.1109\/BMSB.2014.6873480"},{"key":"e_1_3_2_1_34_1","volume-title":"Quality assessment for real out-of-focus blurred images. JVCIR","author":"Liu Yutao","year":"2017","unstructured":"Yutao Liu, Ke Gu, Guangtao Zhai, Xianming Liu, Debin Zhao, and Wen Gao. 2017. 
Quality assessment for real out-of-focus blurred images. JVCIR (2017)."},{"key":"e_1_3_2_1_35_1","volume-title":"Video Swin Transformer. CoRR","author":"Liu Ze","year":"2021","unstructured":"Ze Liu, Jia Ning, Yue Cao, Yixuan Wei, Zheng Zhang, Stephen Lin, and Han Hu. 2021. Video Swin Transformer. CoRR, Vol. abs\/2106.13230 (2021)."},{"key":"e_1_3_2_1_36_1","doi-asserted-by":"crossref","unstructured":"Pina Marziliano Fr\u00e9d\u00e9ric Dufaux Stefan Winkler and Touradj Ebrahimi. 2002. A no-reference perceptual blur metric. In ICIP. 57--60.","DOI":"10.1109\/ICIP.2002.1038902"},{"key":"e_1_3_2_1_37_1","first-page":"127","article-title":"Saliency-induced reduced-reference quality index for natural scene and screen content images","volume":"145","author":"Min Xiongkuo","year":"2018","unstructured":"Xiongkuo Min, Ke Gu, Guangtao Zhai, Menghan Hu, and Xiaokang Yang. 2018. Saliency-induced reduced-reference quality index for natural scene and screen content images. SP, Vol. 145 (2018), 127--136.","journal-title":"SP"},{"key":"e_1_3_2_1_38_1","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2012.2214050"},{"key":"e_1_3_2_1_39_1","first-page":"289","article-title":"A Completely Blind Video Integrity Oracle","volume":"25","author":"Mittal Anish","year":"2016","unstructured":"Anish Mittal, Michele A. Saad, and Alan C. Bovik. 2016. A Completely Blind Video Integrity Oracle. IEEE TIP, Vol. 25, 1 (2016), 289--300.","journal-title":"IEEE TIP"},{"key":"e_1_3_2_1_40_1","first-page":"209","article-title":"Making a \"Completely Blind\" Image Quality Analyzer","volume":"20","author":"Mittal Anish","year":"2013","unstructured":"Anish Mittal, Rajiv Soundararajan, and Alan C. Bovik. 2013. Making a \"Completely Blind\" Image Quality Analyzer. IEEE SPL, Vol. 
20, 3 (2013), 209--212.","journal-title":"IEEE SPL"},{"volume-title":"PyTorch: An Imperative Style","author":"Paszke Adam","key":"e_1_3_2_1_41_1","unstructured":"Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas K\u00f6pf, Edward Z. Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. 2019. PyTorch: An Imperative Style, High-Performance Deep Learning Library. In NeurIPS. 8024--8035."},{"key":"e_1_3_2_1_42_1","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2014.2299154"},{"key":"e_1_3_2_1_43_1","first-page":"612","article-title":"Large-Scale Study of Perceptual Video Quality","volume":"28","author":"Sinno Zeina","year":"2019","unstructured":"Zeina Sinno. 2019. Large-Scale Study of Perceptual Video Quality. IEEE TIP, Vol. 28, 2 (2019), 612--627.","journal-title":"IEEE TIP"},{"key":"e_1_3_2_1_44_1","volume-title":"Wassim Hamidouche, and Hanene FZ Meftah.","author":"Telili Ahmed","year":"2022","unstructured":"Ahmed Telili, Sid Ahmed Fezza, Wassim Hamidouche, and Hanene FZ Meftah. 2022. 2BiVQA: Double Bi-LSTM based Video Quality Assessment of UGC Videos. arXiv preprint arXiv:2208.14774 (2022)."},{"key":"e_1_3_2_1_45_1","doi-asserted-by":"publisher","DOI":"10.1145\/2812802"},{"key":"e_1_3_2_1_46_1","doi-asserted-by":"crossref","unstructured":"Du Tran Heng Wang Matt Feiszli and Lorenzo Torresani. 2019. Video Classification With Channel-Separated Convolutional Networks. In ICCV.","DOI":"10.1109\/ICCV.2019.00565"},{"key":"e_1_3_2_1_47_1","doi-asserted-by":"crossref","unstructured":"Du Tran Heng Wang Lorenzo Torresani Jamie Ray Yann LeCun and Manohar Paluri. 2018. A Closer Look at Spatiotemporal Convolutions for Action Recognition. In CVPR. 
6450--6459.","DOI":"10.1109\/CVPR.2018.00675"},{"key":"e_1_3_2_1_48_1","first-page":"4449","article-title":"UGC-VQA: Benchmarking Blind Video Quality Assessment for User Generated Content","volume":"30","author":"Tu Zhengzhong","year":"2021","unstructured":"Zhengzhong Tu, Yilin Wang, Neil Birkbeck, Balu Adsumilli, and Alan C. Bovik. 2021a. UGC-VQA: Benchmarking Blind Video Quality Assessment for User Generated Content. IEEE TIP, Vol. 30 (2021), 4449--4464.","journal-title":"IEEE TIP"},{"key":"e_1_3_2_1_49_1","volume-title":"Bovik","author":"Tu Zhengzhong","year":"2021","unstructured":"Zhengzhong Tu, Xiangxu Yu, Yilin Wang, Neil Birkbeck, Balu Adsumilli, and Alan C. Bovik. 2021b. RAPIQUE: Rapid and Accurate Video Quality Prediction of User Generated Content. CoRR, Vol. abs\/2101.10955 (2021)."},{"key":"e_1_3_2_1_50_1","volume-title":"Linformer: Self-Attention with Linear Complexity. CoRR","author":"Wang Sinong","year":"2020","unstructured":"Sinong Wang, Belinda Z. Li, Madian Khabsa, Han Fang, and Hao Ma. 2020. Linformer: Self-Attention with Linear Complexity. CoRR, Vol. abs\/2006.04768 (2020)."},{"key":"e_1_3_2_1_51_1","volume-title":"Pyramid Vision Transformer: A Versatile Backbone for Dense Prediction without Convolutions. CoRR","author":"Wang Wenhai","year":"2021","unstructured":"Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan, Kaitao Song, Ding Liang, Tong Lu, Ping Luo, and Ling Shao. 2021b. Pyramid Vision Transformer: A Versatile Backbone for Dense Prediction without Convolutions. CoRR, Vol. abs\/2102.12122 (2021)."},{"key":"e_1_3_2_1_52_1","doi-asserted-by":"crossref","unstructured":"Xiaolong Wang Ross B. Girshick Abhinav Gupta and Kaiming He. 2018. Non-Local Neural Networks. In CVPR. 7794--7803.","DOI":"10.1109\/CVPR.2018.00813"},{"volume-title":"YouTube UGC Dataset for Video Compression Research","author":"Wang Yilin","key":"e_1_3_2_1_53_1","unstructured":"Yilin Wang and Balu Adsumilli. 2019. YouTube UGC Dataset for Video Compression Research. In MMSP. 
IEEE, 1--5."},{"key":"e_1_3_2_1_54_1","volume-title":"Neil Birkbeck, Balu Adsumilli, Peyman Milanfar, and Feng Yang.","author":"Wang Yilin","year":"2021","unstructured":"Yilin Wang, Junjie Ke, Hossein Talebi, Joong Gon Yim, Neil Birkbeck, Balu Adsumilli, Peyman Milanfar, and Feng Yang. 2021a. Rich Features for Perceptual Quality Assessment of UGC Videos. In CVPR. 13435--13444."},{"key":"e_1_3_2_1_55_1","volume-title":"Yu","author":"Wang Yunbo","year":"2017","unstructured":"Yunbo Wang, Mingsheng Long, Jianmin Wang, and Philip S. Yu. 2017. Spatiotemporal Pyramid Network for Video Action Recognition. In CVPR. IEEE Computer Society, 2097--2106."},{"key":"e_1_3_2_1_56_1","volume-title":"Evans","author":"Wang Zhou","year":"2000","unstructured":"Zhou Wang, Alan C. Bovik, and Brian L. Evans. 2000. Blind Measurement of Blocking Artifacts in Images. In ICIP. IEEE, 981--984."},{"key":"e_1_3_2_1_57_1","volume-title":"DisCoVQA: Temporal Distortion-Content Transformers for Video Quality Assessment. CoRR","author":"Wu Haoning","year":"2022","unstructured":"Haoning Wu, Chaofeng Chen, Liang Liao, Jingwen Hou, Wenxiu Sun, Qiong Yan, and Weisi Lin. 2022. DisCoVQA: Temporal Distortion-Content Transformers for Video Quality Assessment. CoRR, Vol. abs\/2206.09853 (2022)."},{"key":"e_1_3_2_1_58_1","doi-asserted-by":"crossref","unstructured":"Saining Xie Chen Sun Jonathan Huang Zhuowen Tu and Kevin Murphy. 2018. Rethinking Spatiotemporal Feature Learning: Speed-Accuracy Trade-offs in Video Classification. In ECCV. 318--335.","DOI":"10.1007\/978-3-030-01267-0_19"},{"key":"e_1_3_2_1_59_1","volume-title":"StarVQA: Space-Time Attention for Video Quality Assessment. CoRR","author":"Xing Fengchuang","year":"2021","unstructured":"Fengchuang Xing, Yuan-Gen Wang, Hanpin Wang, Leida Li, and Guopu Zhu. 2021. StarVQA: Space-Time Attention for Video Quality Assessment. CoRR, Vol. 
abs\/2108.09635 (2021)."},{"key":"e_1_3_2_1_60_1","doi-asserted-by":"crossref","unstructured":"Jiahua Xu Jing Li Xingguang Zhou Wei Zhou Baichao Wang and Zhibo Chen. 2021. Perceptual Quality Assessment of Internet Videos. In ACM Multimedia. ACM 1248--1257.","DOI":"10.1145\/3474085.3475486"},{"key":"e_1_3_2_1_61_1","volume-title":"Video Representation Learning with Visual Tempo Consistency. CoRR","author":"Yang Ceyuan","year":"2020","unstructured":"Ceyuan Yang, Yinghao Xu, Bo Dai, and Bolei Zhou. 2020. Video Representation Learning with Visual Tempo Consistency. CoRR, Vol. abs\/2006.15489 (2020)."},{"key":"e_1_3_2_1_62_1","volume-title":"Doermann","author":"Ye Peng","year":"2012","unstructured":"Peng Ye, Jayant Kumar, Le Kang, and David S. Doermann. 2012. Unsupervised feature learning framework for no-reference image quality assessment. In CVPR. IEEE Computer Society, 1098--1105."},{"key":"e_1_3_2_1_63_1","volume-title":"Bovik","author":"Ying Zhenqiang","year":"2021","unstructured":"Zhenqiang Ying, Maniratnam Mandal, Deepti Ghadiyaram, and Alan C. Bovik. 2021. Patch-VQ: 'Patching Up' the Video Quality Problem. In CVPR. Computer Vision Foundation \/ IEEE, 14019--14029."},{"volume-title":"Deep Neural Networks for No-Reference Video Quality Assessment","author":"You Junyong","key":"e_1_3_2_1_64_1","unstructured":"Junyong You and Jari Korhonen. 2019. Deep Neural Networks for No-Reference Video Quality Assessment. In ICIP. IEEE, 2349--2353."},{"volume-title":"Incorporating Convolution Designs Into Visual Transformers","author":"Yuan Kun","key":"e_1_3_2_1_65_1","unstructured":"Kun Yuan, Shaopeng Guo, Ziwei Liu, Aojun Zhou, Fengwei Yu, and Wei Wu. 2021. Incorporating Convolution Designs Into Visual Transformers. In ICCV. IEEE, 579--588."},{"key":"e_1_3_2_1_66_1","doi-asserted-by":"crossref","unstructured":"Richard Zhang Phillip Isola Alexei A. Efros Eli Shechtman and Oliver Wang. 2018. The Unreasonable Effectiveness of Deep Features as a Perceptual Metric. In CVPR. 
586--595.","DOI":"10.1109\/CVPR.2018.00068"},{"key":"e_1_3_2_1_67_1","first-page":"2244","article-title":"Blind Video Quality Assessment With Weakly Supervised Learning and Resampling Strategy","volume":"29","author":"Zhang Yu","year":"2019","unstructured":"Yu Zhang, Xinbo Gao, Lihuo He, Wen Lu, and Ran He. 2019. Blind Video Quality Assessment With Weakly Supervised Learning and Resampling Strategy. IEEE TCSVT, Vol. 29, 8 (2019), 2244--2255.","journal-title":"IEEE TCSVT"},{"volume-title":"2023 b. Quality-Aware Pre-Trained Models for Blind Image Quality Assessment","author":"Zhao Kai","key":"e_1_3_2_1_68_1","unstructured":"Kai Zhao, Kun Yuan, Ming Sun, Mading Li, and Xing Wen. 2023 b. Quality-Aware Pre-Trained Models for Blind Image Quality Assessment. In CVPR. IEEE Computer Society, 22302--22313."},{"key":"e_1_3_2_1_69_1","volume-title":"Frames and Clips Integration for Video Quality Assessment. In CVPR Workshops. IEEE Computer Society, 1302--1310","author":"Zhao Kai","year":"2023","unstructured":"Kai Zhao, Kun Yuan, Ming Sun, and Xing Wen. 2023 a. Zoom-VQA: Patches, Frames and Clips Integration for Video Quality Assessment. In CVPR Workshops. 
IEEE Computer Society, 1302--1310."},{"key":"e_1_3_2_1_70_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.ijleo.2014.07.010"}],"event":{"name":"MM '23: The 31st ACM International Conference on Multimedia","sponsor":["SIGMM ACM Special Interest Group on Multimedia"],"location":"Ottawa ON Canada","acronym":"MM '23"},"container-title":["Proceedings of the 31st ACM International Conference on Multimedia"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3581783.3612023","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3581783.3612023","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T00:04:13Z","timestamp":1755821053000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3581783.3612023"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,10,26]]},"references-count":70,"alternative-id":["10.1145\/3581783.3612023","10.1145\/3581783"],"URL":"https:\/\/doi.org\/10.1145\/3581783.3612023","relation":{},"subject":[],"published":{"date-parts":[[2023,10,26]]},"assertion":[{"value":"2023-10-27","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}