{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,17]],"date-time":"2026-02-17T16:07:01Z","timestamp":1771344421945,"version":"3.50.1"},"reference-count":87,"publisher":"Springer Science and Business Media LLC","issue":"2","license":[{"start":{"date-parts":[[2026,1,21]],"date-time":"2026-01-21T00:00:00Z","timestamp":1768953600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2026,1,21]],"date-time":"2026-01-21T00:00:00Z","timestamp":1768953600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62461028"],"award-info":[{"award-number":["62461028"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J Comput Vis"],"published-print":{"date-parts":[[2026,2]]},"DOI":"10.1007\/s11263-025-02626-w","type":"journal-article","created":{"date-parts":[[2026,1,21]],"date-time":"2026-01-21T10:45:42Z","timestamp":1768992342000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Blind Omnidirectional Image Quality Assessment: Embracing the Magic Power of Multimodal Large Language 
Models"],"prefix":"10.1007","volume":"134","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-0337-6877","authenticated-orcid":false,"given":"Jiebin","family":"Yan","sequence":"first","affiliation":[]},{"given":"Jiayu","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Junjie","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Pengfei","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Xuelin","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Ziwen","family":"Tan","sequence":"additional","affiliation":[]},{"given":"Yuming","family":"Fang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,1,21]]},"reference":[{"key":"2626_CR1","unstructured":"Bavishi, R., Elsen, E., Hawthorne, C., Nye, M., Odena, A., Somani, A., & Ta\u015f\u0131rlar, S. (2023). Introducing our multimodal models. Available: https:\/\/www.adept.ai\/blog\/fuyu-8b."},{"key":"2626_CR2","doi-asserted-by":"crossref","unstructured":"Chen, C., Yang, S., Wu, H., Liao, L., Zhang, Z., Wang, A., Sun, W., Yan, Q., & Lin, W. (2024). Q-Ground: Image quality grounding with large multi-modality models. In ACM International Conference on Multimedia (pp. 486\u2013495).","DOI":"10.1145\/3664647.3680575"},{"key":"2626_CR3","doi-asserted-by":"crossref","unstructured":"Chen, S., Zhang, Y., Li, Y., Chen, Z., & Wang, Z. (2018). Spherical structural similarity index for objective omnidirectional video quality assessment. In IEEE International Conference on Multimedia and Expo (pp. 1\u20136).","DOI":"10.1109\/ICME.2018.8486584"},{"key":"2626_CR4","unstructured":"Chen, T., Kornblith, S., Norouzi, M., & Hinton, G. (2020). A simple framework for contrastive learning of visual representations. In: International Conference on Machine Learning (pp. 1597\u20131607)."},{"key":"2626_CR5","doi-asserted-by":"crossref","unstructured":"Dai, J., Qi, H., Xiong, Y., Li, Y., Zhang, G., Hu, H., & Wei, Y. (2017). 
Deformable convolutional networks. In IEEE International Conference on Computer Vision (pp. 764\u2013773).","DOI":"10.1109\/ICCV.2017.89"},{"key":"2626_CR6","first-page":"49250","volume":"36","author":"W Dai","year":"2023","unstructured":"Dai, W., Li, J., Li, D., Tiong, A. M. H., Zhao, J., Wang, W., Li, B., Fung, P., & Hoi, S. (2023). InstructBLIP: Towards general-purpose vision-language models with instruction tuning. Advances in Neural Information Processing Systems, 36, 49250\u201349267. arXiv:2305.06500.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"2626_CR7","unstructured":"Deitke, M., Clark, C., Lee, S., Tripathi, R., Yang, Y., Park, J. S., Salehi, M., Muennighoff, N., Lo, K., Soldaini, L., & Lu, J. (2024). Molmo and pixmo: Open weights and open data for state-of-the-art multimodal models. arXiv:2409.17146 Available: https:\/\/huggingface.co\/allenai\/MolmoE-1B-0924."},{"key":"2626_CR8","first-page":"4171","volume":"1","author":"J Devlin","year":"2019","unstructured":"Devlin, J., Chang, M. W., Lee, K., & Toutanova, K. (2019). Bert: Pre-training of deep bidirectional transformers for language understanding. North American Chapter of the Association for Computational Linguistics: Human Language Technologies, 1, 4171\u20134186.","journal-title":"North American Chapter of the Association for Computational Linguistics: Human Language Technologies"},{"issue":"4","key":"2626_CR9","doi-asserted-by":"publisher","first-page":"1258","DOI":"10.1007\/s11263-020-01419-7","volume":"129","author":"K Ding","year":"2021","unstructured":"Ding, K., Ma, K., Wang, S., & Simoncelli, E. P. (2021). Comparison of full-reference image quality models for optimization of image processing systems. International Journal of Computer Vision, 129(4), 1258\u20131281.","journal-title":"International Journal of Computer Vision"},{"key":"2626_CR10","doi-asserted-by":"crossref","unstructured":"Dong, Y., Liu, X., Gao, Y., Zhou, X., Tan, T., & Zhai, G. (2023). 
Light-VQA: A multi-dimensional quality assessment model for low-light video enhancement. In ACM international Conference on Multimedia (pp. 1088\u20131097).","DOI":"10.1145\/3581783.3611923"},{"key":"2626_CR11","unstructured":"Face, H. (2023). Introducing idefics: An open reproduction of state-of-the-art visual language model. Available: https:\/\/huggingface.co\/blog\/idefics."},{"key":"2626_CR12","doi-asserted-by":"publisher","first-page":"2526","DOI":"10.1109\/TIP.2021.3053465","volume":"30","author":"Y Fang","year":"2021","unstructured":"Fang, Y., Zeng, Y., Jiang, W., Zhu, H., & Yan, J. (2021). Superpixel-based quality assessment of multi-exposure image fusion for both static and dynamic scenes. IEEE Transactions on Image Processing, 30, 2526\u20132537.","journal-title":"IEEE Transactions on Image Processing"},{"key":"2626_CR13","doi-asserted-by":"publisher","first-page":"580","DOI":"10.1609\/aaai.v36i1.19937","volume":"36","author":"Y Fang","year":"2022","unstructured":"Fang, Y., Huang, L., Yan, J., Liu, X., & Liu, Y. (2022). Perceptual quality assessment of omnidirectional images. AAAI Conference on Artificial Intelligence, 36, 580\u2013588.","journal-title":"AAAI Conference on Artificial Intelligence"},{"key":"2626_CR14","doi-asserted-by":"crossref","unstructured":"Fu, J., Hou, C., Zhou, W., Xu, J., & Chen, Z. (2022). Adaptive hypergraph convolutional network for no-reference 360-degree image quality assessment. In ACM International Conference on Multimedia (pp. 961\u2013969).","DOI":"10.1145\/3503161.3548337"},{"key":"2626_CR15","unstructured":"Google. (2023). Gemini pro Available: https:\/\/deepmind.google\/technologies\/gemini."},{"key":"2626_CR16","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., & Sun, J. (2016). Deep residual learning for image recognition. In IEEE International Conference on Computer Vision and Pattern Recognition (pp. 
770\u2013778).","DOI":"10.1109\/CVPR.2016.90"},{"key":"2626_CR17","unstructured":"Jia, C., Yang, Y., Xia, Y., Chen, Y. T., Parekh, Z., Pham, H., Le, Q., Sung, Y. H., Li, Z., & Duerig, T. (2021). Scaling up visual and vision-language representation learning with noisy text supervision. In International Conference on Machine Learning (pp. 4904\u20134916)."},{"key":"2626_CR18","doi-asserted-by":"publisher","first-page":"2364","DOI":"10.1109\/TIP.2021.3052073","volume":"30","author":"H Jiang","year":"2021","unstructured":"Jiang, H., Jiang, G., Yu, M., Zhang, Y., Yang, Y., Peng, Z., Chen, F., & Zhang, Q. (2021). Cubemap-based perception-driven blind quality assessment for 360-degree images. IEEE Transactions on Image Processing, 30, 2364\u20132377.","journal-title":"IEEE Transactions on Image Processing"},{"key":"2626_CR19","doi-asserted-by":"crossref","unstructured":"Ke, J., Ye, K., Yu, J., Wu, Y., Milanfar, P., & Yang, F. (2023). Vila: Learning image aesthetics from user comments with vision-language pretraining. In IEEE International Conference on Computer Vision and Pattern Recognition (pp. 10041\u201310051).","DOI":"10.1109\/CVPR52729.2023.00968"},{"issue":"4","key":"2626_CR20","doi-asserted-by":"publisher","first-page":"917","DOI":"10.1109\/TCSVT.2019.2898732","volume":"30","author":"HG Kim","year":"2019","unstructured":"Kim, H. G., Lim, H. T., & Ro, Y. M. (2019). Deep virtual reality image quality assessment with human perception guider for omnidirectional image. IEEE Transactions on Circuits and Systems for Video Technology, 30(4), 917\u2013928.","journal-title":"IEEE Transactions on Circuits and Systems for Video Technology"},{"key":"2626_CR21","unstructured":"Kwon, D., Kim, D., Ki, S., Jo, Y., Lee, H. E., & Kim, S. J. (2024). CLIP-Guided attribute aware pretraining for generalizable image quality assessment. arXiv:2406.01020."},{"key":"2626_CR22","unstructured":"Li, B., Zhang, K., Zhang, H., Guo, D., Zhang, R., Li, F., Zhang, Y., Liu, Z., & Li, C. (2024a). 
LLaVA-NeXT: Stronger llms supercharge multimodal capabilities in the wild. Available: https:\/\/llava-vl.github.io\/blog\/2024-05-10-llava-next-stronger-llms\/."},{"key":"2626_CR23","unstructured":"Li, B., Zhang, Y., Guo, D., Zhang, R., Li, F., Zhang, H., Zhang, K., Li, Y., Liu, Z., & Li, C. (2024b). LLaVA-OneVision: Easy visual task transfer. Available: https:\/\/huggingface.co\/llava-hf\/llava-onevision-qwen2-7b-ov-hf, arXiv:2408.03326."},{"key":"2626_CR24","unstructured":"Li, B., Zhang, Y., Guo, D., Zhang, R., Li, F., Zhang, H., Zhang, K., Li, Y., Liu, Z., & Li, C. (2024c). LLaVA-OneVision: Easy visual task transfer. Available: https:\/\/huggingface.co\/llava-hf\/llava-onevision-qwen2-0.5b-si-hf, arXiv:2408.03326."},{"key":"2626_CR25","doi-asserted-by":"crossref","unstructured":"Li, D., Jiang, T., & Jiang, M. (2019). Quality assessment of in-the-wild videos. In ACM International Conference on Multimedia (pp. 2351\u20132359).","DOI":"10.1145\/3343031.3351028"},{"key":"2626_CR26","unstructured":"Li, F., Zhang, R., Zhang, H., Zhang, Y., Li, B., Li, W., Ma, Z., & Li, C. (2024d). LLaVA-NeXT-Interleave: Tackling multi-image, video, and 3D in large multimodal models. Available: https:\/\/huggingface.co\/llava-hf\/llava-interleave-qwen-0.5b-hf, arXiv:2407.07895."},{"key":"2626_CR27","unstructured":"Li, F., Zhang, R., Zhang, H., Zhang, Y., Li, B., Li, W., Ma, Z., & Li, C. (2024e). Llava-next-interleave: Tackling multi-image, video, and 3D in large multimodal models. Available: https:\/\/huggingface.co\/llava-hf\/llava-interleave-qwen-7b-hf, arXiv:2407.07895."},{"key":"2626_CR28","unstructured":"Li, J., Li, D., Xiong, C., & Hoi, S. (2022). BLIP: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In: International Conference on Machine Learning (pp. 
12888\u201312900)."},{"issue":"4","key":"2626_CR29","doi-asserted-by":"publisher","first-page":"851","DOI":"10.1109\/TPAMI.2018.2889948","volume":"42","author":"K Ma","year":"2018","unstructured":"Ma, K., Duanmu, Z., Wang, Z., Wu, Q., Liu, W., Yong, H., Li, H., & Zhang, L. (2018). Group maximum differentiation competition: Model comparison with few samples. IEEE Transactions on Pattern Analysis and Machine Intelligence, 42(4), 851\u2013864.","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"2626_CR30","doi-asserted-by":"publisher","first-page":"4149","DOI":"10.1109\/TIP.2022.3181496","volume":"31","author":"PC Madhusudana","year":"2022","unstructured":"Madhusudana, P. C., Birkbeck, N., Wang, Y., Adsumilli, B., & Bovik, A. C. (2022). Image quality assessment using contrastive learning. IEEE Transactions on Image Processing, 31, 4149\u20134161.","journal-title":"IEEE Transactions on Image Processing"},{"issue":"3","key":"2626_CR31","doi-asserted-by":"publisher","first-page":"209","DOI":"10.1109\/LSP.2012.2227726","volume":"20","author":"A Mittal","year":"2012","unstructured":"Mittal, A., Soundararajan, R., & Bovik, A. C. (2012). Making a \u201ccompletely blind\u2019\u2019 image quality analyzer. IEEE Signal Processing Letters, 20(3), 209\u2013212.","journal-title":"IEEE Signal Processing Letters"},{"key":"2626_CR32","unstructured":"OpenAI. (2023). GPT-4 technical report. Tech. rep., OpenAI."},{"key":"2626_CR33","doi-asserted-by":"publisher","first-page":"1613","DOI":"10.1109\/TIP.2022.3144892","volume":"31","author":"Z Pan","year":"2022","unstructured":"Pan, Z., Yuan, F., Lei, J., Fang, Y., Shao, X., & Kwong, S. (2022). VCRNet: Visual compensation restoration network for no-reference image quality assessment. IEEE Transactions on Image Processing, 31, 1613\u20131627.","journal-title":"IEEE Transactions on Image Processing"},{"key":"2626_CR34","unstructured":"Peng, Z., Wang, W., Dong, L., Hao, Y., Huang, S., Ma, S., & Wei, F. 
(2023). Kosmos-2: Grounding multimodal large language models to the world. ArXiv abs\/2306, available: https:\/\/huggingface.co\/microsoft\/kosmos-2-patch14-224."},{"issue":"8","key":"2626_CR35","first-page":"9","volume":"1","author":"A Radford","year":"2019","unstructured":"Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., & Sutskever, I. (2019). Language models are unsupervised multitask learners. OpenAI blog, 1(8), 9.","journal-title":"OpenAI blog"},{"key":"2626_CR36","unstructured":"Radford, A., Kim, J. W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., & Krueger, G. (2021). Learning transferable visual models from natural language supervision. In International Conference on Machine Learning (pp. 8748\u20138763)."},{"key":"2626_CR37","unstructured":"Ramesh, A., Pavlov, M., Goh, G., Gray, S., Voss, C., Radford, A., Chen, M., & Sutskever, I. (2021). Zero-shot text-to-image generation. In International Conference on Machine Learning (pp. 8821\u20138831)."},{"key":"2626_CR38","doi-asserted-by":"crossref","unstructured":"Shen, A., Salehi, B., Baldwin, T., & Qi, J. (2019). A joint model for multimodal document quality assessment. In 2019 ACM\/IEEE Joint Conference on Digital Libraries (pp. 107\u2013110).","DOI":"10.1109\/JCDL.2019.00024"},{"key":"2626_CR39","doi-asserted-by":"crossref","unstructured":"Su, S., Yan, Q., Zhu, Y., Zhang, C., Ge, X., Sun, J., & Zhang, Y. (2020). Blindly assess image quality in the wild guided by a self-adaptive hyper network. In IEEE International Conference on Computer Vision and Pattern Recognition (pp. 3667\u20133676).","DOI":"10.1109\/CVPR42600.2020.00372"},{"key":"2626_CR40","unstructured":"Sun, Q., Fang, Y., Wu, L., Wang, X., & Cao, Y. (2023). EVA-CLIP: Improved training techniques for CLIP at scale. 
arXiv:2303.15389."},{"issue":"1","key":"2626_CR41","doi-asserted-by":"publisher","first-page":"64","DOI":"10.1109\/JSTSP.2019.2955024","volume":"14","author":"W Sun","year":"2019","unstructured":"Sun, W., Min, X., Zhai, G., Gu, K., Duan, H., & Ma, S. (2019). MC360IQA: A multi-channel CNN for blind 360-degree image quality assessment. IEEE Journal of Selected Topics in Signal Processing, 14(1), 64\u201377.","journal-title":"IEEE Journal of Selected Topics in Signal Processing"},{"issue":"9","key":"2626_CR42","first-page":"1408","volume":"24","author":"Y Sun","year":"2017","unstructured":"Sun, Y., Lu, A., & Yu, L. (2017). Weighted-to-spherically-uniform quality evaluation for omnidirectional video. IEEE Signal Processing Letters, 24(9), 1408\u20131412.","journal-title":"IEEE Signal Processing Letters"},{"key":"2626_CR43","volume":"85","author":"L Tang","year":"2020","unstructured":"Tang, L., Tian, C., Li, L., Hu, B., Yu, W., & Xu, K. (2020). Perceptual quality assessment for multimodal medical image fusion. Signal Processing: Image Communication, 85, Article 115852.","journal-title":"Signal Processing: Image Communication"},{"key":"2626_CR44","doi-asserted-by":"crossref","unstructured":"Tang, Z., Wang, Z., Peng, B., & Dong, J. (2025). CLIP-AGIQA: Boosting the performance of AI-generated image quality assessment with CLIP. In IEEE International Conference on Pattern Recognition (pp. 48\u201361).","DOI":"10.1007\/978-3-031-78125-4_4"},{"key":"2626_CR45","unstructured":"VQEG. (Technical report 2000). Final report from the video quality experts group on the validation of objective models of video quality assessment."},{"key":"2626_CR46","doi-asserted-by":"publisher","first-page":"2555","DOI":"10.1609\/aaai.v37i2.25353","volume":"37","author":"J Wang","year":"2023","unstructured":"Wang, J., Chan, K. C., & Loy, C. C. (2023). Exploring CLIP for assessing the look and feel of images. 
AAAI Conference on Artificial Intelligence, 37, 2555\u20132563.","journal-title":"AAAI Conference on Artificial Intelligence"},{"key":"2626_CR47","unstructured":"Wang, J., Duan, H., Zhai, G., & Min, X. (2024a). Quality assessment for AI generated images with instruction tuning. arXiv:2405.07346."},{"key":"2626_CR48","unstructured":"Wang, P., Bai, S., Tan, S., Wang, S., Fan, Z., Bai, J., Chen, K., Liu, X., Wang, J., Ge, W., Fan, Y., Dang, K., Du, M., Ren, X., Men, R., Liu, D., Zhou, C., Zhou, J., & Lin, J. (2024b). Qwen2-VL: Enhancing vision-language model\u2019s perception of the world at any resolution. arXiv:2409.12191 Available: https:\/\/huggingface.co\/Qwen\/Qwen2-VL-7B-Instruct."},{"key":"2626_CR49","unstructured":"Wang, P., Bai, S., Tan, S., Wang, S., Fan, Z., Bai, J., Chen, K., Liu, X., Wang, J., Ge, W., Fan, Y., Dang, K., Du, M., Ren, X., Men, R., Liu, D., Zhou, C., Zhou, J., & Lin, J. (2024c). Qwen2-VL: Enhancing vision-language model\u2019s perception of the world at any resolution. arXiv:2409.12191 Available: https:\/\/huggingface.co\/Cylingo\/Xinyuan-VL-2B."},{"key":"2626_CR50","doi-asserted-by":"crossref","unstructured":"Wang, P., Sun, W., Zhang, Z., Jia, J., Jiang, Y., Zhang, Z., Min, X., & Zhai, G. (2024d). Large multi-modality model assisted AI-generated image quality assessment. In ACM International Conference on Multimedia (pp. 7803\u20137812).","DOI":"10.1145\/3664647.3681471"},{"key":"2626_CR51","doi-asserted-by":"crossref","unstructured":"Wang, Z., & Rehman, A. (2017). Begin with the end in mind: A unified end-to-end quality-of-experience monitoring, optimization and management framework. In SMPTE Motion Imaging Journal (pp. 1\u201311).","DOI":"10.5594\/M001774"},{"issue":"12","key":"2626_CR52","doi-asserted-by":"publisher","first-page":"8","DOI":"10.1167\/8.12.8","volume":"8","author":"Z Wang","year":"2008","unstructured":"Wang, Z., & Simoncelli, E. P. (2008). 
Maximum differentiation (mad) competition: A methodology for comparing computational models of perceptual quantities. Journal of Vision, 8(12), 8\u20138.","journal-title":"Journal of Vision"},{"key":"2626_CR53","doi-asserted-by":"crossref","unstructured":"Wu, H., Chen, C., Hou, J., Liao, L., Wang, A., Sun, W., Yan, Q., & Lin, W. (2022). FAST-VQA: Efficient end-to-end video quality assessment with fragment sampling. In European Conference on Computer Vision (pp. 538\u2013554).","DOI":"10.1007\/978-3-031-20068-7_31"},{"key":"2626_CR54","unstructured":"Wu, H., Zhang, Z., Zhang, E., Chen, C., Liao, L., Wang, A., Li, C., Sun, W., Yan, Q., Zhai, G., & Lin, W. (2024a). Q-Bench: A benchmark for general-purpose foundation models on low-level vision. In International Conference on Learning Representations."},{"key":"2626_CR55","doi-asserted-by":"crossref","unstructured":"Wu, H., Zhang, Z., Zhang, E., Chen, C., Liao, L., Wang, A., Xu, K., Li, C., Hou, J., Zhai, G., & Xue, G. (2024b). Q-Instruct: Improving low-level visual abilities for multi-modality foundation models. In IEEE International Conference on Computer Vision and Pattern Recognition (pp. 25490\u201325500).","DOI":"10.1109\/CVPR52733.2024.02408"},{"key":"2626_CR56","doi-asserted-by":"crossref","unstructured":"Wu, H., Zhu, H., Zhang, Z., Zhang, E., Chen, C., Liao, L., Li, C., Wang, A., Sun, W., Yan, Q., & Liu, X. (2024c). Towards open-ended visual quality comparison. In European Conference on Computer Vision (pp. 360\u2013377).","DOI":"10.1007\/978-3-031-72646-0_21"},{"key":"2626_CR57","first-page":"64957","volume":"36","author":"T Wu","year":"2023","unstructured":"Wu, T., Shi, S., Cai, H., Cao, M., Xiao, J., Zheng, Y., & Yang, Y. (2023). Assessor360: Multi-sequence network for blind omnidirectional image quality assessment. 
Advances in Neural Information Processing Systems, 36, 64957\u201364970.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"2626_CR58","doi-asserted-by":"crossref","unstructured":"Wu, T., Ma, K., Liang, J., Yang, Y., & Zhang, L. (2024d). A comprehensive study of multimodal large language models for image quality assessment. In European Conference on Computer Vision (pp. 143\u2013160).","DOI":"10.1007\/978-3-031-72904-1_9"},{"issue":"1","key":"2626_CR59","doi-asserted-by":"publisher","first-page":"291","DOI":"10.1109\/TBC.2024.3511927","volume":"71","author":"F Xing","year":"2024","unstructured":"Xing, F., Li, M., Wang, Y. G., Zhu, G., & Cao, X. (2024). CLIPVQA: Video quality assessment via CLIP. IEEE Transactions on Broadcasting, 71(1), 291\u2013306.","journal-title":"IEEE Transactions on Broadcasting"},{"issue":"5","key":"2626_CR60","doi-asserted-by":"publisher","first-page":"1724","DOI":"10.1109\/TCSVT.2020.3015186","volume":"31","author":"J Xu","year":"2020","unstructured":"Xu, J., Zhou, W., & Chen, Z. (2020). Blind omnidirectional image quality assessment with viewport oriented graph convolutional networks. IEEE Transactions on Circuits and Systems for Video Technology, 31(5), 1724\u20131737.","journal-title":"IEEE Transactions on Circuits and Systems for Video Technology"},{"key":"2626_CR61","first-page":"15903","volume":"36","author":"J Xu","year":"2024","unstructured":"Xu, J., Liu, X., Wu, Y., Tong, Y., Li, Q., Ding, M., Tang, J., & Dong, Y. (2024). Imagereward: Learning and evaluating human preferences for text-to-image generation. Neural Information Processing Systems, 36, 15903\u201315935.","journal-title":"Neural Information Processing Systems"},{"key":"2626_CR62","doi-asserted-by":"publisher","first-page":"1768","DOI":"10.1007\/s11263-021-01450-2","volume":"129","author":"J Yan","year":"2021","unstructured":"Yan, J., Zhong, Y., Fang, Y., Wang, Z., & Ma, K. (2021). 
Exposing semantic segmentation failures via maximum discrepancy competition. International Journal of Computer Vision, 129, 1768\u20131786.","journal-title":"International Journal of Computer Vision"},{"issue":"05","key":"2626_CR63","doi-asserted-by":"publisher","first-page":"1430","DOI":"10.11834\/jig.210790","volume":"27","author":"J Yan","year":"2022","unstructured":"Yan, J., Fang, Y., & Liu, X. (2022). The review of distortion-related image quality assessment. Journal of Image and Graphics, 27(05), 1430\u20131466.","journal-title":"Journal of Image and Graphics"},{"issue":"10","key":"2626_CR64","first-page":"2196","volume":"46","author":"J Yan","year":"2023","unstructured":"Yan, J., Fang, Y., Yao, Y., & Sui, X. (2023). A survey on recent advances in video quality assessment. Chinese Journal of Computers, 46(10), 2196\u20132224.","journal-title":"Chinese Journal of Computers"},{"issue":"3","key":"2626_CR65","doi-asserted-by":"publisher","first-page":"2782","DOI":"10.1109\/TCSVT.2024.3497994","volume":"35","author":"J Yan","year":"2024","unstructured":"Yan, J., Rao, J., Chen, J., Tan, Z., Liu, W., & Fang, Y. (2024). Multitask auxiliary network for perceptual quality assessment of non-uniformly distorted omnidirectional images. IEEE Transactions on Circuits and Systems for Video Technology, 35(3), 2782\u20132793.","journal-title":"IEEE Transactions on Circuits and Systems for Video Technology"},{"issue":"12","key":"2626_CR66","doi-asserted-by":"publisher","first-page":"3699","DOI":"10.11834\/jig.240188","volume":"29","author":"J Yan","year":"2024","unstructured":"Yan, J., Tan, Z., Wu, K., Liu, X., & Yuming, F. (2024). Viewport-independent and deformation-unaware no-reference omnidirectional image quality assessment. 
Journal of Image and Graphics, 29(12), 3699\u20133711.","journal-title":"Journal of Image and Graphics"},{"key":"2626_CR67","doi-asserted-by":"publisher","first-page":"2695","DOI":"10.1109\/TMM.2025.3535372","volume":"27","author":"J Yan","year":"2025","unstructured":"Yan, J., Rao, J., Liu, X., Fang, Y., Zuo, Y., & Liu, W. (2025). Subjective and objective quality assessment of non-uniformly distorted omnidirectional images. IEEE Transactions on Multimedia, 27, 2695\u20132707.","journal-title":"IEEE Transactions on Multimedia"},{"key":"2626_CR68","doi-asserted-by":"publisher","first-page":"1326","DOI":"10.1109\/TIP.2025.3539468","volume":"34","author":"J Yan","year":"2025","unstructured":"Yan, J., Tan, Z., Fang, Y., Chen, J., Jiang, W., & Wang, Z. (2025). Omnidirectional image quality captioning: A large-scale database and a new model. IEEE Transactions on Image Processing, 34, 1326\u20131339.","journal-title":"IEEE Transactions on Image Processing"},{"issue":"5","key":"2626_CR69","first-page":"1","volume":"21","author":"J Yan","year":"2025","unstructured":"Yan, J., Wu, K., Chen, J., Tan, Z., Fang, Y., & Liu, W. (2025). Viewport-unaware blind omnidirectional image quality assessment: A flexible and effective paradigm. ACM Transactions on Multimedia Computing, Communications and Applications, 21(5), 1\u201319.","journal-title":"ACM Transactions on Multimedia Computing, Communications and Applications"},{"key":"2626_CR70","doi-asserted-by":"crossref","unstructured":"Yang, L., Xu, M., Liu, T., Huo, L., & Gao, X. (2022a). Tvformer: Trajectory-guided visual quality assessment on 360$$^{\\circ }$$ images with transformers. In ACM International Conference on Multimedia (pp. 799\u2013808).","DOI":"10.1145\/3503161.3547748"},{"key":"2626_CR71","doi-asserted-by":"crossref","unstructured":"Yang, S., Wu, T., Shi, S., Lao, S., Gong, Y., Cao, M., Wang, J., & Yang, Y. (2022b). MANIQA: Multi-dimension attention network for no-reference image quality assessment. 
In IEEE International Conference on Computer Vision and Pattern Recognition (pp. 1191\u20131200).","DOI":"10.1109\/CVPRW56347.2022.00126"},{"key":"2626_CR72","doi-asserted-by":"crossref","unstructured":"Yi, R., Tian, H., Gu, Z., Lai, Y. K., & Rosin, P. L. (2023). Towards artistic image aesthetics assessment: a large-scale dataset and a new method. In IEEE International Conference on Computer Vision and Pattern Recognition (pp. 22388\u201322397).","DOI":"10.1109\/CVPR52729.2023.02144"},{"key":"2626_CR73","doi-asserted-by":"crossref","unstructured":"You, Z., Li, Z., Gu, J., Yin, Z., Xue, T., & Dong, C. (2024). Depicting beyond scores: Advancing image quality assessment through multi-modal language models. In European Conference on Computer Vision (pp. 259\u2013276).","DOI":"10.1007\/978-3-031-72970-6_15"},{"key":"2626_CR74","doi-asserted-by":"crossref","unstructured":"Yu, M., Lakshman, H., & Girod, B. (2015). A framework to evaluate omnidirectional video coding schemes. In 2015 IEEE International symposium on mixed and augmented reality (pp. 31\u201336).","DOI":"10.1109\/ISMAR.2015.12"},{"key":"2626_CR75","first-page":"57","volume":"9970","author":"V Zakharchenko","year":"2016","unstructured":"Zakharchenko, V., Choi, K. P., & Park, J. H. (2016). Quality metric for spherical panoramic video. Optics and Photonics for Information Processing X, 9970, 57\u201365.","journal-title":"Optics and Photonics for Information Processing X"},{"key":"2626_CR76","doi-asserted-by":"publisher","first-page":"1600","DOI":"10.1109\/TIP.2024.3362135","volume":"33","author":"LA Zeng","year":"2024","unstructured":"Zeng, L. A., & Zheng, W. S. (2024). Multimodal action quality assessment. IEEE Transactions on Image Processing, 33, 1600\u20131613.","journal-title":"IEEE Transactions on Image Processing"},{"key":"2626_CR77","doi-asserted-by":"crossref","unstructured":"Zhang, B., Zhang, P., Dong, X., Zang, Y., & Wang, J. (2024). Long-CLIP: Unlocking the long-text capability of CLIP. 
In European Conference on Computer Vision (pp. 310\u2013325).","DOI":"10.1007\/978-3-031-72983-6_18"},{"issue":"1","key":"2626_CR78","doi-asserted-by":"publisher","first-page":"36","DOI":"10.1109\/TCSVT.2018.2886771","volume":"30","author":"W Zhang","year":"2018","unstructured":"Zhang, W., Ma, K., Yan, J., Deng, D., & Wang, Z. (2018). Blind image quality assessment using a deep bilinear convolutional neural network. IEEE Transactions on Circuits and Systems for Video Technology, 30(1), 36\u201347.","journal-title":"IEEE Transactions on Circuits and Systems for Video Technology"},{"key":"2626_CR79","doi-asserted-by":"publisher","first-page":"3474","DOI":"10.1109\/TIP.2021.3061932","volume":"30","author":"W Zhang","year":"2021","unstructured":"Zhang, W., Ma, K., Zhai, G., & Yang, X. (2021). Uncertainty-aware blind image quality assessment in the laboratory and wild. IEEE Transactions on Image Processing, 30, 3474\u20133486.","journal-title":"IEEE Transactions on Image Processing"},{"key":"2626_CR80","doi-asserted-by":"crossref","unstructured":"Zhang, W., Zhai, G., Wei, Y., Yang, X., & Ma, K. (2023). Blind image quality assessment via vision-language correspondence: A multitask learning perspective. In IEEE International Conference on Computer Vision and Pattern Recognition (pp. 14071\u201314081).","DOI":"10.1109\/CVPR52729.2023.01352"},{"key":"2626_CR81","unstructured":"Zhang, Z., Wu, H., Li, C., Zhou, Y., Sun, W., Min, X., Chen, Z., Liu, X., Lin, W., & Zhai, G. (2025). A-Bench: Are LMMs masters at evaluating AI-generated images? In International Conference on Learning Representations."},{"key":"2626_CR82","doi-asserted-by":"crossref","unstructured":"Zhao, K., Yuan, K., Sun, M., & Wen, X. (2023). Zoom-vqa: Patches, frames and CLIPs integration for video quality assessment. In IEEE International Conference on Computer Vision and Pattern Recognition (pp. 
1302\u20131310).","DOI":"10.1109\/CVPRW59228.2023.00137"},{"key":"2626_CR83","unstructured":"Zhou, H., Tang, L., Yang, R., Qin, G., Zhang, Y., Hu, R., & Li, X. (2024). UniQA: Unified vision-language pre-training for image quality and aesthetic assessment. arXiv:2406.01069."},{"issue":"2","key":"2626_CR84","doi-asserted-by":"publisher","first-page":"396","DOI":"10.1109\/TBC.2022.3231101","volume":"69","author":"M Zhou","year":"2023","unstructured":"Zhou, M., Chen, L., Wei, X., Liao, X., Mao, Q., Wang, H., Pu, H., Luo, J., Xiang, T., & Fang, B. (2023). Perception-oriented u-shaped transformer network for 360-degree no-reference image quality assessment. IEEE Transactions on Broadcasting, 69(2), 396\u2013405.","journal-title":"IEEE Transactions on Broadcasting"},{"key":"2626_CR85","doi-asserted-by":"crossref","unstructured":"Zhou, Y., Yu, M., Ma, H., Shao, H., & Jiang, G. (2018). Weighted-to-spherically-uniform ssim objective quality evaluation for panoramic video. In 2018 14th IEEE International Conference on Signal Processing (pp. 54\u201357).","DOI":"10.1109\/ICSP.2018.8652269"},{"key":"2626_CR86","doi-asserted-by":"crossref","unstructured":"Zhu, H., Shi, J., Shao, Z., Yao, R., Zhou, Y., Zhao, J., & Li, L. (2024a). Attribute-driven multimodal hierarchical prompts for image aesthetic quality assessment. In ACM International Conference on Multimedia (pp. 2399\u20132408).","DOI":"10.1145\/3664647.3681175"},{"issue":"12","key":"2626_CR87","doi-asserted-by":"publisher","first-page":"12873","DOI":"10.1109\/TCSVT.2024.3434999","volume":"34","author":"H Zhu","year":"2024","unstructured":"Zhu, H., Sui, X., Chen, B., Liu, X., Chen, P., Fang, Y., & Wang, S. (2024b). 2AFC prompting of large multimodal models for image quality assessment. 
IEEE Transactions on Circuits and Systems for Video Technology, 34(12), 12873\u201312878.","journal-title":"IEEE Transactions on Circuits and Systems for Video Technology"}],"container-title":["International Journal of Computer Vision"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-025-02626-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11263-025-02626-w","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-025-02626-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,17]],"date-time":"2026-02-17T15:20:28Z","timestamp":1771341628000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11263-025-02626-w"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,1,21]]},"references-count":87,"journal-issue":{"issue":"2","published-print":{"date-parts":[[2026,2]]}},"alternative-id":["2626"],"URL":"https:\/\/doi.org\/10.1007\/s11263-025-02626-w","relation":{},"ISSN":["0920-5691","1573-1405"],"issn-type":[{"value":"0920-5691","type":"print"},{"value":"1573-1405","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,1,21]]},"assertion":[{"value":"3 March 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"3 November 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"21 January 2026","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that 
they have no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflicts of Interest"}}],"article-number":"78"}}