{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T18:52:33Z","timestamp":1775069553485,"version":"3.50.1"},"reference-count":58,"publisher":"Springer Science and Business Media LLC","issue":"5","license":[{"start":{"date-parts":[[2025,7,22]],"date-time":"2025-07-22T00:00:00Z","timestamp":1753142400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,7,22]],"date-time":"2025-07-22T00:00:00Z","timestamp":1753142400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Machine Vision and Applications"],"published-print":{"date-parts":[[2025,9]]},"DOI":"10.1007\/s00138-025-01724-6","type":"journal-article","created":{"date-parts":[[2025,7,22]],"date-time":"2025-07-22T03:46:19Z","timestamp":1753155979000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Quality assessment of synthetic images via spatial distortion recognition"],"prefix":"10.1007","volume":"36","author":[{"given":"Tomoya","family":"Sawada","sequence":"first","affiliation":[]},{"given":"Marie","family":"Katsurai","sequence":"additional","affiliation":[]},{"given":"Masashi","family":"Okubo","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,7,22]]},"reference":[{"key":"1724_CR1","unstructured":"Sch\u00f6nfeld, E., Sushko, V., Zhang, D., Gall, J., Schiele, B., Khoreva, A.: You only need adversarial supervision for semantic image synthesis. In: International Conference on Learning Representations (ICLR) (2021). https:\/\/openreview.net\/forum?id=yvQKLaqNE6M. 
Accessed 15 Mar 2024"},{"key":"1724_CR2","doi-asserted-by":"crossref","unstructured":"Zhang, L., Rao, A., Agrawala, M.: Adding conditional control to text-to-image diffusion models. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 3836\u20133847 (2023)","DOI":"10.1109\/ICCV51070.2023.00355"},{"key":"1724_CR3","unstructured":"Saharia, C., Chan, W., Saxena, S., Li, L., Whang, J., Denton, E., Ghasemipour, S., Gontijo-Lopes, R., Ayan, B.K., Salimans, T., Ho, J., Fleet, D.J., Norouzi, M.: Photorealistic Text-to-Image Diffusion Models with Deep Language Understanding. In: Advances in Neural Information Processing Systems (NeurIPS), vol. 35, pp. 36479\u201336494 (2022)"},{"key":"1724_CR4","doi-asserted-by":"crossref","unstructured":"Kang, M., Zhu, J.-Y., Zhang, R., Park, J., Shechtman, E., Paris, S., Park, T.: Scaling Up GANs for Text-to-Image Synthesis. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 10124\u201310134 (2023)","DOI":"10.1109\/CVPR52729.2023.00976"},{"key":"1724_CR5","doi-asserted-by":"publisher","unstructured":"Hessel, J., Holtzman, A., Forbes, M., Bras, R.L., Choi, Y.: CLIPScore: a reference-free evaluation metric for image captioning. In: Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing (EMNLP), pp. 7514\u20137528 (2021). https:\/\/doi.org\/10.18653\/v1\/2021.emnlp-main.595","DOI":"10.18653\/v1\/2021.emnlp-main.595"},{"key":"1724_CR6","unstructured":"Xu, J., Liu, X., Wu, Y., Tong, Y., Li, Q., Ding, M., Tang, J., Dong, Y.: ImageReward: Learning and Evaluating Human Preferences for Text-to-Image Generation. In: Advances in Neural Information Processing Systems (NeurIPS), vol. 36, pp. 15903\u201315935. Curran Associates, Inc., (2023). 
https:\/\/proceedings.neurips.cc\/paper_files\/paper\/2023\/file\/33646ef0ed554145eab65f6250fab0c9-Paper-Conference.pdf"},{"key":"1724_CR7","unstructured":"Mitri, O.D., Wang, R., Huber, M.F.: generative adversarial networks with limited data: a survey and benchmarking (2025). arXiv:2504.05456"},{"key":"1724_CR8","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2938900","author":"X Yang","year":"2019","unstructured":"Yang, X., Li, F., Liu, H.: A survey of DNN methods for blind image quality assessment. IEEE Access (2019). https:\/\/doi.org\/10.1109\/ACCESS.2019.2938900","journal-title":"IEEE Access"},{"key":"1724_CR9","doi-asserted-by":"publisher","first-page":"3282","DOI":"10.1109\/TCSVT.2019.2931589","volume":"30","author":"Y Chen","year":"2020","unstructured":"Chen, Y., Zhao, Y., Li, S., Zuo, W., Jia, W., Liu, X.: Blind quality assessment for cartoon images. IEEE Trans. Circuits Syst. Video Technol. 30, 3282\u20133288 (2020). https:\/\/doi.org\/10.1109\/TCSVT.2019.2931589","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"1724_CR10","doi-asserted-by":"publisher","unstructured":"Kang, L., Ye, P., Li, Y., Doermann, D.: Convolutional neural networks for no-reference image quality assessment. In: IEEE Conference on Computer Vision and Pattern Recognition, pp. 1733\u20131740 (2014). https:\/\/doi.org\/10.1109\/CVPR.2014.224","DOI":"10.1109\/CVPR.2014.224"},{"key":"1724_CR11","unstructured":"Salimans, T., Goodfellow, I., Zaremba, W., Cheung, V., Radford, A., Chen, X., Chen, X.: Improved Techniques for Training GANs. In: Advances in Neural Information Processing Systems (NIPS), vol. 29, pp. 2234\u20132242 (2016)"},{"key":"1724_CR12","unstructured":"Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., Hochreiter, S.: GANs trained by a two time-scale update rule converge to a local Nash equilibrium. In: Advances in Neural Information Processing Systems (NIPS), vol. 30, pp. 
6626\u20136637 (2017)"},{"key":"1724_CR13","doi-asserted-by":"crossref","unstructured":"Szegedy, C., Vanhoucke, V., Ioffe, S., Shlens, J., Wojna, Z.: Rethinking the inception architecture for computer vision. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 2818\u20132826 (2016)","DOI":"10.1109\/CVPR.2016.308"},{"key":"1724_CR14","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.-J., Li, K., Fei-Fei, L.: Imagenet: a large-scale hierarchical image database. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 248\u2013255 (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"1724_CR15","unstructured":"Ma, C., Shi, Z., Lu, Z., Xie, S., Chao, F., Sui, Y.: A survey on image quality assessment: insights, analysis, and future outlook (2025). arXiv:2502.08540"},{"issue":"8","key":"1724_CR16","doi-asserted-by":"publisher","first-page":"3998","DOI":"10.1109\/TIP.2018.2831899","volume":"27","author":"H Talebi","year":"2018","unstructured":"Talebi, H., Milanfar, P.: NIMA: neural image assessment. IEEE Trans. Image Process. 27(8), 3998\u20134011 (2018). https:\/\/doi.org\/10.1109\/TIP.2018.2831899","journal-title":"IEEE Trans. Image Process."},{"key":"1724_CR17","unstructured":"Avanaki, N.J., Ghildyal, A., Barman, N., Zadtootaghaj, S.: LAR-IQA: a lightweight, accurate, and robust no-reference image quality assessment model (2024). arXiv:2408.17057"},{"key":"1724_CR18","doi-asserted-by":"crossref","unstructured":"Yang, S., Wu, T., Shi, S., Lao, S., Gong, Y., Cao, M., Wang, J., Yang, Y.: MANIQA: multi-dimension attention network for no-reference image quality assessment. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, pp. 
1191\u20131200 (2022)","DOI":"10.1109\/CVPRW56347.2022.00126"},{"key":"1724_CR19","doi-asserted-by":"publisher","unstructured":"Shi, J., Gao, P., Qin, J.: Transformer-based no-reference image quality assessment via supervised contrastive learning. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 38, pp. 4829\u20134837 (2024). https:\/\/doi.org\/10.1609\/aaai.v38i5.28285. https:\/\/ojs.aaai.org\/index.php\/AAAI\/article\/view\/28285","DOI":"10.1609\/aaai.v38i5.28285"},{"key":"1724_CR20","doi-asserted-by":"publisher","unstructured":"Alsaafin, M., Alsheikh, M., Anwar, S., Usman, M.: Attention down-sampling transformer, relative ranking and self-consistency for blind image quality assessment. In: Proceedings of IEEE International Conference on Image Processing (ICIP), pp. 1260\u20131266 (2024). https:\/\/doi.org\/10.1109\/ICIP51287.2024.10647621","DOI":"10.1109\/ICIP51287.2024.10647621"},{"key":"1724_CR21","doi-asserted-by":"publisher","DOI":"10.3390\/s23010427","author":"L Han","year":"2023","unstructured":"Han, L., Lv, H., Zhao, Y., Liu, H., Bi, G., Yin, Z., Fang, Y.: Conv-former: a novel network combining convolution and self-attention for image quality assessment. Sensors (2023). https:\/\/doi.org\/10.3390\/s23010427","journal-title":"Sensors"},{"key":"1724_CR22","doi-asserted-by":"publisher","unstructured":"Guo, H., Hu, S., Wang, X., Chang, M.-C., Lyu, S.: Eyes tell all: irregular pupil shapes reveal GAN-generated faces. In: IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 2904\u20132908 (2022). https:\/\/doi.org\/10.1109\/ICASSP43922.2022.9746597","DOI":"10.1109\/ICASSP43922.2022.9746597"},{"issue":"3","key":"1724_CR23","doi-asserted-by":"publisher","first-page":"1575","DOI":"10.3390\/ai5030076","volume":"5","author":"A Lokner Ladevic","year":"2024","unstructured":"Lokner Ladevic, A., Kramberger, T., Kramberger, R., Vlahek, D.: Detection of AI-generated synthetic images with a lightweight CNN. 
AI 5(3), 1575\u20131593 (2024). https:\/\/doi.org\/10.3390\/ai5030076","journal-title":"AI"},{"key":"1724_CR24","doi-asserted-by":"publisher","unstructured":"Zhang, Z., Li, C., Sun, W., Liu, X., Min, X., Zhai, G.: A perceptual quality assessment exploration for AIGC images. In: 2023 IEEE International Conference on Multimedia and Expo Workshops (ICMEW), pp. 440\u2013445 (2023). https:\/\/doi.org\/10.1109\/ICMEW59549.2023.00082","DOI":"10.1109\/ICMEW59549.2023.00082"},{"key":"1724_CR25","unstructured":"Yuan, J., Yang, F., Li, J., Cao, X., Che, J., Lin, J., Cao, X.: PKU-AIGIQA-4K: A Perceptual quality assessment database for both text-to-image and image-to-image AI-generated images (2024). arXiv:2404.18409"},{"key":"1724_CR26","doi-asserted-by":"publisher","unstructured":"Lin, H., Hosu, V., Saupe, D.: KADID-10k: A large-scale artificially distorted IQA database. In: Proceedings of International Conference on Quality of Multimedia Experience (QoMEX), pp. 1\u20133 (2019). https:\/\/doi.org\/10.1109\/QoMEX.2019.8743252","DOI":"10.1109\/QoMEX.2019.8743252"},{"key":"1724_CR27","doi-asserted-by":"crossref","unstructured":"Simard, P.Y., Steinkraus, D., Platt, J.C., et al.: Best practices for convolutional neural networks applied to visual document analysis. In: International Conference on Document Analysis and Recognition (ICDAR), pp. 958\u2013963 (2003)","DOI":"10.1109\/ICDAR.2003.1227801"},{"key":"1724_CR28","unstructured":"Geirhos, R., Rubisch, P., Michaelis, C., Bethge, M., Wichmann, F.A., Brendel, W.: ImageNet-trained CNNs are biased towards texture; increasing shape bias improves accuracy and robustness. In: International Conference on Learning Representations (ICLR) (2019). https:\/\/openreview.net\/forum?id=Bygh9j09KX. Accessed 15 Mar 2024"},{"key":"1724_CR29","doi-asserted-by":"crossref","unstructured":"Liu, Z., Mao, H., Wu, C.-Y., Feichtenhofer, C., Darrell, T., Xie, S.: A ConvNet for the 2020s. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 11976\u201311986 (2022)","DOI":"10.1109\/CVPR52688.2022.01167"},{"key":"1724_CR30","unstructured":"Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., et al.: An image is worth 16x16 words: transformers for image recognition at scale. In: International Conference on Learning Representations (ICLR) (2020). https:\/\/openreview.net\/forum?id=YicbFdNTTy. Accessed 15 Mar 2024"},{"key":"1724_CR31","unstructured":"Anis, A.M., Ali, H., Sarfraz, S.: On the limitations of vision-language models in understanding image transforms (2025). arXiv:2503.09837"},{"key":"1724_CR32","doi-asserted-by":"crossref","unstructured":"Choi, Y., Uh, Y., Yoo, J., Ha, J.-W.: StarGAN v2: Diverse image synthesis for multiple domains. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 8188\u20138197 (2020)","DOI":"10.1109\/CVPR42600.2020.00821"},{"key":"1724_CR33","doi-asserted-by":"crossref","unstructured":"Karras, T., Laine, S., Aila, T.: A style-based generator architecture for generative adversarial networks. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 4401\u20134410 (2019)","DOI":"10.1109\/CVPR.2019.00453"},{"key":"1724_CR34","doi-asserted-by":"crossref","unstructured":"Karras, T., Laine, S., Aittala, M., Hellsten, J., Lehtinen, J., Aila, T.: Analyzing and improving the image quality of StyleGAN. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 8110\u20138119 (2020)","DOI":"10.1109\/CVPR42600.2020.00813"},{"key":"1724_CR35","doi-asserted-by":"crossref","unstructured":"Liu, Z., Luo, P., Wang, X., Tang, X.: Deep learning face attributes in the wild. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV), pp. 
3730\u20133738 (2015)","DOI":"10.1109\/ICCV.2015.425"},{"key":"1724_CR36","unstructured":"Tuli, S., Dasgupta, I., Grant, E., Griffiths, T.: Are convolutional neural networks or transformers more like human vision? In: Proceedings of the Annual Meeting of the Cognitive Science Society (CogSci), pp. 1844\u20131850 (2021)"},{"key":"1724_CR37","unstructured":"Steiner, A.P., Kolesnikov, A., Zhai, X., Wightman, R., Uszkoreit, J., Beyer, L.: How to train your vit? data, augmentation, and regularization in vision transformers. Transactions on Machine Learning Research (2022)"},{"key":"1724_CR38","unstructured":"Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. In: International Conference on Learning Representations (ICLR) (2018). https:\/\/openreview.net\/forum?id=Bkg6RiCqY7. Accessed 15 Mar 2024"},{"key":"1724_CR39","unstructured":"Loshchilov, I., Hutter, F.: SGDR: Stochastic gradient descent with warm restarts. In: International Conference on Learning Representations (ICLR) (2017). https:\/\/openreview.net\/forum?id=Skq89Scxx. Accessed 15 Mar 2024"},{"key":"1724_CR40","unstructured":"Paszke, A., Gross, S., Massa, F., Lerer, A., Bradbury, J., Chanan, G., Killeen, T., Lin, Z., Gimelshein, N., Antiga, L., Desmaison, A., Kopf, A., Yang, E., DeVito, Z., Raison, M., Tejani, A., Chilamkurthy, S., Steiner, B., Fang, L., Bai, J., Chintala, S.: PyTorch: an imperative style, high-performance deep learning library. In: Advances in Neural Information Processing Systems (NeurIPS), vol. 32, pp. 8026\u20138037 (2019)"},{"key":"1724_CR41","doi-asserted-by":"publisher","unstructured":"Wightman, R.: PyTorch Image Models. GitHub (2019). https:\/\/doi.org\/10.5281\/zenodo.4414861","DOI":"10.5281\/zenodo.4414861"},{"key":"1724_CR42","unstructured":"Micikevicius, P., Narang, S., Alben, J., Diamos, G., Elsen, E., Garcia, D., Ginsburg, B., Houston, M., Kuchaiev, O., Venkatesh, G., et al.: Mixed precision training. 
In: International Conference on Learning Representations (ICLR) (2018). https:\/\/openreview.net\/forum?id=r1gs9JgRZ. Accessed 15 Mar 2024"},{"key":"1724_CR43","doi-asserted-by":"publisher","unstructured":"Saito, M., Matsui, Y.: Illustration2Vec: a semantic vector representation of illustrations. In: SIGGRAPH Asia 2015 Technical Briefs, pp. 1\u20134 (2015). https:\/\/doi.org\/10.1145\/2820903.2820907","DOI":"10.1145\/2820903.2820907"},{"key":"1724_CR44","doi-asserted-by":"publisher","unstructured":"Sauer, A., Schwarz, K., Geiger, A.: StyleGAN-XL: Scaling StyleGAN to large diverse datasets. In: ACM SIGGRAPH 2022 Conference Proceedings, pp. 1\u201310 (2022). https:\/\/doi.org\/10.1145\/3528233.3530738","DOI":"10.1145\/3528233.3530738"},{"key":"1724_CR45","doi-asserted-by":"crossref","unstructured":"Feng, Q., Guo, C., Benitez-Quiroz, F., Martinez, A.M.: When Do GANs Replicate? On the Choice of Dataset Size. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 6701\u20136710 (2021)","DOI":"10.1109\/ICCV48922.2021.00663"},{"key":"1724_CR46","doi-asserted-by":"crossref","unstructured":"Tinsley, P., Czajka, A., Flynn, P.: This Face Does Not Exist... But It Might Be Yours! Identity Leakage in Generative Models. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision (WACV), pp. 1320\u20131328 (2021)","DOI":"10.1109\/WACV48630.2021.00136"},{"key":"1724_CR47","unstructured":"Carlini, N., Hayes, J., Nasr, M., Jagielski, M., Sehwag, V., Tram\u00e8r, F., Balle, B., Ippolito, D., Wallace, E.: Extracting training data from diffusion models. In: Proceedings of the 32nd USENIX Conference on Security Symposium, pp. 5253\u20135270 (2023)"},{"key":"1724_CR48","doi-asserted-by":"crossref","unstructured":"Pizzi, E., Roy, S.D., Ravindra, S.N., Goyal, P., Douze, M.: A self-supervised descriptor for image copy detection. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 
14532\u201314542 (2022)","DOI":"10.1109\/CVPR52688.2022.01413"},{"key":"1724_CR49","doi-asserted-by":"publisher","DOI":"10.2307\/2334029","author":"RA Bradley","year":"1952","unstructured":"Bradley, R.A., Terry, M.E.: Rank analysis of incomplete block designs: I. the method of paired comparisons. Biometrika (1952). https:\/\/doi.org\/10.2307\/2334029","journal-title":"Biometrika"},{"key":"1724_CR50","doi-asserted-by":"publisher","unstructured":"Cela-Conde, C.J., Ayala, F.J., Munar, E., Maest\u00fa, F., Nadal, M., Cap\u00f3, M.A., R\u00edo, D., L\u00f3pez-Ibor, J.J., Ortiz, T., Mirasso, C., Marty, G.: Sex-related similarities and differences in the neural correlates of beauty. In: Proceedings of the National Academy of Sciences (PNAS), 3847\u20133852 (2009) https:\/\/doi.org\/10.1073\/pnas.0900304106","DOI":"10.1073\/pnas.0900304106"},{"key":"1724_CR51","doi-asserted-by":"publisher","DOI":"10.2307\/1412159","author":"C Spearman","year":"1904","unstructured":"Spearman, C.: The proof and measurement of association between two things. Am. J. Psychol. (1904). https:\/\/doi.org\/10.2307\/1412159","journal-title":"Am. J. Psychol."},{"issue":"2","key":"1724_CR52","doi-asserted-by":"publisher","first-page":"508","DOI":"10.1109\/TBC.2018.2816783","volume":"64","author":"X Min","year":"2018","unstructured":"Min, X., Zhai, G., Gu, K., Liu, Y., Yang, X.: Blind image quality estimation via distortion aggravation. IEEE Trans. Broadcast. 64(2), 508\u2013517 (2018). https:\/\/doi.org\/10.1109\/TBC.2018.2816783","journal-title":"IEEE Trans. Broadcast."},{"key":"1724_CR53","unstructured":"Yan, J., Li, J., Fu, X.: No-reference quality assessment of contrast-distorted images using contrast enhancement (2019). 
https:\/\/arxiv.org\/abs\/1904.08879"},{"issue":"9","key":"1724_CR54","doi-asserted-by":"publisher","first-page":"2678","DOI":"10.1109\/TIP.2011.2131660","volume":"20","author":"ND Narvekar","year":"2011","unstructured":"Narvekar, N.D., Karam, L.J.: A no-reference image blur metric based on the cumulative probability of blur detection (CPBD). IEEE Trans. Image Process. 20(9), 2678\u20132683 (2011). https:\/\/doi.org\/10.1109\/TIP.2011.2131660","journal-title":"IEEE Trans. Image Process."},{"issue":"12","key":"1724_CR55","doi-asserted-by":"publisher","first-page":"4695","DOI":"10.1109\/TIP.2012.2214050","volume":"21","author":"A Mittal","year":"2012","unstructured":"Mittal, A., Moorthy, A.K., Bovik, A.C.: No-reference image quality assessment in the spatial domain. IEEE Trans. Image Process. 21(12), 4695\u20134708 (2012). https:\/\/doi.org\/10.1109\/TIP.2012.2214050","journal-title":"IEEE Trans. Image Process."},{"issue":"3","key":"1724_CR56","doi-asserted-by":"publisher","first-page":"209","DOI":"10.1109\/LSP.2012.2227726","volume":"20","author":"A Mittal","year":"2013","unstructured":"Mittal, A., Soundararajan, R., Bovik, A.C.: Making a \u201cCompletely Blind\u2019\u2019 image quality analyzer. IEEE Signal Process. Lett. 20(3), 209\u2013212 (2013). https:\/\/doi.org\/10.1109\/LSP.2012.2227726","journal-title":"IEEE Signal Process. Lett."},{"key":"1724_CR57","doi-asserted-by":"publisher","unstructured":"Fritsche, M., Gu, S., Timofte, R.: Frequency separation for real-world super-resolution. In: IEEE\/CVF International Conference on Computer Vision Workshop (ICCVW), pp. 3599\u20133608 (2019). https:\/\/doi.org\/10.1109\/ICCVW.2019.00445","DOI":"10.1109\/ICCVW.2019.00445"},{"key":"1724_CR58","doi-asserted-by":"crossref","unstructured":"Wei, Y., Gu, S., Li, Y., Timofte, R., Jin, L., Song, H.: Unsupervised real-world image super resolution via domain-distance aware training. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 13385\u201313394 (2021)","DOI":"10.1109\/CVPR46437.2021.01318"}],"container-title":["Machine Vision and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00138-025-01724-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00138-025-01724-6\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00138-025-01724-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,12]],"date-time":"2025-09-12T14:58:08Z","timestamp":1757689088000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00138-025-01724-6"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,7,22]]},"references-count":58,"journal-issue":{"issue":"5","published-print":{"date-parts":[[2025,9]]}},"alternative-id":["1724"],"URL":"https:\/\/doi.org\/10.1007\/s00138-025-01724-6","relation":{},"ISSN":["0932-8092","1432-1769"],"issn-type":[{"value":"0932-8092","type":"print"},{"value":"1432-1769","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,7,22]]},"assertion":[{"value":"15 March 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"7 June 2025","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"2 July 2025","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"22 July 2025","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article 
History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"The collection and use of data from participants in the subjective evaluation of this study were approved by the Doshisha University Research Ethics Committee.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethical approval"}},{"value":"Informed consent was obtained from all individual participants.","order":4,"name":"Ethics","group":{"name":"EthicsHeading","label":"Informed consent"}}],"article-number":"103"}}