{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T18:20:23Z","timestamp":1775067623413,"version":"3.50.1"},"reference-count":40,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,10,22]],"date-time":"2025-10-22T00:00:00Z","timestamp":1761091200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,22]],"date-time":"2025-10-22T00:00:00Z","timestamp":1761091200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,10,22]]},"DOI":"10.1109\/cbmi66578.2025.11339321","type":"proceedings-article","created":{"date-parts":[[2026,1,20]],"date-time":"2026-01-20T20:38:56Z","timestamp":1768941536000},"page":"1-8","source":"Crossref","is-referenced-by-count":1,"title":["Does CLIP Perceive Art the Same Way We Do?"],"prefix":"10.1109","author":[{"given":"Andrea","family":"Asperti","sequence":"first","affiliation":[{"name":"University of Bologna,Dept. of Informatics (DISI),Bologna,Italy"}]},{"given":"Leonardo","family":"Dessi","sequence":"additional","affiliation":[{"name":"University of Bologna,Dept. of Informatics (DISI),Bologna,Italy"}]},{"given":"Maria Chiara","family":"Tonetti","sequence":"additional","affiliation":[{"name":"University of Bologna,Dept. of Informatics (DISI),Bologna,Italy"}]},{"given":"Nico","family":"Wu","sequence":"additional","affiliation":[{"name":"University of Bologna,Dept. of Informatics (DISI),Bologna,Italy"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Analyzing clip\u2019s performance limitations in multi-object scenarios: A controlled high-resolution study","author":"Abbasi","year":"2025","journal-title":"arXiv preprint"},{"key":"ref2","article-title":"Does CLIP perceive art the same way we do?","volume":"abs\/2505.05229","author":"Asperti","year":"2025","journal-title":"CoRR"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.3390\/bdcc9090231"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.3390\/s25020363"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1145\/3617597"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.522"},{"key":"ref7","article-title":"Scaling rectified flow transformers for high-resolution image synthesis","volume":"abs\/2403.03206","author":"Esser","year":"2024","journal-title":"CoRR"},{"key":"ref8","article-title":"Clipdraw: Exploring text-to-drawing synthesis through language-image encoders","author":"Frans","year":"2022","journal-title":"Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems 2022, NeurIPS 2022, New Orleans, LA, USA, November 28 - December 9, 2022"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-023-01891-x"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01073"},{"key":"ref11","article-title":"What do we learn from inverting CLIP 
models?","volume":"abs\/2403.02580","author":"Kazemi","year":"2024","journal-title":"CoRR"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2024.129122"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i1.25225"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/WACV56688.2023.00037"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.3233\/FAIA240552"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.31110\/COLINS\/2024-1\/008"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7299155"},{"key":"ref18","volume":"abs\/1802.03426","author":"McInnes","year":"2018","journal-title":"UMAP: uniform manifold approximation and projection for dimension reduction"},{"key":"ref19","first-page":"16784","article-title":"GLIDE: towards photorealistic image generation and editing with text-guided diffusion models","author":"Nichol","year":"2022","journal-title":"International Conference on Machine Learning, ICML 2022, 17\u201323 July 2022, Baltimore, Maryland, USA"},{"key":"ref20","year":"2024","journal-title":"National gallery of art open data program"},{"key":"ref21","article-title":"Contrastive language-image pre-training with knowledge graphs","author":"Pan","year":"2022","journal-title":"Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems 2022, NeurIPS 2022, New Orleans, LA, USA, November 28 - December 9, 2022"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2023.3311646"},{"key":"ref23","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18\u201324 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research","author":"Radford","year":"2025"},{"key":"ref24","article-title":"Hierarchical text-conditional image generation with clip latents","author":"Ramesh","year":"2022","journal-title":"arXiv e-prints"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/WIFS61860.2024.10810719"},{"key":"ref27","article-title":"Context-aware multimodal pretraining","volume":"abs\/2411.15099","author":"Roth","year":"2024","journal-title":"CoRR"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-78125-4_2"},{"key":"ref29","article-title":"Faster image2video generation: A closer look at CLIP image embedding\u2019s impact on spatio-temporal cross-attentions","volume":"abs\/2407.19205","author":"Taghipour","year":"2024","journal-title":"CoRR"},{"key":"ref30","article-title":"Toward a holistic evaluation of robustness in CLIP models","volume":"abs\/2410.01534","author":"Tu","year":"2024","journal-title":"CoRR"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1016\/j.csl.2024.101748"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1007\/s13735-024-00352-6"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1007\/s00530-024-01414-9"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00874"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01049"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01631"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-022-01653-1"},{"key":"ref38","article-title":"CLIP-MUSED: clip-guided multi-subject visual neural information 
semantic decoding","volume-title":"The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7\u201311, 2024","author":"Zhou"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00246"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1145\/3664647.3680885"}],"event":{"name":"2025 International Conference on Content-Based Multimedia Indexing (CBMI)","location":"Dublin, Ireland","start":{"date-parts":[[2025,10,22]]},"end":{"date-parts":[[2025,10,24]]}},"container-title":["2025 International Conference on Content-Based Multimedia Indexing (CBMI)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11339229\/11339242\/11339321.pdf?arnumber=11339321","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,21]],"date-time":"2026-01-21T07:11:36Z","timestamp":1768979496000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11339321\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,22]]},"references-count":40,"URL":"https:\/\/doi.org\/10.1109\/cbmi66578.2025.11339321","relation":{},"subject":[],"published":{"date-parts":[[2025,10,22]]}}}