{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,27]],"date-time":"2025-03-27T16:20:44Z","timestamp":1743092444036,"version":"3.40.3"},"publisher-location":"Cham","reference-count":45,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031736605"},{"type":"electronic","value":"9783031736612"}],"license":[{"start":{"date-parts":[[2024,11,10]],"date-time":"2024-11-10T00:00:00Z","timestamp":1731196800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,10]],"date-time":"2024-11-10T00:00:00Z","timestamp":1731196800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-73661-2_18","type":"book-chapter","created":{"date-parts":[[2024,11,9]],"date-time":"2024-11-09T11:09:26Z","timestamp":1731150566000},"page":"323-339","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Scaling Up Personalized Image Aesthetic Assessment via Task Vector Customization"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-7853-7703","authenticated-orcid":false,"given":"Jooyeol","family":"Yun","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1071-4835","authenticated-orcid":false,"given":"Jaegul","family":"Choo","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,10]]},"reference":[{"key":"18_CR1","unstructured":"Al-Shedivat, M., Li, L., Xing, E., Talwalkar, A.: On data efficiency of meta-learning. In: International Conference on Artificial Intelligence and Statistics. 
PMLR (2021)"},{"key":"18_CR2","doi-asserted-by":"crossref","unstructured":"Bradley, R.A., Terry, M.E.: Rank analysis of incomplete block designs: I. the method of paired comparisons. Biometrika (1952)","DOI":"10.2307\/2334029"},{"key":"18_CR3","doi-asserted-by":"crossref","unstructured":"Chefer, H., Gur, S., Wolf, L.: Transformer interpretability beyond attention visualization. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.00084"},{"key":"18_CR4","doi-asserted-by":"crossref","unstructured":"Ciancio, A., da\u00a0Silva, E.A., Said, A., Samadani, R., Obrador, P., et\u00a0al.: No-reference blur assessment of digital pictures based on multifeature classifiers. IEEE Trans. Image Process. (2010)","DOI":"10.1109\/TIP.2010.2053549"},{"key":"18_CR5","unstructured":"Dosovitskiy, A., et\u00a0al.: An image is worth 16x16 words: transformers for image recognition at scale. In: ICLR (2020)"},{"key":"18_CR6","doi-asserted-by":"crossref","unstructured":"Fang, Y., Zhu, H., Zeng, Y., Ma, K., Wang, Z.: Perceptual quality assessment of smartphone photography. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00373"},{"key":"18_CR7","unstructured":"Gadre, S.Y., et\u00a0al.: Datacomp: In search of the next generation of multimodal datasets. arXiv preprint arXiv:2304.14108 (2023)"},{"key":"18_CR8","doi-asserted-by":"crossref","unstructured":"Ghadiyaram, D., Bovik, A.C.: Massive online crowdsourced study of subjective and objective picture quality. IEEE Trans. Image Process. (2015)","DOI":"10.1109\/TIP.2015.2500021"},{"key":"18_CR9","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"18_CR10","doi-asserted-by":"crossref","unstructured":"He, S., Ming, A., Li, Y., Sun, J., Zheng, S., Ma, H.: Thinking image color aesthetics assessment: models, datasets and benchmarks. 
In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.01996"},{"key":"18_CR11","doi-asserted-by":"crossref","unstructured":"He, S., Zhang, Y., Xie, R., Jiang, D., Ming, A.: Rethinking image aesthetics assessment: models, datasets and benchmarks. IJCAI (2022)","DOI":"10.24963\/ijcai.2022\/132"},{"key":"18_CR12","doi-asserted-by":"crossref","unstructured":"Hosu, V., Lin, H., Sziranyi, T., Saupe, D.: Koniq-10k: an ecologically valid database for deep learning of blind image quality assessment. IEEE Trans. Image Process. (2020)","DOI":"10.1109\/TIP.2020.2967829"},{"key":"18_CR13","doi-asserted-by":"crossref","unstructured":"Hou, J., Lin, W., Yue, G., Liu, W., Zhao, B.: Interaction-matrix based personalized image aesthetics assessment. IEEE Trans. Multimedia (2022)","DOI":"10.1109\/TMM.2022.3189276"},{"key":"18_CR14","unstructured":"Ilharco, G., Ribeiro, M.T., Wortsman, M., Schmidt, L., Hajishirzi, H., Farhadi, A.: Editing models with task arithmetic. In: ICLR (2023)"},{"key":"18_CR15","doi-asserted-by":"publisher","unstructured":"Ilharco, G., et al.: Openclip (2021). https:\/\/doi.org\/10.5281\/zenodo.5143773, https:\/\/doi.org\/10.5281\/zenodo.5143773","DOI":"10.5281\/zenodo.5143773"},{"key":"18_CR16","doi-asserted-by":"crossref","unstructured":"Jin, X., et al.: Aesthetic attributes assessment of images. In: ACM MM (2019)","DOI":"10.1145\/3343031.3350970"},{"key":"18_CR17","doi-asserted-by":"crossref","unstructured":"Karlsson, K., Jiang, W., Zhang, D.Q.: Mobile photo album management with multiscale timeline. In: ACM MM (2014)","DOI":"10.1145\/2647868.2655060"},{"key":"18_CR18","doi-asserted-by":"crossref","unstructured":"Ke, J., Wang, Q., Wang, Y., Milanfar, P., Yang, F.: MUSIQ: multi-scale image quality transformer. 
In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00510"},{"key":"18_CR19","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"662","DOI":"10.1007\/978-3-319-46448-0_40","volume-title":"Computer Vision \u2013 ECCV 2016","author":"S Kong","year":"2016","unstructured":"Kong, S., Shen, X., Lin, Z., Mech, R., Fowlkes, C.: Photo aesthetics ranking network with attributes and content adaptation. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9905, pp. 662\u2013679. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46448-0_40"},{"key":"18_CR20","doi-asserted-by":"crossref","unstructured":"Li, L., Zhu, H., Zhao, S., Ding, G., Lin, W.: Personality-assisted multi-task learning for generic and personalized image aesthetics assessment. IEEE Trans. Image Process. 29 (2020)","DOI":"10.1109\/TIP.2020.2968285"},{"key":"18_CR21","doi-asserted-by":"crossref","unstructured":"Li, Y., et al.: Transductive aesthetic preference propagation for personalized image aesthetics assessment. In: ACM MM. ACM (2022)","DOI":"10.1145\/3503161.3548244"},{"key":"18_CR22","doi-asserted-by":"crossref","unstructured":"Lin, Y., Zhang, T., Sun, P., Li, Z., Zhou, S.: Fq-vit: post-training quantization for fully quantized vision transformer. IJCAI (2022)","DOI":"10.24963\/ijcai.2022\/164"},{"key":"18_CR23","unstructured":"Liu, Z., Wang, Y., Han, K., Zhang, W., Ma, S., Gao, W.: Post-training quantization for vision transformer. NeurIPS (2021)"},{"key":"18_CR24","unstructured":"Lv, P., et al.: User-guided personalized image aesthetic assessment based on deep reinforcement learning. IEEE Trans. Multimedia (2021)"},{"key":"18_CR25","doi-asserted-by":"crossref","unstructured":"Lv, P., et al.: USAR: an interactive user-specific aesthetic ranking framework for images. In: ACM MM, pp. 1328\u20131336 (2018)","DOI":"10.1145\/3240508.3240635"},{"key":"18_CR26","unstructured":"Microsoft: Deepspeed (2023). 
https:\/\/www.deepspeed.ai\/"},{"key":"18_CR27","doi-asserted-by":"crossref","unstructured":"Murray, N., Marchesotti, L., Perronnin, F.: Ava: a large-scale database for aesthetic visual analysis. In: CVPR. IEEE (2012)","DOI":"10.1109\/CVPR.2012.6247954"},{"key":"18_CR28","doi-asserted-by":"publisher","DOI":"10.4324\/9780203726631","volume-title":"Research Design and Statistical Analysis","author":"JL Myers","year":"2013","unstructured":"Myers, J.L., Well, A.D., Lorch, R.F., Jr.: Research Design and Statistical Analysis. Routledge, London (2013)"},{"key":"18_CR29","unstructured":"Naseer, M.M., Ranasinghe, K., Khan, S.H., Hayat, M., Shahbaz\u00a0Khan, F., Yang, M.H.: Intriguing properties of vision transformers. In: NeurIPS (2021)"},{"key":"18_CR30","unstructured":"Paszke, A., et\u00a0al.: Pytorch: an imperative style, high-performance deep learning library. In: NeurIPS (2019)"},{"key":"18_CR31","doi-asserted-by":"crossref","unstructured":"Ren, J., Shen, X., Lin, Z., Mech, R., Foran, D.J.: Personalized image aesthetics. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.76"},{"key":"18_CR32","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. 2022 IEEE. In: CVPR (2021)","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"18_CR33","unstructured":"Schuhmann, C., et\u00a0al.: Laion-5b: an open large-scale dataset for training next generation image-text models. In: NeurIPS (2022)"},{"key":"18_CR34","doi-asserted-by":"crossref","unstructured":"Wallace, B., Gokul, A., Ermon, S., Naik, N.: End-to-end diffusion latent optimization improves classifier guidance. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00669"},{"key":"18_CR35","doi-asserted-by":"crossref","unstructured":"Wang, G., Yan, J., Qin, Z.: Collaborative and attentive learning for personalized image aesthetic assessment. In: IJCAI, pp. 
957\u2013963 (2018)","DOI":"10.24963\/ijcai.2018\/133"},{"key":"18_CR36","unstructured":"Wortsman, M., et\u00a0al.: Model soups: averaging weights of multiple fine-tuned models improves accuracy without increasing inference time. In: ICML. PMLR (2022)"},{"key":"18_CR37","doi-asserted-by":"crossref","unstructured":"Wortsman, M., et al.: Robust fine-tuning of zero-shot models. 2022 IEEE. In: CVPR (2021)","DOI":"10.1109\/CVPR52688.2022.00780"},{"key":"18_CR38","unstructured":"Xu, L., Xu, J., Yang, Y., Huang, Y., Xie, Y., Li, Y.: Clip brings better features to visual aesthetics learners. arXiv preprint arXiv:2307.15640 (2023)"},{"key":"18_CR39","doi-asserted-by":"crossref","unstructured":"Yan, X., Shao, F., Chen, H., Jiang, Q.: Hybrid CNN-transformer based meta-learning approach for personalized image aesthetics assessment. J. Vis. Commun. Image Representation 98 (2024)","DOI":"10.1016\/j.jvcir.2023.104044"},{"key":"18_CR40","doi-asserted-by":"crossref","unstructured":"Yang, Y., Xu, L., Li, L., Qie, N., Li, Y., Zhang, P., Guo, Y.: Personalized image aesthetics assessment with rich attributes. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01924"},{"key":"18_CR41","doi-asserted-by":"crossref","unstructured":"Yang, Z., Li, L., Yang, Y., Li, Y., Lin, W.: Multi-level transitional contrast learning for personalized image aesthetics assessment. IEEE Trans. Multimedia (2023)","DOI":"10.1109\/TMM.2023.3290479"},{"key":"18_CR42","doi-asserted-by":"crossref","unstructured":"Ying, Z., Niu, H., Gupta, P., Mahajan, D., Ghadiyaram, D., Bovik, A.: From patches to pictures (PaQ-2-PiQ): mapping the perceptual space of picture quality. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00363"},{"key":"18_CR43","series-title":"LNCS","first-page":"191","volume-title":"ECCV 2022","author":"Z Yuan","year":"2022","unstructured":"Yuan, Z., Xue, C., Chen, Y., Wu, Q., Sun, G.: Ptq4vit: post-training quantization for vision transformers with twin uniform quantization. 
In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13672, pp. 191\u2013207. Springer, Cham (2022)"},{"key":"18_CR44","unstructured":"Zhu, H., Li, L., Wu, J., Zhao, S., Ding, G., Shi, G.: Personalized image aesthetics assessment via meta-learning with bilevel gradient optimization. IEEE Trans. Cybern. (2020)"},{"key":"18_CR45","unstructured":"Zhu, H., Zhou, Y., Li, L., Li, Y., Guo, Y.: Learning personalized image aesthetics from subjective and objective attributes. IEEE Trans. Multimedia (2021)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-73661-2_18","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,9]],"date-time":"2024-11-09T12:07:26Z","timestamp":1731154046000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-73661-2_18"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,10]]},"ISBN":["9783031736605","9783031736612"],"references-count":45,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-73661-2_18","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024,11,10]]},"assertion":[{"value":"10 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference 
Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}