{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,10]],"date-time":"2025-12-10T05:06:04Z","timestamp":1765343164879,"version":"3.46.0"},"publisher-location":"New York, NY, USA","reference-count":60,"publisher":"ACM","funder":[{"name":"National Science Foundation of China","award":["62476069"],"award-info":[{"award-number":["62476069"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,10,27]]},"DOI":"10.1145\/3746027.3755666","type":"proceedings-article","created":{"date-parts":[[2025,10,25]],"date-time":"2025-10-25T07:26:55Z","timestamp":1761377215000},"page":"5784-5793","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["Emotion in a Bottle: Information Bottleneck Guided Disentanglement for Emotion Domain Adaptation"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0009-2657-3062","authenticated-orcid":false,"given":"Jiankun","family":"Zhu","sequence":"first","affiliation":[{"name":"Harbin Institute of Technology, Harbin, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5843-6411","authenticated-orcid":false,"given":"Sicheng","family":"Zhao","sequence":"additional","affiliation":[{"name":"Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2814-1852","authenticated-orcid":false,"given":"Lulu","family":"Tian","sequence":"additional","affiliation":[{"name":"Harbin Institute of Technology, Harbin, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-8220-4442","authenticated-orcid":false,"given":"Jing","family":"Jiang","sequence":"additional","affiliation":[{"name":"Harbin Institute of Technology, Harbin City, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-0445-8901","authenticated-orcid":false,"given":"Xi","family":"Chen","sequence":"additional","affiliation":[{"name":"Harbin Institute of Technology, 
Harbin, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3298-2574","authenticated-orcid":false,"given":"Hongxun","family":"Yao","sequence":"additional","affiliation":[{"name":"Harbin Institute of Technology, Harbin, China"}]}],"member":"320","published-online":{"date-parts":[[2025,10,27]]},"reference":[{"key":"e_1_3_2_1_1_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i2.27830"},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.3390\/electronics12214396"},{"key":"e_1_3_2_1_3_1","first-page":"223","article-title":"Large-scale visual sentiment ontology and detectors using adjective noun pairs","author":"Borth Damian","year":"2013","unstructured":"Damian Borth, Rongrong Ji, Tao Chen, Thomas Breuel, and Shih-Fu Chang. 2013. Large-scale visual sentiment ontology and detectors using adjective noun pairs. In ACM Multimedia. 223-232.","journal-title":"ACM Multimedia."},{"key":"e_1_3_2_1_4_1","volume-title":"Understanding disentangling in β-VAE. arXiv preprint arXiv:1804.03599","author":"Burgess Christopher P","year":"2018","unstructured":"Christopher P Burgess, Irina Higgins, Arka Pal, Loic Matthey, Nick Watters, Guillaume Desjardins, and Alexander Lerchner. 2018. Understanding disentangling in β-VAE. arXiv preprint arXiv:1804.03599 (2018)."},{"key":"e_1_3_2_1_5_1","first-page":"627","article-title":"Progressive feature alignment for unsupervised domain adaptation","author":"Chen Chaoqi","year":"2019","unstructured":"Chaoqi Chen, Weiping Xie, Wenbing Huang, Yu Rong, Xinghao Ding, Yue Huang, Tingyang Xu, and Junzhou Huang. 2019. Progressive feature alignment for unsupervised domain adaptation. In ICCV. 627-636.","journal-title":"ICCV."},{"key":"e_1_3_2_1_6_1","first-page":"367","article-title":"Object-based visual sentiment concept analysis and application","author":"Chen Tao","year":"2014","unstructured":"Tao Chen, Felix X Yu, Jiawei Chen, Yin Cui, Yan-Ying Chen, and Shih-Fu Chang. 2014. 
Object-based visual sentiment concept analysis and application. In ACM Multimedia. 367-376.","journal-title":"ACM Multimedia."},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2022.3225049"},{"key":"e_1_3_2_1_8_1","volume-title":"Learning to compose diversified prompts for image emotion classification. Computational Visual Media","author":"Deng Sinuo","year":"2024","unstructured":"Sinuo Deng, Lifang Wu, Ge Shi, Lehao Xing, Meng Jian, Ye Xiang, and Ruihai Dong. 2024. Learning to compose diversified prompts for image emotion classification. Computational Visual Media (2024), 1-15."},{"key":"e_1_3_2_1_9_1","first-page":"23375","article-title":"Domain-agnostic mutual prompting for unsupervised domain adaptation","author":"Du Zhekai","year":"2024","unstructured":"Zhekai Du, Xinyao Li, Fengling Li, Ke Lu, Lei Zhu, and Jingjing Li. 2024. Domain-agnostic mutual prompting for unsupervised domain adaptation. In CVPR. 23375-23384.","journal-title":"CVPR."},{"key":"e_1_3_2_1_10_1","volume-title":"Rethinking Domain Adaptation and Generalization in the ERA Of Clip. In 2024 IEEE International Conference on Image Processing (ICIP). IEEE, 2585-2591","author":"Feng Ruoyu","year":"2024","unstructured":"Ruoyu Feng, Tao Yu, Xin Jin, Xiaoyuan Yu, Lei Xiao, and Zhibo Chen. 2024. Rethinking Domain Adaptation and Generalization in the ERA Of Clip. In 2024 IEEE International Conference on Image Processing (ICIP). IEEE, 2585-2591."},{"key":"e_1_3_2_1_11_1","volume-title":"Domain adaptation via prompt learning","author":"Ge Chunjiang","year":"2023","unstructured":"Chunjiang Ge, Rui Huang, Mixue Xie, Zihang Lai, Shiji Song, Shuang Li, and Gao Huang. 2023. Domain adaptation via prompt learning. 
IEEE Transactions on Neural Networks and Learning Systems (2023)."},{"key":"e_1_3_2_1_12_1","volume-title":"ICLR","volume":"3","author":"Higgins Irina","year":"2017","unstructured":"Irina Higgins, Loic Matthey, Arka Pal, Christopher P Burgess, Xavier Glorot, Matthew M Botvinick, Shakir Mohamed, and Alexander Lerchner. 2017. beta-vae: Learning basic visual concepts with a constrained variational framework. In ICLR, Vol. 3."},{"key":"e_1_3_2_1_13_1","first-page":"11721","article-title":"MIC: Masked image consistency for context-enhanced domain adaptation","author":"Hoyer Lukas","year":"2023","unstructured":"Lukas Hoyer, Dengxin Dai, Haoran Wang, and Luc Van Gool. 2023. MIC: Masked image consistency for context-enhanced domain adaptation. In CVPR. 11721-11732.","journal-title":"CVPR."},{"key":"e_1_3_2_1_14_1","first-page":"2994","article-title":"Reclip: Refine contrastive language image pre-training with source free domain adaptation","author":"Hu Xuefeng","year":"2024","unstructured":"Xuefeng Hu, Ke Zhang, Lu Xia, Albert Chen, Jiajia Luo, Yuyin Sun, Ken Wang, Nan Qiao, Xiao Zeng, Min Sun, et al., 2024. Reclip: Refine contrastive language image pre-training with source free domain adaptation. In WACV. 2994-3003.","journal-title":"WACV."},{"key":"e_1_3_2_1_15_1","first-page":"5070","article-title":"Label propagation for deep semi-supervised learning","author":"Iscen Ahmet","year":"2019","unstructured":"Ahmet Iscen, Giorgos Tolias, Yannis Avrithis, and Ondrej Chum. 2019. Label propagation for deep semi-supervised learning. In CVPR. 5070-5079.","journal-title":"CVPR."},{"volume-title":"S 2-ver: Semi-supervised visual emotion recognition","author":"Jia Guoli","key":"e_1_3_2_1_16_1","unstructured":"Guoli Jia and Jufeng Yang. 2022. S 2-ver: Semi-supervised visual emotion recognition. In ECCV. 
Springer, 493-509."},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2024.102909"},{"volume-title":"Minimum class confusion for versatile domain adaptation","author":"Jin Ying","key":"e_1_3_2_1_18_1","unstructured":"Ying Jin, Ximei Wang, Mingsheng Long, and Jianmin Wang. 2020. Minimum class confusion for versatile domain adaptation. In ECCV. Springer, 464-480."},{"key":"e_1_3_2_1_19_1","first-page":"2691","article-title":"Empowering unsupervised domain adaptation with large-scale pre-trained vision-language models","author":"Lai Zhengfeng","year":"2024","unstructured":"Zhengfeng Lai, Haoping Bai, Haotian Zhang, Xianzhi Du, Jiulong Shan, Yinfei Yang, Chen-Nee Chuah, and Meng Cao. 2024. Empowering unsupervised domain adaptation with large-scale pre-trained vision-language models. In WACV. 2691-2701.","journal-title":"WACV."},{"key":"e_1_3_2_1_20_1","first-page":"23364","article-title":"Split to Merge","author":"Li Xinyao","year":"2024","unstructured":"Xinyao Li, Yuke Li, Zhekai Du, Fengling Li, Ke Lu, and Jingjing Li. 2024. Split to Merge: Unifying Separated Modalities for Unsupervised Domain Adaptation. In CVPR. 23364-23374.","journal-title":"Unifying Separated Modalities for Unsupervised Domain Adaptation. In CVPR."},{"key":"e_1_3_2_1_21_1","volume-title":"Frequency-Aligned Knowledge Distillation for Lightweight Spatiotemporal Forecasting. arXiv preprint arXiv:2507.02939","author":"Li Yuqi","year":"2025","unstructured":"Yuqi Li, Chuanguang Yang, Hansheng Zeng, Zeyu Dong, Zhulin An, Yongjun Xu, Yingli Tian, and Hao Wu. 2025. Frequency-Aligned Knowledge Distillation for Lightweight Spatiotemporal Forecasting. arXiv preprint arXiv:2507.02939 (2025)."},{"key":"e_1_3_2_1_22_1","first-page":"97","article-title":"Learning transferable features with deep adaptation networks","author":"Long Mingsheng","year":"2015","unstructured":"Mingsheng Long, Yue Cao, Jianmin Wang, and Michael Jordan. 2015. 
Learning transferable features with deep adaptation networks. In ICML. PMLR, 97-105.","journal-title":"ICML. PMLR"},{"key":"e_1_3_2_1_23_1","first-page":"229","article-title":"On shape and the computability of emotions","author":"Lu Xin","year":"2012","unstructured":"Xin Lu, Poonam Suryanarayan, Reginald B Adams Jr, Jia Li, Michelle G Newman, and James Z Wang. 2012. On shape and the computability of emotions. In ACM Multimedia. 229-238.","journal-title":"ACM Multimedia."},{"key":"e_1_3_2_1_24_1","first-page":"83","article-title":"Affective image classification using features inspired by psychology and art theory","author":"Machajdik Jana","year":"2010","unstructured":"Jana Machajdik and Allan Hanbury. 2010. Affective image classification using features inspired by psychology and art theory. In ACM Multimedia. 83-92.","journal-title":"ACM Multimedia."},{"key":"e_1_3_2_1_25_1","first-page":"860","article-title":"A mixed bag of emotions: Model, predict, and transfer emotion distributions","author":"Peng Kuan-Chuan","year":"2015","unstructured":"Kuan-Chuan Peng, Tsuhan Chen, Amir Sadovnik, and Andrew C Gallagher. 2015. A mixed bag of emotions: Model, predict, and transfer emotion distributions. In CVPR. 860-868.","journal-title":"CVPR."},{"key":"e_1_3_2_1_26_1","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","author":"Radford Alec","year":"2021","unstructured":"Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al., 2021. Learning transferable visual models from natural language supervision. In ICML. PMLR, 8748-8763.","journal-title":"ICML. 
PMLR"},{"key":"e_1_3_2_1_27_1","first-page":"1","article-title":"Opinion mining for national security: techniques, domain applications, challenges and research opportunities","volume":"8","author":"Mat Razali Noor Afiza","year":"2021","unstructured":"Noor Afiza Mat Razali, Nur Atiqah Malizan, Nor Asiakin Hasbullah, Muslihah Wook, Norulzahrah Mohd Zainuddin, Khairul Khalil Ishak, Suzaimah Ramli, and Sazali Sukardi. 2021. Opinion mining for national security: techniques, domain applications, challenges and research opportunities. Journal of Big Data, Vol. 8 (2021), 1-46.","journal-title":"Journal of Big Data"},{"key":"e_1_3_2_1_28_1","first-page":"3723","article-title":"Maximum classifier discrepancy for unsupervised domain adaptation","author":"Saito Kuniaki","year":"2018","unstructured":"Kuniaki Saito, Kohei Watanabe, Yoshitaka Ushiku, and Tatsuya Harada. 2018. Maximum classifier discrepancy for unsupervised domain adaptation. In CVPR. 3723-3732.","journal-title":"CVPR."},{"key":"e_1_3_2_1_29_1","volume-title":"CLIP-Enhanced Unsupervised Domain Adaptation with Consistency Regularization. In 2024 International Joint Conference on Neural Networks (IJCNN). IEEE, 1-8.","author":"Shi Kuo","year":"2024","unstructured":"Kuo Shi, Jie Lu, Zhen Fang, and Guangquan Zhang. 2024. CLIP-Enhanced Unsupervised Domain Adaptation with Consistency Regularization. In 2024 International Joint Conference on Neural Networks (IJCNN). IEEE, 1-8."},{"key":"e_1_3_2_1_30_1","first-page":"4355","article-title":"Ad-clip: Adapting domains in prompt space using clip","author":"Singha Mainak","year":"2023","unstructured":"Mainak Singha, Harsh Pal, Ankit Jha, and Biplab Banerjee. 2023. Ad-clip: Adapting domains in prompt space using clip. In ICCV. 4355-4364.","journal-title":"ICCV."},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"publisher","DOI":"10.1007\/s11760-024-03101-8"},{"key":"e_1_3_2_1_32_1","volume-title":"The information bottleneck method. 
arXiv preprint physics\/0004057","author":"Tishby Naftali","year":"2000","unstructured":"Naftali Tishby, Fernando C Pereira, and William Bialek. 2000. The information bottleneck method. arXiv preprint physics\/0004057 (2000)."},{"key":"e_1_3_2_1_33_1","first-page":"1","article-title":"Deep learning and the information bottleneck principle. In 2015 ieee information theory workshop (itw)","author":"Tishby Naftali","year":"2015","unstructured":"Naftali Tishby and Noga Zaslavsky. 2015. Deep learning and the information bottleneck principle. In 2015 ieee information theory workshop (itw). IEEE, 1-5.","journal-title":"IEEE"},{"key":"e_1_3_2_1_34_1","first-page":"5018","article-title":"Deep hashing network for unsupervised domain adaptation","author":"Venkateswara Hemanth","year":"2017","unstructured":"Hemanth Venkateswara, Jose Eusebio, Shayok Chakraborty, and Sethuraman Panchanathan. 2017. Deep hashing network for unsupervised domain adaptation. In CVPR. 5018-5027.","journal-title":"CVPR."},{"key":"e_1_3_2_1_35_1","first-page":"13106","article-title":"Learning visual emotion representations from web data","author":"Wei Zijun","year":"2020","unstructured":"Zijun Wei, Jianming Zhang, Zhe Lin, Joon-Young Lee, Niranjan Balasubramanian, Minh Hoai, and Dimitris Samaras. 2020. Learning visual emotion representations from web data. In CVPR. 13106-13115.","journal-title":"CVPR."},{"key":"e_1_3_2_1_36_1","doi-asserted-by":"publisher","DOI":"10.1145\/3664647.3680582"},{"key":"e_1_3_2_1_37_1","first-page":"6358","article-title":"EmoGen","author":"Yang Jingyuan","year":"2024","unstructured":"Jingyuan Yang, Jiawei Feng, and Hui Huang. 2024. EmoGen: Emotional Image Content Generation with Text-to-Image Diffusion Models. In CVPR. 6358-6368.","journal-title":"Emotional Image Content Generation with Text-to-Image Diffusion Models. 
In CVPR."},{"key":"e_1_3_2_1_38_1","first-page":"8686","article-title":"Solver: Scene-object interrelated visual emotion reasoning network","volume":"30","author":"Yang Jingyuan","year":"2021","unstructured":"Jingyuan Yang, Xinbo Gao, Leida Li, Xiumei Wang, and Jinshan Ding. 2021a. Solver: Scene-object interrelated visual emotion reasoning network. IEEE TIP, Vol. 30 (2021), 8686-8701.","journal-title":"IEEE TIP"},{"key":"e_1_3_2_1_39_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01864"},{"key":"e_1_3_2_1_40_1","first-page":"4237","article-title":"A circular-structured representation for visual emotion distribution learning","author":"Yang Jingyuan","year":"2021","unstructured":"Jingyuan Yang, Jie Li, Leida Li, Xiumei Wang, and Xinbo Gao. 2021b. A circular-structured representation for visual emotion distribution learning. In CVPR. 4237-4246.","journal-title":"CVPR."},{"key":"e_1_3_2_1_41_1","first-page":"7432","article-title":"Stimuli-aware visual emotion analysis","volume":"30","author":"Yang Jingyuan","year":"2021","unstructured":"Jingyuan Yang, Jie Li, Xiumei Wang, Yuxuan Ding, and Xinbo Gao. 2021c. Stimuli-aware visual emotion analysis. IEEE TIP, Vol. 30 (2021), 7432-7445.","journal-title":"IEEE TIP"},{"key":"e_1_3_2_1_42_1","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2018.2803520"},{"key":"e_1_3_2_1_43_1","first-page":"308","article-title":"Building a large scale dataset for image emotion recognition: The fine print and the benchmark","author":"You Quanzeng","year":"2016","unstructured":"Quanzeng You, Jiebo Luo, Hailin Jin, and Jianchao Yang. 2016. Building a large scale dataset for image emotion recognition: The fine print and the benchmark. In AAAI. 
308-314.","journal-title":"AAAI."},{"key":"e_1_3_2_1_44_1","doi-asserted-by":"publisher","DOI":"10.1145\/2502069.2502079"},{"key":"e_1_3_2_1_45_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2019.105245"},{"key":"e_1_3_2_1_46_1","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2019.2928998"},{"key":"e_1_3_2_1_47_1","unstructured":"YiFan Zhang Jian Liang Zhang Zhang Liang Wang Rong Jin Tieniu Tan et al. 2022. Free Lunch for Domain Adversarial Training: Environment Label Smoothing. In ICLR."},{"key":"e_1_3_2_1_48_1","first-page":"11388","article-title":"Towards effective instance discrimination contrastive loss for unsupervised domain adaptation","author":"Zhang Yixin","year":"2023","unstructured":"Yixin Zhang, Zilei Wang, Junjie Li, Jiafan Zhuang, and Zihan Lin. 2023. Towards effective instance discrimination contrastive loss for unsupervised domain adaptation. In ICCV. 11388-11399.","journal-title":"ICCV."},{"key":"e_1_3_2_1_49_1","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2021.3062750"},{"key":"e_1_3_2_1_50_1","first-page":"47","article-title":"Exploring principles-of-art features for image emotion recognition","author":"Zhao Sicheng","year":"2014","unstructured":"Sicheng Zhao, Yue Gao, Xiaolei Jiang, Hongxun Yao, Tat-Seng Chua, and Xiaoshuai Sun. 2014. Exploring principles-of-art features for image emotion recognition. In ACM Multimedia. 47-56.","journal-title":"ACM Multimedia."},{"key":"e_1_3_2_1_51_1","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2023.3309299"},{"key":"e_1_3_2_1_52_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2024.102862"},{"key":"e_1_3_2_1_53_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33012620"},{"key":"e_1_3_2_1_54_1","first-page":"1385","article-title":"Predicting personalized emotion perceptions of social images","author":"Zhao Sicheng","year":"2016","unstructured":"Sicheng Zhao, Hongxun Yao, Yue Gao, Rongrong Ji, Wenlong Xie, Xiaolei Jiang, and Tat-Seng Chua. 2016. 
Predicting personalized emotion perceptions of social images. In ACM Multimedia. 1385-1394.","journal-title":"ACM Multimedia."},{"key":"e_1_3_2_1_55_1","first-page":"879","article-title":"Predicting continuous probability distribution of image emotions in valence-arousal space","author":"Zhao Sicheng","year":"2015","unstructured":"Sicheng Zhao, Hongxun Yao, and Xiaolei Jiang. 2015. Predicting continuous probability distribution of image emotions in valence-arousal space. In ACM Multimedia. 879-882.","journal-title":"ACM Multimedia."},{"key":"e_1_3_2_1_56_1","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2021.3094362"},{"key":"e_1_3_2_1_57_1","volume-title":"CLIP the Divergence: Language-guided Unsupervised Domain Adaptation. arXiv preprint arXiv:2407.01842","author":"Zhu Jinjing","year":"2024","unstructured":"Jinjing Zhu, Yucheng Chen, and Lin Wang. 2024a. CLIP the Divergence: Language-guided Unsupervised Domain Adaptation. arXiv preprint arXiv:2407.01842 (2024)."},{"key":"e_1_3_2_1_58_1","volume-title":"EIDA: Explicit and Implicit-Space Self-supervised Learning for Visual Emotion Adaptation","author":"Zhu Jiankun","year":"2024","unstructured":"Jiankun Zhu, Jing Jiang, Xi Chen, Wenbo Tang, Sicheng Zhao, and Hongxun Yao. 2024b. EIDA: Explicit and Implicit-Space Self-supervised Learning for Visual Emotion Adaptation. In CEI. Springer, 41-56."},{"key":"e_1_3_2_1_59_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v39i2.32160"},{"key":"e_1_3_2_1_60_1","first-page":"1","article-title":"Learning Class Prototypes for Visual Emotion Recognition","author":"Zhu Jiankun","year":"2025","unstructured":"Jiankun Zhu, Sicheng Zhao, Jing Jiang, Zhaopan Xu, Wenbo Tang, and Hongxun Yao. 2025b. Learning Class Prototypes for Visual Emotion Recognition. In ICASSP. IEEE, 1-5.","journal-title":"ICASSP. 
IEEE"}],"event":{"name":"MM '25: The 33rd ACM International Conference on Multimedia","sponsor":["SIGMM ACM Special Interest Group on Multimedia"],"location":"Dublin Ireland","acronym":"MM '25"},"container-title":["Proceedings of the 33rd ACM International Conference on Multimedia"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3746027.3755666","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,10]],"date-time":"2025-12-10T05:02:53Z","timestamp":1765342973000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3746027.3755666"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,27]]},"references-count":60,"alternative-id":["10.1145\/3746027.3755666","10.1145\/3746027"],"URL":"https:\/\/doi.org\/10.1145\/3746027.3755666","relation":{},"subject":[],"published":{"date-parts":[[2025,10,27]]},"assertion":[{"value":"2025-10-27","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}