{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,12]],"date-time":"2025-12-12T00:47:30Z","timestamp":1765500450358,"version":"3.48.0"},"publisher-location":"New York, NY, USA","reference-count":32,"publisher":"ACM","funder":[{"name":"This research was supported by the Basic Science Research Program of the National Research Foundation (NRF) funded by the Korean government (MSIT) (No. IITP-2025-RS-2024-00346737)","award":["IITP-2025-RS-2024-00346737"],"award-info":[{"award-number":["IITP-2025-RS-2024-00346737"]}]},{"name":"This research was funded by the Ministry of Science and ICT (MSIT), Korea, through the Global Scholars Invitation Program (No. RS-2024-00459638)","award":["RS-2024-00459638"],"award-info":[{"award-number":["RS-2024-00459638"]}]},{"name":"This research was funded by the Graduate School of Metaverse Convergence at Sungkyunkwan University (No. RS-2023-00254129)","award":["RS-2023-00254129"],"award-info":[{"award-number":["RS-2023-00254129"]}]},{"name":"This research was funded by the ICT Challenge and Advanced Network of HRD (ICAN) support program (No. RS-2023-00259497)","award":["RS-2023-00259497"],"award-info":[{"award-number":["RS-2023-00259497"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,11,10]]},"DOI":"10.1145\/3746252.3760877","type":"proceedings-article","created":{"date-parts":[[2025,11,10]],"date-time":"2025-11-10T18:37:32Z","timestamp":1762799852000},"page":"4895-4899","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["RadialFocus: Geometric Graph Transformers via Distance-Modulated Attention"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-7681-7987","authenticated-orcid":false,"given":"San","family":"Kim","sequence":"first","affiliation":[{"name":"Dept. 
of Computer Science and Engineering, Sungkyunkwan University, Suwon, Gyeonggi-do, Republic of Korea"}]},{"ORCID":"https:\/\/orcid.org\/0009-0003-9913-6871","authenticated-orcid":false,"given":"Seungjun","family":"Lee","sequence":"additional","affiliation":[{"name":"Dept. of Immersive Media Engineering\/Convergence Program for Social Innovation, Sungkyunkwan University, Suwon, Gyeonggi-do, Republic of Korea"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-5232-3586","authenticated-orcid":false,"given":"Sichan","family":"Oh","sequence":"additional","affiliation":[{"name":"Dept. of Electrical and Computer Engineering, Sungkyunkwan University, Suwon, Gyeonggi-do, Republic of Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5174-0074","authenticated-orcid":false,"given":"Jaekwang","family":"Kim","sequence":"additional","affiliation":[{"name":"Dept. of Applied Artificial Intelligence\/Convergence Program for Social Innovation, Sungkyunkwan University, Seoul, Republic of Korea"}]}],"member":"320","published-online":{"date-parts":[[2025,11,10]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"Jamie Ryan Kiros, and Geoffrey E Hinton","author":"Ba Jimmy Lei","year":"2016","unstructured":"Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. 2016. Layer normalization. arXiv preprint arXiv:1607.06450 (2016)."},{"key":"e_1_3_2_1_2_1","volume-title":"Graph convolutions that can finally model local structure. arXiv preprint arXiv:2011.15069","author":"Brossard R\u00e9my","year":"2020","unstructured":"R\u00e9my Brossard, Oriel Frigo, and David Dehaene. 2020. Graph convolutions that can finally model local structure. arXiv preprint arXiv:2011.15069 (2020)."},{"key":"e_1_3_2_1_3_1","volume-title":"A generalization of transformer networks to graphs. arXiv preprint arXiv:2012.09699","author":"Dwivedi Vijay Prakash","year":"2020","unstructured":"Vijay Prakash Dwivedi and Xavier Bresson. 2020. A generalization of transformer networks to graphs. 
arXiv preprint arXiv:2012.09699 (2020)."},{"key":"e_1_3_2_1_4_1","volume-title":"Unicorn: A unified contrastive learning approach for multi-view molecular representation learning. arXiv preprint arXiv:2405.10343","author":"Feng Shikun","year":"2024","unstructured":"Shikun Feng, Yuyan Ni, Minghao Li, Yanwen Huang, Zhi-Ming Ma, Wei-Ying Ma, and Yanyan Lan. 2024. Unicorn: A unified contrastive learning approach for multi-view molecular representation learning. arXiv preprint arXiv:2405.10343 (2024)."},{"key":"e_1_3_2_1_5_1","volume-title":"International conference on machine learning. PMLR, 1263--1272","author":"Gilmer Justin","year":"2017","unstructured":"Justin Gilmer, Samuel S Schoenholz, Patrick F Riley, Oriol Vinyals, and George E Dahl. 2017. Neural message passing for quantum chemistry. In International conference on machine learning. PMLR, 1263--1272."},{"key":"e_1_3_2_1_6_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.1145\/3534678.3539296"},{"key":"e_1_3_2_1_8_1","volume-title":"Triplet Interaction Improves Graph Transformers: Accurate Molecular Graph Learning with Triplet Graph Transformers. arXiv preprint arXiv:2402.04538","author":"Hussain Md Shamim","year":"2024","unstructured":"Md Shamim Hussain, Mohammed J Zaki, and Dharmashankar Subramanian. 2024. Triplet Interaction Improves Graph Transformers: Accurate Molecular Graph Learning with Triplet Graph Transformers. arXiv preprint arXiv:2402.04538 (2024)."},{"key":"e_1_3_2_1_9_1","first-page":"14582","article-title":"Pure transformers are powerful graph learners","volume":"35","author":"Kim Jinwoo","year":"2022","unstructured":"Jinwoo Kim, Dat Nguyen, Seonwoo Min, Sungjun Cho, Moontae Lee, Honglak Lee, and Seunghoon Hong. 2022. Pure transformers are powerful graph learners. 
Advances in Neural Information Processing Systems 35 (2022), 14582--14595.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_10_1","volume-title":"Semi-supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907","author":"Kipf Thomas N","year":"2016","unstructured":"Thomas N Kipf and Max Welling. 2016. Semi-supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907 (2016)."},{"key":"e_1_3_2_1_11_1","volume-title":"Semi-supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907","author":"Kipf Thomas N","year":"2016","unstructured":"Thomas N Kipf and Max Welling. 2016. Semi-supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907 (2016)."},{"key":"e_1_3_2_1_12_1","volume-title":"Deepergcn: All you need to train deeper gcns. arXiv preprint arXiv:2006.07739","author":"Li Guohao","year":"2020","unstructured":"Guohao Li, Chenxin Xiong, Ali Thabet, and Bernard Ghanem. 2020. Deepergcn: All you need to train deeper gcns. arXiv preprint arXiv:2006.07739 (2020)."},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.1145\/3447548.3467311"},{"key":"e_1_3_2_1_14_1","volume-title":"Gem-2: Next generation molecular property prediction network with many-body and full-range interaction modeling. arXiv preprint arXiv:2208.05863","author":"Liu Lihang","year":"2022","unstructured":"Lihang Liu, Donglong He, Xiaomin Fang, Shanzhuo Zhang, Fan Wang, Jingzhou He, and Hua Wu. 2022. Gem-2: Next generation molecular property prediction network with many-body and full-range interaction modeling. arXiv preprint arXiv:2208.05863 (2022)."},{"key":"e_1_3_2_1_15_1","volume-title":"International Conference on Machine Learning. PMLR, 21497--21526","author":"Liu Shengchao","year":"2023","unstructured":"Shengchao Liu, Weitao Du, Zhi-Ming Ma, Hongyu Guo, and Jian Tang. 2023. 
A group symmetric stochastic differential equation model for molecule multimodal pretraining. In International Conference on Machine Learning. PMLR, 21497--21526."},{"key":"e_1_3_2_1_16_1","volume-title":"Pre-training molecular graph representation with 3d geometry. arXiv preprint arXiv:2110.07728","author":"Liu Shengchao","year":"2021","unstructured":"Shengchao Liu, Hanchen Wang, Weiyang Liu, Joan Lasenby, Hongyu Guo, and Jian Tang. 2021. Pre-training molecular graph representation with 3d geometry. arXiv preprint arXiv:2110.07728 (2021)."},{"key":"e_1_3_2_1_17_1","volume-title":"Highly accurate quantum chemical property prediction with uni-mol. arXiv preprint arXiv:2303.16982","author":"Lu Shuqi","year":"2023","unstructured":"Shuqi Lu, Zhifeng Gao, Di He, Linfeng Zhang, and Guolin Ke. 2023. Highly accurate quantum chemical property prediction with uni-mol. arXiv preprint arXiv:2303.16982 (2023)."},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.csbj.2024.04.030"},{"key":"e_1_3_2_1_19_1","volume-title":"The Eleventh International Conference on Learning Representations.","author":"Luo Shengjie","year":"2022","unstructured":"Shengjie Luo, Tianlang Chen, Yixian Xu, Shuxin Zheng, Tie-Yan Liu, Liwei Wang, and Di He. 2022. One transformer can understand both 2d & 3d molecular data. In The Eleventh International Conference on Learning Representations."},{"key":"e_1_3_2_1_20_1","volume-title":"Gps: An optimised hybrid mpnn\/transformer for molecular property prediction. arXiv preprint arXiv:2212.02229","author":"Masters Dominic","year":"2022","unstructured":"Dominic Masters, Josef Dean, Kerstin Klaser, Zhiyi Li, Sam Maddrell-Mander, Adam Sanders, Hatem Helal, Deniz Beker, Ladislav Ramp\u00e1\u0161ek, and Dominique Beaini. 2022. Gps: An optimised hybrid mpnn\/transformer for molecular property prediction. 
arXiv preprint arXiv:2212.02229 (2022)."},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.1088\/2632-2153\/ad3ee4"},{"key":"e_1_3_2_1_22_1","volume-title":"Grpe: Relative positional encoding for graph transformer. arXiv preprint arXiv:2201.12787","author":"Park Wonpyo","year":"2022","unstructured":"Wonpyo Park, Woonggi Chang, Donggeon Lee, Juntae Kim, and Seung-won Hwang. 2022. Grpe: Relative positional encoding for graph transformer. arXiv preprint arXiv:2201.12787 (2022)."},{"key":"e_1_3_2_1_23_1","first-page":"14501","article-title":"Recipe for a general, powerful, scalable graph transformer","volume":"35","author":"Ramp\u00e1\u0161ek Ladislav","year":"2022","unstructured":"Ladislav Ramp\u00e1\u0161ek, Michael Galkin, Vijay Prakash Dwivedi, Anh Tuan Luu, Guy Wolf, and Dominique Beaini. 2022. Recipe for a general, powerful, scalable graph transformer. Advances in Neural Information Processing Systems 35 (2022), 14501--14515.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_24_1","volume-title":"International Conference on Machine Learning. PMLR","author":"St\u00e4rk Hannes","year":"2022","unstructured":"Hannes St\u00e4rk, Dominique Beaini, Gabriele Corso, Prudencio Tossou, Christian Dallago, Stephan G\u00fcnnemann, and Pietro Li\u00f2. 2022. 3d infomax improves gnns for molecular property prediction. In International Conference on Machine Learning. PMLR, 20479--20502."},{"key":"e_1_3_2_1_25_1","volume-title":"Attention is all you need. Advances in Neural Information Processing Systems","author":"Vaswani A","year":"2017","unstructured":"A Vaswani. 2017. Attention is all you need. Advances in Neural Information Processing Systems (2017)."},{"key":"e_1_3_2_1_26_1","volume-title":"Graph attention networks. 
arXiv preprint arXiv:1710.10903","author":"Veli\u010dkovi\u0107 Petar","year":"2017","unstructured":"Petar Veli\u010dkovi\u0107, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Lio, and Yoshua Bengio. 2017. Graph attention networks. arXiv preprint arXiv:1710.10903 (2017)."},{"key":"e_1_3_2_1_27_1","volume-title":"How powerful are graph neural networks? arXiv preprint arXiv:1810.00826","author":"Xu Keyulu","year":"2018","unstructured":"Keyulu Xu, Weihua Hu, Jure Leskovec, and Stefanie Jegelka. 2018. How powerful are graph neural networks? arXiv preprint arXiv:1810.00826 (2018)."},{"key":"e_1_3_2_1_28_1","volume-title":"How powerful are graph neural networks? arXiv preprint arXiv:1810.00826","author":"Xu Keyulu","year":"2018","unstructured":"Keyulu Xu, Weihua Hu, Jure Leskovec, and Stefanie Jegelka. 2018. How powerful are graph neural networks? arXiv preprint arXiv:1810.00826 (2018)."},{"key":"e_1_3_2_1_29_1","volume-title":"MOL-AE: Auto-encoder based molecular representation learning with 3D cloze test objective. bioRxiv","author":"Yang Junwei","year":"2024","unstructured":"Junwei Yang, Kangjie Zheng, Siyu Long, Zaiqing Nie, Ming Zhang, Xinyu Dai, Wei-Ying Ma, and Hao Zhou. 2024. MOL-AE: Auto-encoder based molecular representation learning with 3D cloze test objective. bioRxiv (2024), 2024--04."},{"key":"e_1_3_2_1_30_1","volume-title":"Do transformers really perform badly for graph representation? Advances in neural information processing systems 34","author":"Ying Chengxuan","year":"2021","unstructured":"Chengxuan Ying, Tianle Cai, Shengjie Luo, Shuxin Zheng, Guolin Ke, Di He, Yanming Shen, and Tie-Yan Liu. 2021. Do transformers really perform badly for graph representation? Advances in neural information processing systems 34 (2021), 28877--28888."},{"key":"e_1_3_2_1_31_1","volume-title":"NeurIPS","author":"Yu Zhi","year":"2024","unstructured":"Zhi Yu, Han Li, and Ling Xu. 2024. MoleBlend: Blending Architectures for Molecular Property Prediction. 
In NeurIPS 2024."},{"key":"e_1_3_2_1_32_1","doi-asserted-by":"publisher","DOI":"10.26434\/chemrxiv-2022-jjm0j-v4"}],"event":{"name":"CIKM '25: The 34th ACM International Conference on Information and Knowledge Management","sponsor":["SIGIR ACM Special Interest Group on Information Retrieval","SIGWEB ACM Special Interest Group on Hypertext, Hypermedia, and Web"],"location":"Seoul Republic of Korea","acronym":"CIKM '25"},"container-title":["Proceedings of the 34th ACM International Conference on Information and Knowledge Management"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3746252.3760877","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,12]],"date-time":"2025-12-12T00:45:41Z","timestamp":1765500341000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3746252.3760877"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11,10]]},"references-count":32,"alternative-id":["10.1145\/3746252.3760877","10.1145\/3746252"],"URL":"https:\/\/doi.org\/10.1145\/3746252.3760877","relation":{},"subject":[],"published":{"date-parts":[[2025,11,10]]},"assertion":[{"value":"2025-11-10","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}