{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,9]],"date-time":"2026-01-09T21:47:35Z","timestamp":1767995255603,"version":"3.49.0"},"publisher-location":"New York, NY, USA","reference-count":44,"publisher":"ACM","license":[{"start":{"date-parts":[[2022,8,14]],"date-time":"2022-08-14T00:00:00Z","timestamp":1660435200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62122013, U2001211"],"award-info":[{"award-number":["62122013, U2001211"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2022,8,14]]},"DOI":"10.1145\/3534678.3539320","type":"proceedings-article","created":{"date-parts":[[2022,8,12]],"date-time":"2022-08-12T19:06:12Z","timestamp":1660331172000},"page":"357-366","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":27,"title":["FreeKD"],"prefix":"10.1145","author":[{"given":"Kaituo","family":"Feng","sequence":"first","affiliation":[{"name":"Beijing Institute of Technology, Beijing, China"}]},{"given":"Changsheng","family":"Li","sequence":"additional","affiliation":[{"name":"Beijing Institute of Technology, Beijing, China"}]},{"given":"Ye","family":"Yuan","sequence":"additional","affiliation":[{"name":"Beijing Institute of Technology, Beijing, China"}]},{"given":"Guoren","family":"Wang","sequence":"additional","affiliation":[{"name":"Beijing Institute of Technology, Beijing, China"}]}],"member":"320","published-online":{"date-parts":[[2022,8,14]]},"reference":[{"key":"e_1_3_2_2_1_1","volume-title":"Improving multi-task deep neural networks via knowledge distillation for natural language understanding. arXiv","year":"2019","unstructured":"2019. Improving multi-task deep neural networks via knowledge distillation for natural language understanding. arXiv (2019)."},{"key":"e_1_3_2_2_2_1","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2017.2743240"},{"key":"e_1_3_2_2_3_1","doi-asserted-by":"crossref","unstructured":"Cristian Bucilua Rich Caruana and Alexandru Niculescu-Mizil. 2006. Model - compression. In KDD. 535--541.","DOI":"10.1145\/1150402.1150464"},{"key":"e_1_3_2_2_4_1","volume-title":"Topology-Imbalance Learning for Semi-Supervised Node Classification. NeurIPS","author":"Chen Deli","year":"2021","unstructured":"Deli Chen, Yankai Lin, Guangxiang Zhao, Xuancheng Ren, Peng Li, Jie Zhou, and Xu Sun. 2021. Topology-Imbalance Learning for Semi-Supervised Node Classification. NeurIPS (2021)."},{"key":"e_1_3_2_2_5_1","volume-title":"Learning efficient object detection models with knowledge distillation. NeurIPS","author":"Chen Guobin","year":"2017","unstructured":"Guobin Chen, Wongun Choi, Xiang Yu, Tony Han, and Manmohan Chandraker. 2017. Learning efficient object detection models with knowledge distillation. NeurIPS (2017)."},{"key":"e_1_3_2_2_6_1","unstructured":"Jie Chen Tengfei Ma and Cao Xiao. 2018. Fastgcn: fast learning with graph convolutional networks via importance sampling. (2018)."},{"key":"e_1_3_2_2_7_1","unstructured":"Ming Chen Zhewei Wei Zengfeng Huang Bolin Ding and Yaliang Li. 2020. Simple and deep graph convolutional networks. In ICML. 1725--1735."},{"key":"e_1_3_2_2_8_1","doi-asserted-by":"crossref","unstructured":"Tianshui Chen Zhouxia Wang Guanbin Li and Liang Lin. 2018. Recurrent attentional reinforcement learning for multi-label image recognition. In AAAI.","DOI":"10.1609\/aaai.v32i1.12281"},{"key":"e_1_3_2_2_9_1","doi-asserted-by":"crossref","unstructured":"Yuzhao Chen Yatao Bian Xi Xiao Yu Rong Tingyang Xu and Junzhou Huang. 2021. On self-distilling graph neural network. In IJCAI. 2278--2284.","DOI":"10.24963\/ijcai.2021\/314"},{"key":"e_1_3_2_2_10_1","doi-asserted-by":"publisher","DOI":"10.1145\/3292500.3330925"},{"key":"e_1_3_2_2_11_1","doi-asserted-by":"crossref","unstructured":"Xiang Deng and Zhongfei Zhang. 2021. Graph-Free Knowledge Distillation for Graph Neural Networks. (2021).","DOI":"10.24963\/ijcai.2021\/320"},{"key":"e_1_3_2_2_12_1","unstructured":"Tuomas Haarnoja Aurick Zhou Pieter Abbeel and Sergey Levine. 2018. Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor. In ICML. 1861--1870."},{"key":"e_1_3_2_2_13_1","unstructured":"William L Hamilton Rex Ying and Jure Leskovec. 2017. Inductive representation learning on large graphs. In NeurIPS. 1025--1035."},{"key":"e_1_3_2_2_14_1","unstructured":"Geoffrey Hinton Oriol Vinyals Jeff Dean et al. 2015. Distilling the knowledge in a neural network. arXiv 2 7 (2015)."},{"key":"e_1_3_2_2_15_1","unstructured":"Wenbing Huang Tong Zhang Yu Rong and Junzhou Huang. 2018. Adaptive sampling towards fast graph representation learning. (2018)."},{"key":"e_1_3_2_2_16_1","volume-title":"Sequence-level knowledge distillation. arXiv","author":"Kim Yoon","year":"2016","unstructured":"Yoon Kim and Alexander M Rush. 2016. Sequence-level knowledge distillation. arXiv (2016)."},{"key":"e_1_3_2_2_17_1","volume-title":"Adam: A method for stochastic optimization.","author":"Kingma Diederik P","year":"2014","unstructured":"Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. (2014)."},{"key":"e_1_3_2_2_18_1","volume-title":"Semi-supervised classification with graph convolutional networks. arXiv","author":"Kipf Thomas N","year":"2016","unstructured":"Thomas N Kipf and Max Welling. 2016. Semi-supervised classification with graph convolutional networks. arXiv (2016)."},{"key":"e_1_3_2_2_19_1","volume-title":"Predict then propagate: Graph neural networks meet personalized pagerank. arXiv","author":"Klicpera Johannes","year":"2018","unstructured":"Johannes Klicpera, Aleksandar Bojchevski, and Stephan G\u00fcnnemann. 2018. Predict then propagate: Graph neural networks meet personalized pagerank. arXiv (2018)."},{"key":"e_1_3_2_2_20_1","volume-title":"Policy-gnn: Aggregation optimization for graph neural networks. In KDD. 461--471.","author":"Lai Kwei-Herng","year":"2020","unstructured":"Kwei-Herng Lai, Daochen Zha, Kaixiong Zhou, and Xia Hu. 2020. Policy-gnn: Aggregation optimization for graph neural networks. In KDD. 461--471."},{"key":"e_1_3_2_2_21_1","doi-asserted-by":"crossref","unstructured":"Shining Liang Ming Gong Jian Pei Linjun Shou Wanli Zuo Xianglin Zuo and Daxin Jiang. 2021. Reinforced Iterative Knowledge Distillation for Cross-Lingual Named Entity Recognition. In KDD. 3231--3239.","DOI":"10.1145\/3447548.3467196"},{"key":"e_1_3_2_2_22_1","volume-title":"RMM: Reinforced Memory Management for Class-Incremental Learning. NeurIPS","author":"Liu Yaoyao","year":"2021","unstructured":"Yaoyao Liu, Bernt Schiele, and Qianru Sun. 2021. RMM: Reinforced Memory Management for Class-Incremental Learning. NeurIPS (2021)."},{"key":"e_1_3_2_2_23_1","unstructured":"Yongcheng Liu Lu Sheng Jing Shao Junjie Yan Shiming Xiang and Chunhong Pan. 2018. Multi-label image classification via knowledge distillation from weaklysupervised detection. In MM. 700--708."},{"key":"e_1_3_2_2_24_1","doi-asserted-by":"crossref","unstructured":"Seyed Iman Mirzadeh Mehrdad Farajtabar Ang Li Nir Levine Akihiro Matsukawa and Hassan Ghasemzadeh. 2020. Improved knowledge distillation via teacher assistant. In AAAI. 5191--5198.","DOI":"10.1609\/aaai.v34i04.5963"},{"key":"e_1_3_2_2_25_1","doi-asserted-by":"crossref","unstructured":"Volodymyr Mnih Koray Kavukcuoglu David Silver Andrei A Rusu Joel Veness Marc G Bellemare Alex Graves Martin Riedmiller Andreas K Fidjeland Georg Ostrovski et al. 2015. Human-level control through deep reinforcement learning. nature 518 7540 (2015) 529--533.","DOI":"10.1038\/nature14236"},{"key":"e_1_3_2_2_26_1","doi-asserted-by":"crossref","unstructured":"Min-hwan Oh and Garud Iyengar. 2019. Sequential anomaly detection using inverse reinforcement learning. In KDD. 1480--1490.","DOI":"10.1145\/3292500.3330932"},{"key":"e_1_3_2_2_28_1","doi-asserted-by":"crossref","unstructured":"Edwin Pednault Naoki Abe and Bianca Zadrozny. 2002. Sequential cost-sensitive decision making with reinforcement learning. In KDD. 259--268.","DOI":"10.1145\/775047.775086"},{"key":"e_1_3_2_2_29_1","volume-title":"Yu Lei, and Bo Yang.","author":"Pei Hongbin","year":"2020","unstructured":"Hongbin Pei, Bingzhe Wei, Kevin Chen-Chuan Chang, Yu Lei, and Bo Yang. 2020. Geom-gcn: Geometric graph convolutional networks. (2020)."},{"key":"e_1_3_2_2_30_1","doi-asserted-by":"publisher","DOI":"10.1093\/comnet\/cnab014"},{"key":"e_1_3_2_2_31_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.cosrev.2007.05.001"},{"key":"e_1_3_2_2_32_1","doi-asserted-by":"publisher","DOI":"10.1609\/aimag.v29i3.2157"},{"key":"e_1_3_2_2_33_1","volume-title":"Graph attention networks. arXiv preprint arXiv:1710.10903","author":"Velickovic Petar","year":"2017","unstructured":"Petar Velickovic, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Lio, and Yoshua Bengio. 2017. Graph attention networks. arXiv preprint arXiv:1710.10903 (2017)."},{"key":"e_1_3_2_2_34_1","doi-asserted-by":"crossref","unstructured":"Bo Wang Minghui Qiu Xisen Wang Yaliang Li Yu Gong Xiaoyi Zeng Jun Huang Bo Zheng Deng Cai and Jingren Zhou. 2019. A minimax game for instance based selective transfer learning. In KDD. 34--43.","DOI":"10.1145\/3292500.3330841"},{"key":"e_1_3_2_2_35_1","volume-title":"Simple statistical gradient-following algorithms for connectionist reinforcement learning. Machine learning 8, 3","author":"Williams Ronald J","year":"1992","unstructured":"Ronald J Williams. 1992. Simple statistical gradient-following algorithms for connectionist reinforcement learning. Machine learning 8, 3 (1992), 229--256."},{"key":"e_1_3_2_2_36_1","unstructured":"Felix Wu Amauri Souza Tianyi Zhang Christopher Fifty Tao Yu and Kilian Weinberger. 2019. Simplifying graph convolutional networks. In ICML. 6861-- 6871."},{"key":"e_1_3_2_2_37_1","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2020.2978386"},{"key":"e_1_3_2_2_38_1","volume-title":"Raymond Chi Wing Wong, and Jiawei Han","author":"Xie Yiqing","year":"2020","unstructured":"Yiqing Xie, Sha Li, Carl Yang, Raymond Chi Wing Wong, and Jiawei Han. 2020. When do gnns work: Understanding and improving neighborhood aggregation. In IJCAI."},{"key":"e_1_3_2_2_39_1","unstructured":"Bencheng Yan Chaokun Wang Gaoyang Guo and Yunkai Lou. 2020. TinyGNN: Learning Efficient Graph Neural Networks. In KDD. 1848--1856."},{"key":"e_1_3_2_2_40_1","doi-asserted-by":"crossref","unstructured":"Cheng Yang Jiawei Liu and Chuan Shi. 2021. Extract the Knowledge of Graph Neural Networks and Go Beyond it: An Effective Knowledge Distillation Framework. In WWW. 1227--1237.","DOI":"10.1145\/3442381.3450068"},{"key":"e_1_3_2_2_41_1","doi-asserted-by":"crossref","unstructured":"Yiding Yang Jiayan Qiu Mingli Song Dacheng Tao and Xinchao Wang. 2020. Distilling knowledge from graph convolutional networks. In CVPR. 7074--7083.","DOI":"10.1109\/CVPR42600.2020.00710"},{"key":"e_1_3_2_2_42_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i16.17680"},{"key":"e_1_3_2_2_43_1","doi-asserted-by":"crossref","unstructured":"Wentao Zhang Yuezihan Jiang Yang Li Zeang Sheng Yu Shen Xupeng Miao Liang Wang Zhi Yang and Bin Cui. 2021. ROD: reception-aware online distillation for sparse graphs. In KDD. 2232--2242.","DOI":"10.1145\/3447548.3467221"},{"key":"e_1_3_2_2_44_1","doi-asserted-by":"crossref","unstructured":"Wentao Zhang Xupeng Miao Yingxia Shao Jiawei Jiang Lei Chen Olivier Ruas and Bin Cui. 2020. Reliable data distillation on graph convolutional network. In SIGMOD. 1399--1414.","DOI":"10.1145\/3318464.3389706"},{"key":"e_1_3_2_2_45_1","volume-title":"Xing Xie, and Zhenhui Li.","author":"Zheng Guanjie","year":"2018","unstructured":"Guanjie Zheng, Fuzheng Zhang, Zihan Zheng, Yang Xiang, Nicholas Jing Yuan, Xing Xie, and Zhenhui Li. 2018. DRN: A deep reinforcement learning framework for news recommendation. In WWW. 167--176."}],"event":{"name":"KDD '22: The 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining","location":"Washington DC USA","acronym":"KDD '22","sponsor":["SIGMOD ACM Special Interest Group on Management of Data","SIGKDD ACM Special Interest Group on Knowledge Discovery in Data"]},"container-title":["Proceedings of the 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3534678.3539320","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3534678.3539320","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T19:02:47Z","timestamp":1750186967000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3534678.3539320"}},"subtitle":["Free-direction Knowledge Distillation for Graph Neural Networks"],"short-title":[],"issued":{"date-parts":[[2022,8,14]]},"references-count":44,"alternative-id":["10.1145\/3534678.3539320","10.1145\/3534678"],"URL":"https:\/\/doi.org\/10.1145\/3534678.3539320","relation":{},"subject":[],"published":{"date-parts":[[2022,8,14]]},"assertion":[{"value":"2022-08-14","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}