{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,29]],"date-time":"2026-03-29T06:34:43Z","timestamp":1774766083379,"version":"3.50.1"},"reference-count":93,"publisher":"Association for Computing Machinery (ACM)","issue":"2","license":[{"start":{"date-parts":[[2021,1,17]],"date-time":"2021-01-17T00:00:00Z","timestamp":1610841600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":["SIGKDD Explor. Newsl."],"published-print":{"date-parts":[[2021,1,17]]},"abstract":"<jats:p>Deep neural networks (DNNs) have achieved significant performance in various tasks. However, recent studies have shown that DNNs can be easily fooled by small perturbation on the input, called adversarial attacks.<\/jats:p>","DOI":"10.1145\/3447556.3447566","type":"journal-article","created":{"date-parts":[[2021,1,17]],"date-time":"2021-01-17T23:08:00Z","timestamp":1610924880000},"page":"19-34","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":143,"title":["Adversarial Attacks and Defenses on Graphs"],"prefix":"10.1145","volume":"22","author":[{"given":"Wei","family":"Jin","sequence":"first","affiliation":[{"name":"Michigan State University, East Lansing , MI, USA"}]},{"given":"Yaxing","family":"Li","sequence":"additional","affiliation":[{"name":"Michigan State University, East Lansing , MI, USA"}]},{"given":"Han","family":"Xu","sequence":"additional","affiliation":[{"name":"Michigan State University, East Lansing, MI, USA"}]},{"given":"Yiqi","family":"Wang","sequence":"additional","affiliation":[{"name":"Michigan State University, East Lansing, MI, USA"}]},{"given":"Shuiwang","family":"Ji","sequence":"additional","affiliation":[{"name":"Texas A&amp;M University, College Station, TX, 
USA"}]},{"given":"Charu","family":"Aggarwal","sequence":"additional","affiliation":[{"name":"IBM T.J. Watson Research Center, Ossining, NY, USA"}]},{"given":"Jiliang","family":"Tang","sequence":"additional","affiliation":[{"name":"Michigan State University, East Lansing, MI, USA"}]}],"member":"320","published-online":{"date-parts":[[2021,1,17]]},"reference":[{"key":"e_1_2_1_1_1","doi-asserted-by":"publisher","DOI":"10.1145\/1134271.1134277"},{"key":"e_1_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.5555\/1738952"},{"key":"e_1_2_1_3_1","volume-title":"Relational inductive biases, deep learning, and graph networks. arXiv preprint arXiv:1806.01261","author":"Battaglia P. W.","year":"2018"},{"key":"e_1_2_1_4_1","volume-title":"Deep gaussian embedding of graphs: Unsupervised inductive learning via ranking. arXiv preprint arXiv:1707.03815","author":"Bojchevski A.","year":"2017"},{"key":"e_1_2_1_5_1","volume-title":"Adversarial attacks on node embeddings via graph poisoning. arXiv preprint arXiv:1809.01093","author":"Bojchevski A.","year":"2018"},{"key":"e_1_2_1_6_1","first-page":"8317","volume-title":"Advances in Neural Information Processing Systems","author":"Bojchevski A.","year":"2019"},{"key":"e_1_2_1_7_1","volume-title":"Efficient robustness certificates for discrete data: Sparsityaware randomized smoothing for graphs, images and more. arXiv preprint arXiv:2008.12952","author":"Bojchevski A.","year":"2020"},{"key":"e_1_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.5555\/2999792.2999923"},{"key":"e_1_2_1_9_1","volume-title":"The general blackbox attack method for graph neural networks. arXiv preprint arXiv:1908.01297","author":"Chang H.","year":"2019"},{"key":"e_1_2_1_10_1","doi-asserted-by":"publisher","DOI":"10.1109\/TCSS.2019.2912801"},{"key":"e_1_2_1_11_1","volume-title":"Link prediction adversarial attack. arXiv preprint arXiv:1810.01110","author":"Chen J.","year":"2018"},{"key":"e_1_2_1_12_1","volume-title":"Can adversarial network attack be defended? 
arXiv preprint arXiv:1903.05994","author":"Chen J.","year":"2019"},{"key":"e_1_2_1_13_1","volume-title":"Fast gradient attack on network embedding. arXiv preprint arXiv:1809.02797","author":"Chen J.","year":"2018"},{"key":"e_1_2_1_14_1","volume-title":"Targeted backdoor attacks on deep learning systems using data poisoning. arXiv preprint arXiv:1712.05526","author":"Chen X.","year":"2017"},{"key":"e_1_2_1_15_1","doi-asserted-by":"publisher","DOI":"10.1145\/3133956.3134083"},{"key":"e_1_2_1_16_1","volume-title":"Certified adversarial robustness via randomized smoothing. arXiv preprint arXiv:1902.02918","author":"Cohen J. M.","year":"2019"},{"key":"e_1_2_1_17_1","volume-title":"Adversarial attack on graph structured data. arXiv preprint arXiv:1806.02371","author":"Dai H.","year":"2018"},{"key":"e_1_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.1145\/3308558.3313445"},{"key":"e_1_2_1_19_1","volume-title":"Batch virtual adversarial training for graph convolutional networks. arXiv preprint arXiv:1902.09192","author":"Deng Z.","year":"2019"},{"key":"e_1_2_1_20_1","doi-asserted-by":"publisher","DOI":"10.1145\/3394486.3403135"},{"key":"e_1_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.1145\/3336191.3371789"},{"key":"e_1_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2019.2957786"},{"key":"e_1_2_1_23_1","volume-title":"Learning discrete structures for graph neural networks. arXiv preprint arXiv:1903.11960","author":"Franceschi L.","year":"2019"},{"key":"e_1_2_1_24_1","volume-title":"Neural message passing for quantum chemistry. arXiv preprint arXiv:1704.01212","author":"Gilmer J.","year":"2017"},{"key":"e_1_2_1_25_1","volume-title":"Explaining and harnessing adversarial examples. arXiv preprint arXiv:1412.6572","author":"Goodfellow I. J.","year":"2014"},{"key":"e_1_2_1_26_1","volume-title":"Representation learning on graphs: Methods and applications. arXiv preprint arXiv:1709.05584","author":"Hamilton W. 
L.","year":"2017"},{"key":"e_1_2_1_27_1","doi-asserted-by":"publisher","DOI":"10.1145\/3018661.3018667"},{"key":"e_1_2_1_28_1","volume-title":"Graphsac: Detecting anomalies in large-scale graphs. arXiv preprint arXiv:1910.09589","author":"Ioannidis V. N.","year":"2019"},{"key":"e_1_2_1_29_1","doi-asserted-by":"publisher","DOI":"10.1145\/775152.775191"},{"key":"e_1_2_1_30_1","doi-asserted-by":"publisher","DOI":"10.1145\/3366423.3380029"},{"key":"e_1_2_1_31_1","first-page":"11313","volume-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition","author":"Jiang B.","year":"2019"},{"key":"e_1_2_1_32_1","volume-title":"ICML Workshop on Learning and Reasoning with GraphStructured Representations","author":"Jin H.","year":"2019"},{"key":"e_1_2_1_33_1","volume-title":"Graph structure learning for robust graph neural networks. arXiv preprint arXiv:2005.10203","author":"Jin W.","year":"2020"},{"key":"e_1_2_1_34_1","volume-title":"Semi-supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907","author":"Kipf T. N.","year":"2016"},{"key":"e_1_2_1_35_1","volume-title":"Variational graph autoencoders. arXiv preprint arXiv:1611.07308","author":"Kipf T. N.","year":"2016"},{"key":"e_1_2_1_36_1","volume-title":"Predict then propagate: Graph neural networks meet personalized pagerank. arXiv preprint arXiv:1810.05997","author":"Klicpera J.","year":"2018"},{"key":"e_1_2_1_37_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00479"},{"key":"e_1_2_1_38_1","doi-asserted-by":"publisher","DOI":"10.1145\/3366423.3380171"},{"key":"e_1_2_1_39_1","volume-title":"Regional homogeneity: Towards learning transferable universal adversarial perturbations against defenses. arXiv preprint arXiv:1904.00979","author":"Li Y.","year":"2019"},{"key":"e_1_2_1_40_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6810"},{"key":"e_1_2_1_41_1","volume-title":"Deeprobust: A pytorch library for adversarial attacks and defenses. 
arXiv preprint arXiv:2005.06149","author":"Li Y.","year":"2020"},{"key":"e_1_2_1_42_1","doi-asserted-by":"publisher","DOI":"10.5555\/2886521.2886624"},{"key":"e_1_2_1_43_1","volume-title":"A unified framework for data poisoning attack to graph-based semi-supervised learning. arXiv preprint arXiv:1910.14147","author":"Liu X.","year":"2019"},{"key":"e_1_2_1_44_1","volume-title":"Black-box adversarial attacks on graph neural networks with limited node access. arXiv preprint arXiv:2006.05057","author":"Ma J.","year":"2020"},{"key":"e_1_2_1_45_1","doi-asserted-by":"publisher","DOI":"10.5555\/3304222.3304251"},{"key":"e_1_2_1_46_1","volume-title":"Attacking graph convolutional networks via rewiring. arXiv preprint arXiv:1906.03750","author":"Ma Y.","year":"2019"},{"key":"e_1_2_1_47_1","volume-title":"Towards deep learning models resistant to adversarial attacks. arXiv preprint arXiv:1706.06083","author":"Madry A.","year":"2017"},{"key":"e_1_2_1_48_1","volume-title":"Encoding sentences with graph convolutional networks for semantic role labeling. arXiv preprint arXiv:1703.04826","author":"Marcheggiani D.","year":"2017"},{"key":"e_1_2_1_49_1","volume-title":"Birds of a feather: Homophily in social networks. Annual review of sociology, 27(1):415--444","author":"McPherson M.","year":"2001"},{"key":"e_1_2_1_50_1","volume-title":"Playing atari with deep reinforcement learning. arXiv preprint arXiv:1312.5602","author":"Mnih V.","year":"2013"},{"key":"e_1_2_1_51_1","doi-asserted-by":"publisher","DOI":"10.1145\/2623330.2623732"},{"key":"e_1_2_1_52_1","doi-asserted-by":"publisher","DOI":"10.5555\/3298483.3298597"},{"key":"e_1_2_1_53_1","doi-asserted-by":"publisher","DOI":"10.1145\/3159652.3159706"},{"key":"e_1_2_1_54_1","unstructured":"A. Said E. W. De Luca and S. Albayrak. 
How social relationships affect user similarities."},{"key":"e_1_2_1_55_1","doi-asserted-by":"publisher","DOI":"10.1609\/aimag.v29i3.2157"},{"key":"e_1_2_1_56_1","volume-title":"Adversarial attack and defense on graph data: A survey. arXiv preprint arXiv:1812.10528","author":"Sun L.","year":"2018"},{"key":"e_1_2_1_57_1","volume-title":"Node injection attacks on graphs via reinforcement learning. arXiv preprint arXiv:1909.06543","author":"Sun Y.","year":"2019"},{"key":"e_1_2_1_58_1","doi-asserted-by":"publisher","DOI":"10.5555\/3305890.3306024"},{"key":"e_1_2_1_59_1","doi-asserted-by":"publisher","DOI":"10.1145\/2736277.2741093"},{"key":"e_1_2_1_60_1","doi-asserted-by":"publisher","DOI":"10.1145\/3336191.3371851"},{"key":"e_1_2_1_61_1","volume-title":"Adversarial immunization for improving certifiable robustness on graphs. arXiv preprint arXiv:2007.09647","author":"Tao S.","year":"2020"},{"key":"e_1_2_1_62_1","volume-title":"Graph attention networks. arXiv preprint arXiv:1710.10903","author":"Cucurull G.","year":"2017"},{"key":"e_1_2_1_63_1","volume-title":"Graph attention networks","author":"Veli\u010dkovi\u0107 P.","year":"2018"},{"key":"e_1_2_1_64_1","doi-asserted-by":"publisher","DOI":"10.1145\/3319535.3354206"},{"key":"e_1_2_1_65_1","volume-title":"Certified robustness of graph neural networks against adversarial structural perturbation. arXiv preprint arXiv:2008.10715","author":"Jia J.","year":"2020"},{"key":"e_1_2_1_66_1","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2019.00031"},{"key":"e_1_2_1_67_1","volume-title":"Scalable attack on graph data by injecting vicious nodes. arXiv preprint arXiv:2004.13825","author":"Wang J.","year":"2020"},{"key":"e_1_2_1_68_1","doi-asserted-by":"publisher","DOI":"10.1145\/3308558.3313562"},{"key":"e_1_2_1_69_1","volume-title":"Graphdefense: Towards robust graph convolutional networks","author":"Wang X.","year":"2019"},{"key":"e_1_2_1_70_1","unstructured":"
Jan 2018."},{"key":"e_1_2_1_71_1","doi-asserted-by":"publisher","DOI":"10.1038\/s41562-017-0290-3"},{"key":"e_1_2_1_72_1","doi-asserted-by":"publisher","DOI":"10.5555\/3367471.3367713"},{"key":"e_1_2_1_73_1","volume-title":"A comprehensive survey on graph neural networks. arXiv preprint arXiv:1901.00596","author":"Wu Z.","year":"2019"},{"key":"e_1_2_1_74_1","volume-title":"Graph backdoor. arXiv preprint arXiv:2006.11890","author":"Xi Z.","year":"2020"},{"key":"e_1_2_1_75_1","volume-title":"Adversarial attacks and defenses in images, graphs and text: A review. arXiv preprint arXiv:1909.08072","author":"Ma H. Y.","year":"2019"},{"key":"e_1_2_1_76_1","doi-asserted-by":"publisher","DOI":"10.5555\/3367471.3367592"},{"key":"e_1_2_1_77_1","volume-title":"Representation learning on graphs with jumping knowledge networks. arXiv preprint arXiv:1806.03536","author":"Li K. C.","year":"2018"},{"key":"e_1_2_1_78_1","volume-title":"Characterizing malicious edges targeting on graph neural networks","author":"Xu X.","year":"2018"},{"key":"e_1_2_1_79_1","volume-title":"Graph universal adversarial attacks: A few bad actors ruin graph learning models","author":"Zang X.","year":"2020"},{"key":"e_1_2_1_80_1","volume-title":"Defensevgae: Defending against adversarial attacks on graph data via a variational graph autoencoder. arXiv preprint arXiv:2006.08900","author":"Zhang A.","year":"2020"},{"key":"e_1_2_1_81_1","volume-title":"Towards data poisoning attack against knowledge graph embedding. ArXiv, abs\/1904.12052","author":"Zhang H.","year":"2019"},{"key":"e_1_2_1_82_1","volume-title":"Gnnguard: Defending graph neural networks against adversarial attacks. arXiv preprint arXiv:2006.08149","author":"Zhang X.","year":"2020"},{"key":"e_1_2_1_83_1","volume-title":"Backdoor attacks to graph neural networks. arXiv preprint arXiv:2006.11165","author":"Zhang Z.","year":"2020"},{"key":"e_1_2_1_84_1","volume-title":"Graph neural networks: A review of methods and applications. 
arXiv preprint arXiv:1812.08434","author":"Zhou J.","year":"2018"},{"key":"e_1_2_1_85_1","first-page":"1522","volume-title":"2019 IEEE International Conference on Data Mining (ICDM)","author":"Li L.","year":"2019"},{"key":"e_1_2_1_86_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-38961-1_38"},{"key":"e_1_2_1_87_1","doi-asserted-by":"publisher","DOI":"10.1145\/3292500.3330851"},{"key":"e_1_2_1_88_1","volume-title":"Learning from labeled and unlabeled data with label propagation","author":"Zhu X.","year":"2002"},{"key":"e_1_2_1_89_1","doi-asserted-by":"publisher","DOI":"10.1145\/3219819.3220078"},{"key":"e_1_2_1_90_1","doi-asserted-by":"publisher","DOI":"10.1145\/3394520"},{"key":"e_1_2_1_91_1","volume-title":"Adversarial attacks on graph neural networks via meta learning. arXiv preprint arXiv:1902.08412","author":"Z\u00fcgner D.","year":"2019"},{"key":"e_1_2_1_92_1","doi-asserted-by":"publisher","DOI":"10.1145\/3292500.3330905"},{"key":"e_1_2_1_93_1","doi-asserted-by":"publisher","DOI":"10.1145\/3394486.3403217"}],"container-title":["ACM SIGKDD Explorations 
Newsletter"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3447556.3447566","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3447556.3447566","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T21:28:37Z","timestamp":1750195717000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3447556.3447566"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,1,17]]},"references-count":93,"journal-issue":{"issue":"2","published-print":{"date-parts":[[2021,1,17]]}},"alternative-id":["10.1145\/3447556.3447566"],"URL":"https:\/\/doi.org\/10.1145\/3447556.3447566","relation":{},"ISSN":["1931-0145","1931-0153"],"issn-type":[{"value":"1931-0145","type":"print"},{"value":"1931-0153","type":"electronic"}],"subject":[],"published":{"date-parts":[[2021,1,17]]},"assertion":[{"value":"2021-01-17","order":2,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}