{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,7]],"date-time":"2026-02-07T08:27:35Z","timestamp":1770452855995,"version":"3.49.0"},"publisher-location":"Cham","reference-count":35,"publisher":"Springer International Publishing","isbn-type":[{"value":"9783030372279","type":"print"},{"value":"9783030372286","type":"electronic"}],"license":[{"start":{"date-parts":[[2019,1,1]],"date-time":"2019-01-01T00:00:00Z","timestamp":1546300800000},"content-version":"tdm","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2019]]},"DOI":"10.1007\/978-3-030-37228-6_15","type":"book-chapter","created":{"date-parts":[[2019,12,16]],"date-time":"2019-12-16T05:00:05Z","timestamp":1576472405000},"page":"299-318","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":21,"title":["Trojan Attack on Deep Generative Models in Autonomous Driving"],"prefix":"10.1007","author":[{"given":"Shaohua","family":"Ding","sequence":"first","affiliation":[]},{"given":"Yulong","family":"Tian","sequence":"additional","affiliation":[]},{"given":"Fengyuan","family":"Xu","sequence":"additional","affiliation":[]},{"given":"Qun","family":"Li","sequence":"additional","affiliation":[]},{"given":"Sheng","family":"Zhong","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2019,12,13]]},"reference":[{"key":"15_CR1","unstructured":"Azure cognitive services. \nhttps:\/\/azure.microsoft.com\/en-us\/services\/cognitive-services\/"},{"key":"15_CR2","unstructured":"Bloomberg news. \nhttps:\/\/www.bloomberg.com\/news\/articles\/2018-09-17\/self-driving-cars-still-can-t-handle-bad-weather"},{"key":"15_CR3","unstructured":"CNN classifier. 
\nhttps:\/\/github.com\/srini-ry\/ros-traffic-light-classifierr"},{"key":"15_CR4","unstructured":"The intelligence advanced research projects activity. \nhttps:\/\/www.iarpa.gov\/index.php?option=com_content&view=article&id=1150&Itemid=448"},{"key":"15_CR5","unstructured":"Brock, A., Donahue, J., Simonyan, K.: Large scale GAN training for high fidelity natural image synthesis. arXiv preprint \narXiv:1809.11096\n\n (2018)"},{"key":"15_CR6","unstructured":"Chen, B., et al.: Detecting backdoor attacks on deep neural networks by activation clustering. arXiv preprint \narXiv:1811.03728\n\n (2018)"},{"key":"15_CR7","unstructured":"Chen, X., Liu, C., Li, B., Lu, K., Song, D.: Targeted backdoor attacks on deep learning systems using data poisoning. arXiv preprint \narXiv:1712.05526\n\n (2017)"},{"key":"15_CR8","doi-asserted-by":"crossref","unstructured":"Fan, Z., Wu, H., Fu, X., Huang, Y., Ding, X.: Residual-guide network for single image deraining. In: Proceedings of the 26th ACM International Conference on Multimedia, MM 2018, pp. 1751\u20131759. ACM, New York (2018)","DOI":"10.1145\/3240508.3240694"},{"key":"15_CR9","unstructured":"Goodfellow, I., et al.: Generative adversarial nets. In: Advances in Neural Information Processing Systems, pp. 2672\u20132680 (2014)"},{"key":"15_CR10","unstructured":"Gu, T., Dolan-Gavitt, B., Garg, S.: BadNets: identifying vulnerabilities in the machine learning model supply chain. arXiv preprint \narXiv:1708.06733\n\n (2017)"},{"issue":"1","key":"15_CR11","doi-asserted-by":"publisher","first-page":"133","DOI":"10.2478\/popets-2019-0008","volume":"2019","author":"J Hayes","year":"2019","unstructured":"Hayes, J., Melis, L., Danezis, G., De Cristofaro, E.: LOGAN: membership inference attacks against generative models. Proc. Priv. Enhancing Technol. 2019(1), 133\u2013152 (2019)","journal-title":"Proc. Priv. 
Enhancing Technol."},{"key":"15_CR12","doi-asserted-by":"crossref","unstructured":"Isola, P., Zhu, J.Y., Zhou, T., Efros, A.A.: Image-to-image translation with conditional adversarial networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1125\u20131134 (2017)","DOI":"10.1109\/CVPR.2017.632"},{"key":"15_CR13","doi-asserted-by":"crossref","unstructured":"Ji, Y., Zhang, X., Ji, S., Luo, X., Wang, T.: Model-reuse attacks on deep learning systems. In: Proceedings of the 2018 ACM SIGSAC Conference on Computer and Communications Security, pp. 349\u2013363. ACM (2018)","DOI":"10.1145\/3243734.3243757"},{"key":"15_CR14","doi-asserted-by":"publisher","first-page":"315","DOI":"10.1146\/annurev.neuro.23.1.315","volume":"23","author":"S Kastner","year":"2000","unstructured":"Kastner, S., Ungerleider, L.G.: Mechanisms of visual attention in the human cortex. Ann. Rev. Neurosci. 23, 315\u2013341 (2000)","journal-title":"Ann. Rev. Neurosci."},{"key":"15_CR15","unstructured":"Kingma, D.P., Welling, M.: Auto-encoding variational bayes. arXiv preprint \narXiv:1312.6114\n\n (2013)"},{"key":"15_CR16","doi-asserted-by":"crossref","unstructured":"Klein, G., Kim, Y., Deng, Y., Senellart, J., Rush, A.M.: OpenNMT: open-source toolkit for neural machine translation. In: Proceedings of ACL (2017)","DOI":"10.18653\/v1\/P17-4012"},{"key":"15_CR17","doi-asserted-by":"crossref","unstructured":"Kos, J., Fischer, I., Song, D.: Adversarial examples for generative models. In: 2018 IEEE Security and Privacy Workshops (SPW), pp. 36\u201342. IEEE (2018)","DOI":"10.1109\/SPW.2018.00014"},{"key":"15_CR18","doi-asserted-by":"crossref","unstructured":"Kupyn, O., Budzan, V., Mykhailych, M., Mishkin, D., Matas, J.: DeblurGAN: blind motion deblurring using conditional adversarial networks. 
In: Proceedings of CVPR (2018)","DOI":"10.1109\/CVPR.2018.00854"},{"issue":"4","key":"15_CR19","doi-asserted-by":"publisher","first-page":"244","DOI":"10.1049\/iet-cvi.2010.0040","volume":"5","author":"F Larsson","year":"2011","unstructured":"Larsson, F., Felsberg, M., Forssen, P.E.: Correlating Fourier descriptors of local patches for road sign recognition. IET Comput. Vis. 5(4), 244\u2013254 (2011)","journal-title":"IET Comput. Vis."},{"key":"15_CR20","doi-asserted-by":"crossref","unstructured":"Li, B., Peng, X., Wang, Z., Xu, J., Feng, D.: AOD-NET: all-in-one dehazing network. In: Proceedings of CVPR (2017)","DOI":"10.1109\/ICCV.2017.511"},{"key":"15_CR21","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"273","DOI":"10.1007\/978-3-030-00470-5_13","volume-title":"Research in Attacks, Intrusions, and Defenses","author":"K Liu","year":"2018","unstructured":"Liu, K., Dolan-Gavitt, B., Garg, S.: Fine-pruning: defending against backdooring attacks on deep neural networks. In: Bailey, M., Holz, T., Stamatogiannakis, M., Ioannidis, S. (eds.) RAID 2018. LNCS, vol. 11050, pp. 273\u2013294. Springer, Cham (2018). \nhttps:\/\/doi.org\/10.1007\/978-3-030-00470-5_13"},{"key":"15_CR22","doi-asserted-by":"crossref","unstructured":"Liu, Y., et al.: Trojaning attack on neural networks (2017)","DOI":"10.14722\/ndss.2018.23291"},{"key":"15_CR23","first-page":"2579","volume":"9","author":"L Maaten van der","year":"2008","unstructured":"van der Maaten, L., Hinton, G.: Visualizing data using t-SNE. J. Mach. Learn. Res. 9, 2579\u20132605 (2008)","journal-title":"J. Mach. Learn. Res."},{"issue":"4","key":"15_CR24","doi-asserted-by":"publisher","first-page":"1484","DOI":"10.1109\/TITS.2012.2209421","volume":"13","author":"A Mogelmose","year":"2012","unstructured":"Mogelmose, A., Trivedi, M.M., Moeslund, T.B.: Vision-based traffic sign detection and analysis for intelligent driver assistance systems: perspectives and survey. IEEE Trans. 
Intell. Transp. Syst. 13(4), 1484\u20131497 (2012)","journal-title":"IEEE Trans. Intell. Transp. Syst."},{"key":"15_CR25","unstructured":"Pan, J., et al.: SalGAN: visual saliency prediction with generative adversarial networks. arXiv, January 2017"},{"key":"15_CR26","doi-asserted-by":"crossref","unstructured":"Papineni, K., Roukos, S., Ward, T., Zhu, W.J.: Bleu: a method for automatic evaluation of machine translation. In: Proceedings of ACL (2002)","DOI":"10.3115\/1073083.1073135"},{"key":"15_CR27","doi-asserted-by":"crossref","unstructured":"Pasquini, D., Mingione, M., Bernaschi, M.: Out-domain examples for generative models (2019)","DOI":"10.1109\/EuroSPW.2019.00037"},{"key":"15_CR28","doi-asserted-by":"crossref","unstructured":"Qian, R., Tan, R.T., Yang, W., Su, J., Liu, J.: Attentive generative adversarial network for raindrop removal from a single image. In: Proceedings of CVPR (2018)","DOI":"10.1109\/CVPR.2018.00263"},{"key":"15_CR29","unstructured":"Radford, A., Metz, L., Chintala, S.: Unsupervised representation learning with deep convolutional generative adversarial networks. arXiv preprint \narXiv:1511.06434\n\n (2015)"},{"key":"15_CR30","unstructured":"Sutskever, I., Vinyals, O., Le, Q.V.: Sequence to sequence learning with neural networks. In: Advances in Neural Information Processing Systems, pp. 3104\u20133112 (2014)"},{"key":"15_CR31","unstructured":"Tran, B., Li, J., Madry, A.: Spectral signatures in backdoor attacks. In: Advances in Neural Information Processing Systems, pp. 8011\u20138021 (2018)"},{"key":"15_CR32","unstructured":"Uric\u00e1r, M., Kr\u00edzek, P., Hurych, D., Sobh, I., Yogamani, S., Denny, P.: Yes, we GAN: applying adversarial techniques for autonomous driving. CoRR abs\/1902.03442 (2019). \nhttp:\/\/arxiv.org\/abs\/1902.03442"},{"key":"15_CR33","doi-asserted-by":"crossref","unstructured":"Wang, Y., et al.: Tacotron: towards end-to-end speech synthesis. 
arXiv preprint \narXiv:1703.10135\n\n (2017)","DOI":"10.21437\/Interspeech.2017-1452"},{"issue":"4","key":"15_CR34","doi-asserted-by":"publisher","first-page":"600","DOI":"10.1109\/TIP.2003.819861","volume":"13","author":"Z Wang","year":"2004","unstructured":"Wang, Z., Bovik, A., Sheikh, H., Simoncelli, E.: Image quality assessment: from error visibility to structural similarity. IEEE Trans. Image Process. 13(4), 600\u2013612 (2004)","journal-title":"IEEE Trans. Image Process."},{"key":"15_CR35","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"294","DOI":"10.1007\/978-3-030-01234-2_18","volume-title":"Computer Vision \u2013 ECCV 2018","author":"Y Zhang","year":"2018","unstructured":"Zhang, Y., Li, K., Li, K., Wang, L., Zhong, B., Fu, Y.: Image super-resolution using very deep residual channel attention networks. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11211, pp. 294\u2013310. Springer, Cham (2018). 
\nhttps:\/\/doi.org\/10.1007\/978-3-030-01234-2_18"}],"container-title":["Lecture Notes of the Institute for Computer Sciences, Social Informatics and Telecommunications Engineering","Security and Privacy in Communication Networks"],"original-title":[],"language":"en","link":[{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-37228-6_15","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2019,12,16]],"date-time":"2019-12-16T05:01:30Z","timestamp":1576472490000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/978-3-030-37228-6_15"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019]]},"ISBN":["9783030372279","9783030372286"],"references-count":35,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-37228-6_15","relation":{},"ISSN":["1867-8211","1867-822X"],"issn-type":[{"value":"1867-8211","type":"print"},{"value":"1867-822X","type":"electronic"}],"subject":[],"published":{"date-parts":[[2019]]},"assertion":[{"value":"13 December 2019","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"SecureComm","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Security and Privacy in Communication Systems","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Orlando, VA","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"USA","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2019","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference 
Information"}},{"value":"23 October 2019","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"25 October 2019","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"15","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"securecomm2019","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/securecomm.org","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"easychair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"149","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"38","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"18","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"26% - The value is computed by the equation \"Number 
of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.5","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}