{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,9]],"date-time":"2026-05-09T09:43:52Z","timestamp":1778319832231,"version":"3.51.4"},"reference-count":69,"publisher":"Springer Science and Business Media LLC","issue":"7676","license":[{"start":{"date-parts":[[2017,10,1]],"date-time":"2017-10-01T00:00:00Z","timestamp":1506816000000},"content-version":"tdm","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Nature"],"published-print":{"date-parts":[[2017,10]]},"DOI":"10.1038\/nature24270","type":"journal-article","created":{"date-parts":[[2017,10,17]],"date-time":"2017-10-17T16:13:48Z","timestamp":1508256828000},"page":"354-359","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":6630,"title":["Mastering the game of Go without human knowledge"],"prefix":"10.1038","volume":"550","author":[{"given":"David","family":"Silver","sequence":"first","affiliation":[]},{"given":"Julian","family":"Schrittwieser","sequence":"additional","affiliation":[]},{"given":"Karen","family":"Simonyan","sequence":"additional","affiliation":[]},{"given":"Ioannis","family":"Antonoglou","sequence":"additional","affiliation":[]},{"given":"Aja","family":"Huang","sequence":"additional","affiliation":[]},{"given":"Arthur","family":"Guez","sequence":"additional","affiliation":[]},{"given":"Thomas","family":"Hubert","sequence":"additional","affiliation":[]},{"given":"Lucas","family":"Baker","sequence":"additional","affiliation":[]},{"given":"Matthew","family":"Lai","sequence":"additional","affiliation":[]},{"given":"Adrian","family":"Bolton","sequence":"additional","affiliation":[]},{"given":"Yutian","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Timothy","family":"Lillicrap","sequence":"additional","affiliation":[]},{"given":"Fan","family":"Hui","sequence":"additional","affiliation":[]},{"given":"Laurent","family":"Sifre","sequence":"additional","affiliation":[]},{"given":"George","family":"van den Driessche","sequence":"additional","affiliation":[]},{"given":"Thore","family":"Graepel","sequence":"additional","affiliation":[]},{"given":"Demis","family":"Hassabis","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2017,10,19]]},"reference":[{"key":"BFnature24270_CR1","doi-asserted-by":"crossref","unstructured":"Friedman, J., Hastie, T. & Tibshirani, R. The Elements of Statistical Learning: Data Mining, Inference, and Prediction (Springer, 2009)","DOI":"10.1007\/978-0-387-84858-7"},{"key":"BFnature24270_CR2","doi-asserted-by":"publisher","first-page":"436","DOI":"10.1038\/nature14539","volume":"521","author":"Y LeCun","year":"2015","unstructured":"LeCun, Y., Bengio, Y. & Hinton, G. Deep learning. Nature 521, 436\u2013444 (2015)","journal-title":"Nature"},{"key":"BFnature24270_CR3","unstructured":"Krizhevsky, A., Sutskever, I. & Hinton, G. ImageNet classification with deep convolutional neural networks. In Adv. Neural Inf. Process. Syst. Vol. 25 (eds Pereira, F., Burges, C. J. C., Bottou, L. & Weinberger, K. Q. ) 1097\u20131105 (2012)"},{"key":"BFnature24270_CR4","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S . & Sun, J. Deep residual learning for image recognition. In Proc. 29th IEEE Conf. Comput. Vis. Pattern Recognit. 
770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"BFnature24270_CR5","unstructured":"Hayes-Roth, F., Waterman, D. & Lenat, D. Building Expert Systems (Addison-Wesley, 1984)"},{"key":"BFnature24270_CR6","doi-asserted-by":"publisher","first-page":"529","DOI":"10.1038\/nature14236","volume":"518","author":"V Mnih","year":"2015","unstructured":"Mnih, V. et al. Human-level control through deep reinforcement learning. Nature 518, 529\u2013533 (2015)","journal-title":"Nature"},{"key":"BFnature24270_CR7","unstructured":"Guo, X., Singh, S. P., Lee, H., Lewis, R. L. & Wang, X. Deep learning for real-time Atari game play using offline Monte-Carlo tree search planning. In Adv. Neural Inf. Process. Syst. Vol. 27 (eds Ghahramani, Z., Welling, M., Cortes, C., Lawrence, N. D. & Weinberger, K. Q. ) 3338\u20133346 (2014)"},{"key":"BFnature24270_CR8","unstructured":"Mnih, V . et al. Asynchronous methods for deep reinforcement learning. In Proc. 33rd Int. Conf. Mach. Learn. Vol. 48 (eds Balcan, M. F. & Weinberger, K. Q. ) 1928\u20131937 (2016)"},{"key":"BFnature24270_CR9","unstructured":"Jaderberg, M . et al. Reinforcement learning with unsupervised auxiliary tasks. In 5th Int. Conf. Learn. Representations (2017)"},{"key":"BFnature24270_CR10","unstructured":"Dosovitskiy, A. & Koltun, V. Learning to act by predicting the future. In 5th Int. Conf. Learn. Representations (2017)"},{"key":"BFnature24270_CR11","doi-asserted-by":"crossref","unstructured":"Man\u00b4dziuk, J. in Challenges for Computational Intelligence ( Duch, W. & Man\u00b4dziuk, J. ) 407\u2013442 (Springer, 2007)","DOI":"10.1007\/978-3-540-71984-7_15"},{"key":"BFnature24270_CR12","doi-asserted-by":"publisher","first-page":"484","DOI":"10.1038\/nature16961","volume":"529","author":"D Silver","year":"2016","unstructured":"Silver, D. et al. Mastering the game of Go with deep neural networks and tree search. Nature 529, 484\u2013489 (2016)","journal-title":"Nature"},{"key":"BFnature24270_CR13","doi-asserted-by":"crossref","unstructured":"Coulom, R. Efficient selectivity and backup operators in Monte-Carlo tree search. In 5th Int. Conf. Computers and Games (eds Ciancarini, P. & van den Herik, H. J. ) 72\u201383 (2006)","DOI":"10.1007\/978-3-540-75538-8_7"},{"key":"BFnature24270_CR14","doi-asserted-by":"crossref","unstructured":"Kocsis, L. & Szepesv\u00e1ri, C. Bandit based Monte-Carlo planning. In 15th Eu. Conf. Mach. Learn. 282\u2013293 (2006)","DOI":"10.1007\/11871842_29"},{"key":"BFnature24270_CR15","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1109\/TCIAIG.2012.2186810","volume":"4","author":"C Browne","year":"2012","unstructured":"Browne, C. et al. A survey of Monte Carlo tree search methods. IEEE Trans. Comput. Intell. AI Games 4, 1\u201349 (2012)","journal-title":"Comput. Intell. AI Games"},{"key":"BFnature24270_CR16","doi-asserted-by":"publisher","first-page":"193","DOI":"10.1007\/BF00344251","volume":"36","author":"K Fukushima","year":"1980","unstructured":"Fukushima, K. Neocognitron: a self organizing neural network model for a mechanism of pattern recognition unaffected by shift in position. Biol. Cybern. 36, 193\u2013202 (1980)","journal-title":"Biol. Cybern."},{"key":"BFnature24270_CR17","unstructured":"LeCun, Y. & Bengio, Y. in The Handbook of Brain Theory and Neural Networks Ch. 3 (ed. Arbib, M. ) 276\u2013278 (MIT Press, 1995)"},{"key":"BFnature24270_CR18","unstructured":"Ioffe, S. & Szegedy, C. Batch normalization: accelerating deep network training by reducing internal covariate shift. In Proc. 32nd Int. Conf. 
{"key":"BFnature24270_CR19","doi-asserted-by":"publisher","first-page":"947","DOI":"10.1038\/35016072","volume":"405","author":"RHR Hahnloser","year":"2000","unstructured":"Hahnloser, R. H. R., Sarpeshkar, R., Mahowald, M. A., Douglas, R. J. & Seung, H. S. Digital selection and analogue amplification coexist in a cortex-inspired silicon circuit. Nature 405, 947\u2013951 (2000)","journal-title":"Nature"},{"key":"BFnature24270_CR20","unstructured":"Howard, R. Dynamic Programming and Markov Processes (MIT Press, 1960)"},{"key":"BFnature24270_CR21","doi-asserted-by":"crossref","unstructured":"Sutton, R. & Barto, A. Reinforcement Learning: An Introduction (MIT Press, 1998)","DOI":"10.1109\/TNN.1998.712192"},{"key":"BFnature24270_CR22","doi-asserted-by":"publisher","first-page":"310","DOI":"10.1007\/s11768-011-1005-3","volume":"9","author":"DP Bertsekas","year":"2011","unstructured":"Bertsekas, D. P. Approximate policy iteration: a survey and some new methods. J. Control Theory Appl. 9, 310\u2013335 (2011)","journal-title":"J. Control Theory Appl."},{"key":"BFnature24270_CR23","unstructured":"Scherrer, B. Approximate policy iteration schemes: a comparison. In Proc. 31st Int. Conf. Mach. Learn. Vol. 32 1314\u20131322 (2014)"},{"key":"BFnature24270_CR24","doi-asserted-by":"publisher","first-page":"203","DOI":"10.1007\/s10472-011-9258-6","volume":"61","author":"CD Rosin","year":"2011","unstructured":"Rosin, C. D. Multi-armed bandits with episode context. Ann. Math. Artif. Intell. 61, 203\u2013230 (2011)","journal-title":"Ann. Math. Artif. Intell."},{"key":"BFnature24270_CR25","doi-asserted-by":"crossref","unstructured":"Coulom, R. Whole-history rating: a Bayesian rating system for players of time-varying strength. In Int. Conf. Comput. Games (eds van den Herik, H. J., Xu, X., Ma, Z. & Winands, M. H. M.) Vol. 5131 113\u2013124 (Springer, 2008)","DOI":"10.1007\/978-3-540-87608-3_11"},{"key":"BFnature24270_CR26","doi-asserted-by":"publisher","first-page":"55","DOI":"10.3233\/KES-2010-0206","volume":"15","author":"GJ Laurent","year":"2011","unstructured":"Laurent, G. J., Matignon, L. & Le Fort-Piat, N. The world of independent learners is not Markovian. Int. J. Knowledge-Based Intelligent Engineering Systems 15, 55\u201364 (2011)","journal-title":"Int. J. Knowledge-Based Intelligent Engineering Systems"},{"key":"BFnature24270_CR27","unstructured":"Foerster, J. N. et al. Stabilising experience replay for deep multi-agent reinforcement learning. In Proc. 34th Int. Conf. Mach. Learn. Vol. 70 1146\u20131155 (2017)"},{"key":"BFnature24270_CR28","unstructured":"Heinrich, J. & Silver, D. Deep reinforcement learning from self-play in imperfect-information games. In NIPS Deep Reinforcement Learning Workshop (2016)"},{"key":"BFnature24270_CR29","unstructured":"Jouppi, N. P. et al. In-datacenter performance analysis of a Tensor Processing Unit. Proc. 44th Annu. Int. Symp. Comput. Architecture Vol. 17 1\u201312 (2017)"},{"key":"BFnature24270_CR30","unstructured":"Maddison, C. J., Huang, A., Sutskever, I. & Silver, D. Move evaluation in Go using deep convolutional neural networks. In 3rd Int. Conf. Learn. Representations (2015)"},{"key":"BFnature24270_CR31","unstructured":"Clark, C. & Storkey, A. J. Training deep convolutional neural networks to play Go. In Proc. 32nd Int. Conf. Mach. Learn. Vol. 37 1766\u20131774 (2015)"},{"key":"BFnature24270_CR32","unstructured":"Tian, Y. & Zhu, Y. Better computer Go player with neural network and long-term prediction. In 4th Int. Conf. Learn. Representations (2016)"},{"key":"BFnature24270_CR33","doi-asserted-by":"publisher","unstructured":"Cazenave, T. Residual networks for computer Go. IEEE Trans. Comput. Intell. AI Games https:\/\/doi.org\/10.1109\/TCIAIG.2017.2681042 (2017)","DOI":"10.1109\/TCIAIG.2017.2681042"},{"key":"BFnature24270_CR34","unstructured":"Huang, A. AlphaGo master online series of games. https:\/\/deepmind.com\/research\/AlphaGo\/match-archive\/master (2017)"},{"key":"BFnature24270_CR35","first-page":"687","volume":"6","author":"AG Barto","year":"1994","unstructured":"Barto, A. G. & Duff, M. Monte Carlo matrix inversion and reinforcement learning. Adv. Neural Inf. Process. Syst. 6, 687\u2013694 (1994)","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"BFnature24270_CR36","first-page":"123","volume":"22","author":"SP Singh","year":"1996","unstructured":"Singh, S. P. & Sutton, R. S. Reinforcement learning with replacing eligibility traces. Mach. Learn. 22, 123\u2013158 (1996)","journal-title":"Mach. Learn."},{"key":"BFnature24270_CR37","unstructured":"Lagoudakis, M. G. & Parr, R. Reinforcement learning as classification: leveraging modern classifiers. In Proc. 20th Int. Conf. Mach. Learn. 424\u2013431 (2003)"},{"key":"BFnature24270_CR38","first-page":"1629","volume":"16","author":"B Scherrer","year":"2015","unstructured":"Scherrer, B., Ghavamzadeh, M., Gabillon, V., Lesner, B. & Geist, M. Approximate modified policy iteration and its application to the game of Tetris. J. Mach. Learn. Res. 16, 1629\u20131676 (2015)","journal-title":"J. Mach. Learn. Res."},{"key":"BFnature24270_CR39","doi-asserted-by":"crossref","unstructured":"Littman, M. L. Markov games as a framework for multi-agent reinforcement learning. In Proc. 11th Int. Conf. Mach. Learn. 157\u2013163 (1994)","DOI":"10.1016\/B978-1-55860-335-6.50027-1"},{"key":"BFnature24270_CR40","unstructured":"Enzenberger, M. The integration of a priori knowledge into a Go playing neural network. http:\/\/www.cgl.ucsf.edu\/go\/Programs\/neurogo-html\/neurogo.html (1996)"},{"key":"BFnature24270_CR41","doi-asserted-by":"crossref","unstructured":"Enzenberger, M. in Advances in Computer Games (eds van den Herik, H. J., Iida, H. & Heinz, E. A.) 97\u2013108 (2003)","DOI":"10.1007\/978-0-387-35706-5_7"},{"key":"BFnature24270_CR42","first-page":"9","volume":"3","author":"R Sutton","year":"1988","unstructured":"Sutton, R. Learning to predict by the method of temporal differences. Mach. Learn. 3, 9\u201344 (1988)","journal-title":"Mach. Learn."},{"key":"BFnature24270_CR43","first-page":"817","volume":"6","author":"NN Schraudolph","year":"1994","unstructured":"Schraudolph, N. N., Dayan, P. & Sejnowski, T. J. Temporal difference learning of position evaluation in the game of Go. Adv. Neural Inf. Process. Syst. 6, 817\u2013824 (1994)","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"BFnature24270_CR44","doi-asserted-by":"publisher","first-page":"183","DOI":"10.1007\/s10994-012-5280-0","volume":"87","author":"D Silver","year":"2012","unstructured":"Silver, D., Sutton, R. & M\u00fcller, M. Temporal-difference search in computer Go. Mach. Learn. 87, 183\u2013219 (2012)","journal-title":"Mach. Learn."},{"key":"BFnature24270_CR45","unstructured":"Silver, D. Reinforcement Learning and Simulation-Based Search in Computer Go. PhD thesis, Univ. Alberta, Edmonton, Canada (2009)"},
{"key":"BFnature24270_CR46","doi-asserted-by":"publisher","first-page":"1856","DOI":"10.1016\/j.artint.2011.03.007","volume":"175","author":"S Gelly","year":"2011","unstructured":"Gelly, S. & Silver, D. Monte-Carlo tree search and rapid action value estimation in computer Go. Artif. Intell. 175, 1856\u20131875 (2011)","journal-title":"Artif. Intell."},{"key":"BFnature24270_CR47","first-page":"198","volume":"30","author":"R Coulom","year":"2007","unstructured":"Coulom, R. Computing Elo ratings of move patterns in the game of Go. Int. Comput. Games Assoc. J. 30, 198\u2013208 (2007)","journal-title":"Int. Comput. Games Assoc. J."},{"key":"BFnature24270_CR48","unstructured":"Gelly, S., Wang, Y., Munos, R. & Teytaud, O. Modification of UCT with patterns in Monte-Carlo Go. Report No. 6062 (INRIA, 2006)"},{"key":"BFnature24270_CR49","doi-asserted-by":"publisher","first-page":"243","DOI":"10.1023\/A:1007634325138","volume":"40","author":"J Baxter","year":"2000","unstructured":"Baxter, J., Tridgell, A. & Weaver, L. Learning to play chess using temporal differences. Mach. Learn. 40, 243\u2013263 (2000)","journal-title":"Mach. Learn."},{"key":"BFnature24270_CR50","unstructured":"Veness, J., Silver, D., Blair, A. & Uther, W. Bootstrapping from game tree search. In Adv. Neural Inf. Process. Syst. 1937\u20131945 (2009)"},{"key":"BFnature24270_CR51","unstructured":"Lai, M. Giraffe: Using Deep Reinforcement Learning to Play Chess. MSc thesis, Imperial College London (2015)"},{"key":"BFnature24270_CR52","unstructured":"Schaeffer, J., Hlynka, M. & Jussila, V. Temporal difference learning applied to a high-performance game-playing program. In Proc. 17th Int. Jt Conf. Artif. Intell. Vol. 1 529\u2013534 (2001)"},{"key":"BFnature24270_CR53","doi-asserted-by":"publisher","first-page":"215","DOI":"10.1162\/neco.1994.6.2.215","volume":"6","author":"G Tesauro","year":"1994","unstructured":"Tesauro, G. TD-gammon, a self-teaching backgammon program, achieves master-level play. Neural Comput. 6, 215\u2013219 (1994)","journal-title":"Neural Comput."},{"key":"BFnature24270_CR54","doi-asserted-by":"crossref","unstructured":"Buro, M. From simple features to sophisticated evaluation functions. In Proc. 1st Int. Conf. Comput. Games 126\u2013145 (1999)","DOI":"10.1007\/3-540-48957-6_8"},{"key":"BFnature24270_CR55","doi-asserted-by":"publisher","first-page":"241","DOI":"10.1016\/S0004-3702(01)00166-7","volume":"134","author":"B Sheppard","year":"2002","unstructured":"Sheppard, B. World-championship-caliber Scrabble. Artif. Intell. 134, 241\u2013275 (2002)","journal-title":"Artif. Intell."},{"key":"BFnature24270_CR56","doi-asserted-by":"publisher","first-page":"508","DOI":"10.1126\/science.aam6960","volume":"356","author":"M Morav\u010d\u00edk","year":"2017","unstructured":"Morav\u010d\u00edk, M. et al. DeepStack: expert-level artificial intelligence in heads-up no-limit poker. Science 356, 508\u2013513 (2017)","journal-title":"Science"},{"key":"BFnature24270_CR57","unstructured":"Tesauro, G. & Galperin, G. On-line policy improvement using Monte-Carlo search. In Adv. Neural Inf. Process. Syst. 1068\u20131074 (1996)"},{"key":"BFnature24270_CR58","doi-asserted-by":"crossref","unstructured":"Tesauro, G. Neurogammon: a neural-network backgammon program. In Proc. Int. Jt Conf. Neural Netw. Vol. 3, 33\u201339 (1990)","DOI":"10.1109\/IJCNN.1990.137821"},{"key":"BFnature24270_CR59","doi-asserted-by":"publisher","first-page":"601","DOI":"10.1147\/rd.116.0601","volume":"11","author":"AL Samuel","year":"1967","unstructured":"Samuel, A. L. Some studies in machine learning using the game of checkers II - recent progress. IBM J. Res. Develop. 11, 601\u2013617 (1967)","journal-title":"IBM J. Res. Develop."},{"key":"BFnature24270_CR60","doi-asserted-by":"publisher","first-page":"1238","DOI":"10.1177\/0278364913495721","volume":"32","author":"J Kober","year":"2013","unstructured":"Kober, J., Bagnell, J. A. & Peters, J. Reinforcement learning in robotics: a survey. Int. J. Robot. Res. 32, 1238\u20131274 (2013)","journal-title":"Int. J. Robot. Res."},{"key":"BFnature24270_CR61","unstructured":"Zhang, W. & Dietterich, T. G. A reinforcement learning approach to job-shop scheduling. In Proc. 14th Int. Jt Conf. Artif. Intell. 1114\u20131120 (1995)"},{"key":"BFnature24270_CR62","doi-asserted-by":"crossref","unstructured":"Cazenave, T., Balbo, F. & Pinson, S. Using a Monte-Carlo approach for bus regulation. In Int. IEEE Conf. Intell. Transport. Syst. 1\u20136 (2009)","DOI":"10.1109\/ITSC.2009.5309838"},{"key":"BFnature24270_CR63","unstructured":"Evans, R. & Gao, J. DeepMind AI reduces Google data centre cooling bill by 40%. https:\/\/deepmind.com\/blog\/deepmind-ai-reduces-google-data-centre-cooling-bill-40\/ (2016)"},{"key":"BFnature24270_CR64","doi-asserted-by":"crossref","unstructured":"Abe, N. et al. Empirical comparison of various reinforcement learning strategies for sequential targeted marketing. In IEEE Int. Conf. Data Mining 3\u201310 (2002)","DOI":"10.1109\/ICDM.2002.1183879"},{"key":"BFnature24270_CR65","unstructured":"Silver, D., Newnham, L., Barker, D., Weller, S. & McFall, J. Concurrent reinforcement learning from customer interactions. In Proc. 30th Int. Conf. Mach. Learn. Vol. 28 924\u2013932 (2013)"},{"key":"BFnature24270_CR66","unstructured":"Tromp, J. Tromp\u2013Taylor rules. http:\/\/tromp.github.io\/go.html (1995)"},{"key":"BFnature24270_CR67","doi-asserted-by":"publisher","first-page":"145","DOI":"10.1016\/S0004-3702(01)00121-7","volume":"134","author":"M M\u00fcller","year":"2002","unstructured":"M\u00fcller, M. Computer Go. Artif. Intell. 134, 145\u2013179 (2002)","journal-title":"Artif. Intell."},{"key":"BFnature24270_CR68","doi-asserted-by":"publisher","first-page":"148","DOI":"10.1109\/JPROC.2015.2494218","volume":"104","author":"B Shahriari","year":"2016","unstructured":"Shahriari, B., Swersky, K., Wang, Z., Adams, R. P. & de Freitas, N. Taking the human out of the loop: a review of Bayesian optimization. Proc. IEEE 104, 148\u2013175 (2016)","journal-title":"Proc. IEEE"},{"key":"BFnature24270_CR69","doi-asserted-by":"publisher","first-page":"36","DOI":"10.1007\/978-3-642-17928-0_4","volume":"6515","author":"RB Segal","year":"2011","unstructured":"Segal, R. B. On the scalability of parallel UCT. Comput. Games 6515, 36\u201347 (2011)","journal-title":"Comput. Games"}],
Games"}],"container-title":["Nature"],"original-title":[],"language":"en","link":[{"URL":"http:\/\/www.nature.com\/articles\/nature24270.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"http:\/\/www.nature.com\/articles\/nature24270","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"http:\/\/www.nature.com\/doifinder\/10.1038\/nature24270","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"},{"URL":"http:\/\/www.nature.com\/articles\/nature24270.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,26]],"date-time":"2025-06-26T15:00:28Z","timestamp":1750950028000},"score":1,"resource":{"primary":{"URL":"https:\/\/www.nature.com\/articles\/nature24270"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2017,10]]},"references-count":69,"journal-issue":{"issue":"7676","published-print":{"date-parts":[[2017,10]]}},"alternative-id":["BFnature24270"],"URL":"https:\/\/doi.org\/10.1038\/nature24270","relation":{},"ISSN":["0028-0836","1476-4687"],"issn-type":[{"value":"0028-0836","type":"print"},{"value":"1476-4687","type":"electronic"}],"subject":[],"published":{"date-parts":[[2017,10]]},"assertion":[{"value":"7 April 2017","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"13 September 2017","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"19 October 2017","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"The authors declare no competing financial interests.","order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing interests"}}]}}