{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,28]],"date-time":"2026-02-28T18:27:09Z","timestamp":1772303229937,"version":"3.50.1"},"reference-count":48,"publisher":"IEEE","license":[{"start":{"date-parts":[[2019,10,1]],"date-time":"2019-10-01T00:00:00Z","timestamp":1569888000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2019,10,1]],"date-time":"2019-10-01T00:00:00Z","timestamp":1569888000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2019,10,1]],"date-time":"2019-10-01T00:00:00Z","timestamp":1569888000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2019,10]]},"DOI":"10.1109\/icnp.2019.8888034","type":"proceedings-article","created":{"date-parts":[[2019,10,31]],"date-time":"2019-10-31T23:49:43Z","timestamp":1572565783000},"page":"1-11","source":"Crossref","is-referenced-by-count":20,"title":["MACS: Deep Reinforcement Learning based SDN Controller Synchronization Policy Design"],"prefix":"10.1109","author":[{"given":"Ziyao","family":"Zhang","sequence":"first","affiliation":[]},{"given":"Liang","family":"Ma","sequence":"additional","affiliation":[]},{"given":"Konstantinos","family":"Poularakis","sequence":"additional","affiliation":[]},{"given":"Kin K.","family":"Leung","sequence":"additional","affiliation":[]},{"given":"Jeremy","family":"Tucker","sequence":"additional","affiliation":[]},{"given":"Ananthram","family":"Swami","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2018.2871309"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1145\/2342441.2342446"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1145\/2620728.2620744"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ICC.2016.7511034"},{"key":"ref31","article-title":"Rectified linear units improve restricted boltzmann machines","author":"nair","year":"0","journal-title":"2010 The 27th ICML"},{"key":"ref30","first-page":"265","article-title":"Tensorflow: a system for large-scale machine learning","volume":"16","author":"abadi","year":"2016","journal-title":"OSDI"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1145\/2043164.2018466"},{"key":"ref36","year":"0","journal-title":"OpenDayLight Controller"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/INFCOM.2010.5461964"},{"key":"ref34","year":"2017","journal-title":"Stanford University"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICNP.2016.7785327"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1145\/2342441.2342443"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TNSM.2012.113012.120310"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.17487\/rfc4786"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1016\/j.adhoc.2006.11.004"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/90.731185"},{"key":"ref15","author":"white","year":"2001","journal-title":"Markov Decision Processes"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CDC.1995.478953"},{"key":"ref17","article-title":"Neural network design","volume":"20","author":"hagan","year":"1996","journal-title":"Pws Pub Boston"},{"key":"ref18","article-title":"Actor-critic algorithms","author":"konda","year":"0","journal-title":"NIPS 2000"},{"key":"ref19","article-title":"Policy gradient methods for reinforcement learning with function approximation","author":"sutton","year":"0","journal-title":"NIPS 2000"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1016\/j.camwa.2011.09.028"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM.2018.8485963"},{"key":"ref27","article-title":"Reinforcement learning for robots using neural networks","author":"lin","year":"1993","journal-title":"Carnegie-mellon Univ Pittsburgh Pa School of Computer Science"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TNSM.2017.2723477"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/2535771.2535791"},{"key":"ref29","article-title":"Keras: The python deep learning library","author":"chollet","year":"2018","journal-title":"Astrophysics Source Code Library"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1145\/2491185.2491186"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1145\/2377677.2377748"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/IFIPNetworking.2016.7497232"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/COMST.2017.2782482"},{"key":"ref9","article-title":"Survey of consistent software-defined network updates","author":"foerster","year":"2018","journal-title":"IEEE Communications Surveys & Tutorials"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2014.2371999"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/ICDCS.2018.00159"},{"key":"ref20","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2015","journal-title":"arXiv preprint arXiv 1509 02971"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1038\/nature16961"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/SCC.2016.12"},{"key":"ref22","article-title":"Rainbow: Combining improvements in deep reinforcement learning","author":"hessel","year":"0","journal-title":"AAAI 2018"},{"key":"ref47","volume":"abs 1812","author":"zhang","year":"2018","journal-title":"CoRR"},{"key":"ref21","doi-asserted-by":"crossref","first-page":"279","DOI":"10.1007\/BF00992698","article-title":"Q-learning","volume":"8","author":"watkins","year":"1992","journal-title":"Machine Learning"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/NETWKS.2016.7751168"},{"key":"ref24","article-title":"Action branching architectures for deep reinforcement learning","author":"tavakoli","year":"0","journal-title":"AAAI 2018"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1016\/j.comnet.2013.12.004"},{"key":"ref23","article-title":"Dueling network architectures for deep reinforcement learning","author":"wang","year":"2015","journal-title":"arXiv preprint arXiv 1511 05271"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/ICC.2017.7997164"},{"key":"ref26","article-title":"Double Q-learning","author":"hasselt","year":"2010","journal-title":"NIPS"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM.2019.8737388"},{"key":"ref25","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"}],"event":{"name":"2019 IEEE 27th International Conference on Network Protocols (ICNP)","location":"Chicago, IL, USA","start":{"date-parts":[[2019,10,8]]},"end":{"date-parts":[[2019,10,10]]}},"container-title":["2019 IEEE 27th International Conference on Network Protocols (ICNP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8884214\/8888028\/08888034.pdf?arnumber=8888034","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,7,19]],"date-time":"2022-07-19T20:21:02Z","timestamp":1658262062000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8888034\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,10]]},"references-count":48,"URL":"https:\/\/doi.org\/10.1109\/icnp.2019.8888034","relation":{},"subject":[],"published":{"date-parts":[[2019,10]]}}}