{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,26]],"date-time":"2026-03-26T15:37:43Z","timestamp":1774539463978,"version":"3.50.1"},"reference-count":27,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"4","license":[{"start":{"date-parts":[[2021,12,1]],"date-time":"2021-12-01T00:00:00Z","timestamp":1638316800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,12,1]],"date-time":"2021-12-01T00:00:00Z","timestamp":1638316800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,12,1]],"date-time":"2021-12-01T00:00:00Z","timestamp":1638316800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Cogn. Commun. Netw."],"published-print":{"date-parts":[[2021,12]]},"DOI":"10.1109\/tccn.2021.3080677","type":"journal-article","created":{"date-parts":[[2021,5,17]],"date-time":"2021-05-17T20:22:26Z","timestamp":1621282946000},"page":"1233-1243","source":"Crossref","is-referenced-by-count":36,"title":["Toward Joint Learning of Optimal MAC Signaling and Wireless Channel Access"],"prefix":"10.1109","volume":"7","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-0400-3228","authenticated-orcid":false,"given":"Alvaro","family":"Valcarce","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0438-967X","authenticated-orcid":false,"given":"Jakob","family":"Hoydis","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/DySPAN.2019.8935725"},{"key":"ref11","first-page":"1","article-title":"A dynamic spectrum sharing design in the DARPA spectrum collaboration challenge","author":"wong","year":"2020","journal-title":"Proc Government Microcircuit Appl Critical Technol Conf (GOMACTech)"},{"key":"ref12","first-page":"2145","article-title":"Learning to communicate with deep multi-agent reinforcement learning","author":"foerster","year":"2016","journal-title":"Proc 30th Int Conf Neural Inf Process Syst (NIPS)"},{"key":"ref13","author":"lowe","year":"2019","journal-title":"On the pitfalls of measuring emergent communication"},{"key":"ref14","first-page":"1","article-title":"On the interaction between supervision and self-play in emergent communication","author":"lowe","year":"2020","journal-title":"Proc Int Conf Learn Represent"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2019.2933891"},{"key":"ref16","article-title":"5G FAPI: PHY API specification","year":"2019"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1016\/B978-1-55860-335-6.50027-1"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1142\/S0219525901000188"},{"key":"ref19","first-page":"1146","article-title":"Stabilising experience replay for deep multi-agent reinforcement learning","author":"foerster","year":"2017","journal-title":"Proc 34th Int Conf Mach Learn (ICML)"},{"key":"ref4","author":"destounis","year":"2019","journal-title":"Learn2MAC Online learning multiple access for URLLC applications"},{"key":"ref27","author":"packer","year":"2018","journal-title":"Assessing generalization in deep reinforcement learning"},{"key":"ref3","author":"yu","year":"2019","journal-title":"Non-uniform time-step deep Q-network for carrier-sense multiple access in heterogeneous wireless networks"},{"key":"ref6","author":"naderializadeh","year":"2019","journal-title":"When multiple agents learn to schedule A distributed radio resource management framework"},{"key":"ref5","author":"yu","year":"2020","journal-title":"Multi-agent deep reinforcement learning multiple access for heterogeneous wireless networks with imperfect channels"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2020.3000893"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/SPAWC48557.2020.9154250"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICC.2018.8422168"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICAIIC48513.2020.9065254"},{"key":"ref1","year":"2020"},{"key":"ref20","author":"sutton","year":"2018","journal-title":"Reinforcement Learning An Introduction"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1016\/B978-1-55860-307-3.50049-6"},{"key":"ref21","article-title":"Learning from delayed rewards","author":"watkins","year":"1989"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1007\/s10458-019-09421-1"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1994.6.2.215"},{"key":"ref26","first-page":"3040","article-title":"Social influence as intrinsic motivation for multi-agent deep reinforcement learning","author":"jaques","year":"2019","journal-title":"Proc 36th Int Conf Mach Learn"},{"key":"ref25","first-page":"4399","article-title":"&#x2018;Other-play&#x2019; for zero-shot coordination","author":"hu","year":"2020","journal-title":"Proc 37th Int Conf Mach Learn"}],"container-title":["IEEE Transactions on Cognitive Communications and Networking"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6687307\/9642385\/09432398.pdf?arnumber=9432398","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T14:54:03Z","timestamp":1652194443000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9432398\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,12]]},"references-count":27,"journal-issue":{"issue":"4"},"URL":"https:\/\/doi.org\/10.1109\/tccn.2021.3080677","relation":{},"ISSN":["2332-7731","2372-2045"],"issn-type":[{"value":"2332-7731","type":"electronic"},{"value":"2372-2045","type":"electronic"}],"subject":[],"published":{"date-parts":[[2021,12]]}}}