{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,5]],"date-time":"2025-10-05T19:53:04Z","timestamp":1759693984528,"version":"3.37.3"},"reference-count":12,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"1","license":[{"start":{"date-parts":[[2023,2,1]],"date-time":"2023-02-01T00:00:00Z","timestamp":1675209600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2023,2,1]],"date-time":"2023-02-01T00:00:00Z","timestamp":1675209600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,2,1]],"date-time":"2023-02-01T00:00:00Z","timestamp":1675209600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Des. Test"],"published-print":{"date-parts":[[2023,2]]},"DOI":"10.1109\/mdat.2022.3145344","type":"journal-article","created":{"date-parts":[[2022,1,20]],"date-time":"2022-01-20T20:23:25Z","timestamp":1642710205000},"page":"43-51","source":"Crossref","is-referenced-by-count":4,"title":["Deep Reinforcement Learning for Optimization at Early Design Stages"],"prefix":"10.1109","volume":"40","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-4322-834X","authenticated-orcid":false,"given":"Lorenzo","family":"Servadei","sequence":"first","affiliation":[{"name":"Infineon Technologies AG, Neubiberg, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7018-1941","authenticated-orcid":false,"given":"Jin Hwa","family":"Lee","sequence":"additional","affiliation":[{"name":"Technical University of Munich, Munich, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5033-4725","authenticated-orcid":false,"given":"Jose A.","family":"Arjona Medina","sequence":"additional","affiliation":[{"name":"Johannes Kepler University Linz, Linz, Austria"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7447-3511","authenticated-orcid":false,"given":"Michael","family":"Werner","sequence":"additional","affiliation":[{"name":"Infineon Technologies AG, Munich, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7449-2528","authenticated-orcid":false,"given":"Sepp","family":"Hochreiter","sequence":"additional","affiliation":[{"name":"Johannes Kepler University Linz, Linz, Austria"}]},{"given":"Wolfgang","family":"Ecker","sequence":"additional","affiliation":[{"name":"Infineon Technologies AG, Munich, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4993-7860","authenticated-orcid":false,"given":"Robert","family":"Wille","sequence":"additional","affiliation":[{"name":"Johannes Kepler University Linz, Linz, Austria"}]}],"member":"263","reference":[{"first-page":"13566","article-title":"RUDDER: Return decomposition for delayed rewards","author":"Arjona-Medina","key":"ref1"},{"key":"ref2","article-title":"Neural combinatorial optimization with reinforcement learning","author":"Bello","year":"2016","journal-title":"arXiv:1611.09940"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1111\/j.1475-3995.1996.tb00032.x"},{"volume-title":"Deep Learning. Cambridge","year":"2016","author":"Goodfellow","key":"ref4"},{"key":"ref5","article-title":"Solving a new 3D bin packing problem with deep reinforcement learning method","volume":"abs\/1708.05930","author":"Hu","year":"2017","journal-title":"CoRR"},{"key":"ref6","article-title":"Ranked reward: Enabling self-play reinforcement learning for combinatorial optimization","author":"Laterre","year":"2018","journal-title":"arXiv:1807.01672"},{"key":"ref7","article-title":"Chip placement with deep reinforcement learning","author":"Mirhoseini","year":"2020","journal-title":"arXiv:2004.10746"},{"key":"ref8","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017","journal-title":"arXiv:1707.06347"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.23919\/DATE.2019.8714961"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TC.2020.2968888"},{"volume-title":"Reinforcement Learning: An Introduction. (A Bradford Book)","year":"2018","author":"Sutton","key":"ref11"},{"article-title":"Pointer networks","volume-title":"Proc. NIPS","author":"Vinyals","key":"ref12"}],"container-title":["IEEE Design &amp; Test"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6221038\/10024350\/09687589.pdf?arnumber=9687589","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,13]],"date-time":"2024-01-13T22:10:41Z","timestamp":1705183841000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9687589\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,2]]},"references-count":12,"journal-issue":{"issue":"1"},"URL":"https:\/\/doi.org\/10.1109\/mdat.2022.3145344","relation":{},"ISSN":["2168-2356","2168-2364"],"issn-type":[{"type":"print","value":"2168-2356"},{"type":"electronic","value":"2168-2364"}],"subject":[],"published":{"date-parts":[[2023,2]]}}}