{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,9]],"date-time":"2025-11-09T03:16:27Z","timestamp":1762658187070,"version":"3.28.0"},"reference-count":18,"publisher":"IEEE","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2007,10]]},"DOI":"10.1109\/icsmc.2007.4414135","type":"proceedings-article","created":{"date-parts":[[2008,1,4]],"date-time":"2008-01-04T20:52:49Z","timestamp":1199479969000},"page":"1636-1641","source":"Crossref","is-referenced-by-count":4,"title":["An improved immune Q-learning algorithm"],"prefix":"10.1109","author":[{"given":"Zhengqiao","family":"Ji","sequence":"first","affiliation":[]},{"given":"Q.M. Jonathan","family":"Wu","sequence":"additional","affiliation":[]},{"given":"Maher","family":"Sid-Ahmed","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"year":"0","key":"17"},{"key":"18","doi-asserted-by":"publisher","DOI":"10.1023\/A:1007355226281"},{"year":"0","key":"15"},{"key":"16","doi-asserted-by":"publisher","DOI":"10.1109\/ICSMC.2006.384378"},{"year":"0","key":"13"},{"year":"0","key":"14"},{"key":"11","doi-asserted-by":"publisher","DOI":"10.1063\/1.1699114"},{"journal-title":"Immunobiology The Immune System in Health and Disease","year":"2004","author":"janeway","key":"12"},{"key":"3","doi-asserted-by":"publisher","DOI":"10.1007\/BF00115009"},{"key":"2","first-page":"123","author":"chen","year":"1993","journal-title":"Linear Networks and Systems (Book Style)"},{"key":"1","first-page":"13","article-title":"A summary on reinforcement learning (in Chinese), Computer","volume":"25","author":"guo","year":"1998","journal-title":"Science"},{"key":"10","first-page":"237","article-title":"Reinforcement learning: A survey","volume":"4","author":"kaelbling","year":"1996","journal-title":"J AI Res"},{"key":"7","doi-asserted-by":"publisher","DOI":"10.1016\/S0004-3702(99)00026-0"},{"key":"6","first-page":"695","article-title":"Combining the methods of temporal differences with neural network for real-time modeling and prediction of time series","volume":"19","author":"yang","year":"1996","journal-title":"Chinese J Comput"},{"key":"5","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992698"},{"article-title":"Learning from delayed rewards","year":"1989","author":"watkins","key":"4"},{"journal-title":"Reinforcement Learning An Introduction","year":"1998","author":"sutton","key":"9"},{"key":"8","article-title":"M. Multi-agent Reinforcement Learning with Adaptive Mimetism","author":"yamaguchi","year":"0","journal-title":"Proc 5th IEEE Int Conf Emerging Technologies and Factory Automation (ETFA 96)"}],"event":{"name":"2007 IEEE International Conference on Systems, Man and Cybernetics","start":{"date-parts":[[2007,10,7]]},"location":"Montreal, QC, Canada","end":{"date-parts":[[2007,10,10]]}},"container-title":["2007 IEEE International Conference on Systems, Man and Cybernetics"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx5\/4413560\/4413561\/04414135.pdf?arnumber=4414135","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2017,3,16]],"date-time":"2017-03-16T14:35:26Z","timestamp":1489674926000},"score":1,"resource":{"primary":{"URL":"http:\/\/ieeexplore.ieee.org\/document\/4414135\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2007,10]]},"references-count":18,"URL":"https:\/\/doi.org\/10.1109\/icsmc.2007.4414135","relation":{},"subject":[],"published":{"date-parts":[[2007,10]]}}}