{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:32:40Z","timestamp":1763191960997,"version":"3.45.0"},"reference-count":57,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,6,30]]},"DOI":"10.1109\/ijcnn64981.2025.11228619","type":"proceedings-article","created":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T18:46:15Z","timestamp":1763145975000},"page":"1-8","source":"Crossref","is-referenced-by-count":0,"title":["Personalizing Dialog Policy in Health Guidance via Model-based Multitask Reinforcement Learning"],"prefix":"10.1109","author":[{"given":"Taiga","family":"Sano","sequence":"first","affiliation":[{"name":"NTT Corporation,Kanagawa,Japan"}]},{"given":"Masahiro","family":"Kohjima","sequence":"additional","affiliation":[{"name":"NTT Corporation,Kanagawa,Japan"}]},{"given":"Masami","family":"Takahashi","sequence":"additional","affiliation":[{"name":"NTT Corporation,Kanagawa,Japan"}]},{"given":"Kaori","family":"Fujimura","sequence":"additional","affiliation":[{"name":"NTT Corporation,Kanagawa,Japan"}]},{"given":"Tae","family":"Sato","sequence":"additional","affiliation":[{"name":"NTT Corporation,Kanagawa,Japan"}]},{"given":"Yurika","family":"Katagiri","sequence":"additional","affiliation":[{"name":"NTT Corporation,Kanagawa,Japan"}]}],"member":"263","reference":[{"year":"2020","key":"ref1","article-title":"Who package of essential noncommunicable (pen) disease interventions for primary health care"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1001\/jamainternmed.2020.0618"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1536\/ihj.49.193"},{"key":"ref4","article-title":"Specific health checkups and specific health guidance"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-49212-9_46"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1038\/s41746-022-00560-6"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.2196\/22845"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1145\/3154862.3154914"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.2196\/15085"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1145\/3675094.3677590"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pone.0277295"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2023\/668"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/s11633-022-1347-y"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10445957"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1038\/s41598-024-53755-0"},{"article-title":"Reinforcement learning for spoken dialogue systems: Comparing strengths and weaknesses for practical deployment","volume-title":"Dialog-on-Dialog Workshop, Interspeech","author":"Paek","key":"ref16"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1016\/j.specom.2016.09.002"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462272"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2000.859189"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.262"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN60899.2024.10650365"},{"key":"ref22","first-page":"4302","article-title":"Deep reinforcement learning from human preferences","author":"Christiano","year":"2017","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref23","first-page":"53728","article-title":"Direct preference optimization: Your language model is secretly a reward model","author":"Rafailov","year":"2024","journal-title":"Advances in Neural Information Processing Systems"},{"article-title":"RLHF from heterogeneous feedback via personalization and preference aggregation","volume-title":"ICML 2024 Workshop: Aligning Reinforcement Learning Experimentalists and Theorists","author":"Park","key":"ref24"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-acl.630"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1016\/j.jval.2022.01.012"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1145\/1273496.1273624"},{"key":"ref28","first-page":"9767","article-title":"Multi-task reinforcement learning with context-based representations","volume-title":"International Conference on Machine Learning","author":"Sodhani"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1007\/springerreference_5781"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1145\/122344.122377"},{"key":"ref31","first-page":"278","article-title":"Policy invariance under reward transformations: Theory and application to reward shaping","volume-title":"International Conference on Machine Learning","author":"Ng"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1145\/3662008.3662011"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.3389\/fcomm.2023.1129082"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1016\/j.ebiom.2023.104512"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1016\/j.imu.2023.101304"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1016\/j.mcpdig.2024.05.007"},{"key":"ref37","first-page":"239","article-title":"Combining kernel and model based learning for hiv therapy selection","volume":"2017","author":"Parbhoo","year":"2017","journal-title":"AMIA Summits on Translational Science Proceedings"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-023-06291-2"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1093\/bib\/bbac409"},{"volume-title":"Reinforcement learning: An introduction","year":"2018","author":"Sutton","key":"ref40"},{"key":"ref41","first-page":"5331","article-title":"Efficient off-policy meta-reinforcement learning via probabilistic context variables","volume-title":"International Conference on Machine Learning","author":"Rakelly"},{"key":"ref42","first-page":"1126","article-title":"Model-agnostic meta-learning for fast adaptation of deep networks","volume-title":"International Conference on Machine Learning","volume":"70","author":"Finn"},{"article-title":"A simple neural attentive meta-learner","volume-title":"International Conference on Learning Representations","author":"Mishra","key":"ref43"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN60899.2024.10649914"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1126\/science.aar6404"},{"article-title":"Planning in stochastic environments with a learned model","volume-title":"International Conference on Learning Representations","author":"Antonoglou","key":"ref46"},{"key":"ref47","first-page":"3149","article-title":"Lightgbm: A highly efficient gradient boosting decision tree","author":"Ke","year":"2017","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-39593-2_8"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.52202\/079017-0762"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.4278\/0890-1171-12.1.38"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1037\/0022-3514.59.6.1216"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1257\/002205102320161311"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1257\/000282802762024700"},{"year":"2024","key":"ref55","article-title":"Gpt-4o mini"},{"key":"ref56","first-page":"1877","article-title":"Language models are few-shot learners","volume":"33","author":"Brown","year":"2020","journal-title":"Advances in Neural Information Processing Systems"},{"article-title":"Decoupled weight decay regularization","volume-title":"International Conference on Learning Representations","author":"Loshchilov","key":"ref57"}],"event":{"name":"2025 International Joint Conference on Neural Networks (IJCNN)","start":{"date-parts":[[2025,6,30]]},"location":"Rome, Italy","end":{"date-parts":[[2025,7,5]]}},"container-title":["2025 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11227166\/11227148\/11228619.pdf?arnumber=11228619","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:28:00Z","timestamp":1763191680000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11228619\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,30]]},"references-count":57,"URL":"https:\/\/doi.org\/10.1109\/ijcnn64981.2025.11228619","relation":{},"subject":[],"published":{"date-parts":[[2025,6,30]]}}}