{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,15]],"date-time":"2026-03-15T05:16:33Z","timestamp":1773551793663,"version":"3.50.1"},"reference-count":32,"publisher":"Springer Science and Business Media LLC","issue":"26","license":[{"start":{"date-parts":[[2024,5,22]],"date-time":"2024-05-22T00:00:00Z","timestamp":1716336000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,5,22]],"date-time":"2024-05-22T00:00:00Z","timestamp":1716336000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61836011"],"award-info":[{"award-number":["61836011"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Neural Comput &amp; Applic"],"published-print":{"date-parts":[[2024,9]]},"DOI":"10.1007\/s00521-024-09860-2","type":"journal-article","created":{"date-parts":[[2024,5,22]],"date-time":"2024-05-22T08:02:09Z","timestamp":1716364929000},"page":"16163-16177","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":3,"title":["Alternate inference-decision reinforcement learning with generative adversarial inferring for bridge bidding"],"prefix":"10.1007","volume":"36","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-3131-7740","authenticated-orcid":false,"given":"Jiao","family":"Wang","sequence":"first","affiliation":[]},{"given":"Shijia","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Tao","family":"Xu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,5,22]]},"reference":[{"issue":"3","key":"9860_CR1","doi-asserted-by":"publisher","first-page":"287","DOI":"10.1007\/s10994-006-6225-2","volume":"63","author":"A Amit","year":"2006","unstructured":"Amit A, Markovitch S (2006) Learning to bid in bridge. Mach Learn 63(3):287\u2013327","journal-title":"Mach Learn"},{"key":"9860_CR2","unstructured":"Bard N, Johanson M, Burch N, et\u00a0al (2013) Online implicit agent modelling. In: Proceedings of the 2013 international conference on Autonomous agents and multi-agent systems, pp 255\u2013262"},{"issue":"5","key":"9860_CR3","doi-asserted-by":"publisher","first-page":"834","DOI":"10.1109\/TSMC.1983.6313077","volume":"13","author":"AG Barto","year":"1983","unstructured":"Barto AG, Sutton RS, Anderson CW (1983) Neuronlike adaptive elements that can solve difficult learning control problems. IEEE Trans Syst Man Cybern 13(5):834\u2013846","journal-title":"IEEE Trans Syst Man Cybern"},{"key":"9860_CR4","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2022.108404","volume":"242","author":"H Chen","year":"2022","unstructured":"Chen H, Liu Q, Fu K et al (2022) Accurate policy detection and efficient knowledge reuse against multi-strategic opponents. Knowl-Based Syst 242:108404","journal-title":"Knowl-Based Syst"},{"key":"9860_CR5","doi-asserted-by":"crossref","unstructured":"DeLooze LL, Downey J (2007) Bridge bidding with imperfect information. In: 2007 IEEE symposium on computational intelligence and games, pp 368\u2013373","DOI":"10.1109\/CIG.2007.368122"},{"issue":"1","key":"9860_CR6","first-page":"836","volume":"5","author":"M Dharmalingam","year":"2014","unstructured":"Dharmalingam M, Amalraj R (2014) A solution to the double dummy bridge problem in contract bridge influenced by supervised learning module adapted by artificial neural network. Soft Comput Models Ind Environ Appl 5(1):836\u2013843","journal-title":"Soft Comput Models Ind Environ Appl"},{"issue":"28","key":"9860_CR7","doi-asserted-by":"publisher","first-page":"20445","DOI":"10.1007\/s00521-023-08843-z","volume":"35","author":"J Dong","year":"2023","unstructured":"Dong J, Shi J, Gao Y et al (2023) Game: Gaussian mixture error-based meta-learning architecture. Neural Comput Appl 35(28):20445\u201320461","journal-title":"Neural Comput Appl"},{"key":"9860_CR8","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2022.117288","volume":"203","author":"M Elhefnawy","year":"2022","unstructured":"Elhefnawy M, Ouali MS, Ragab A (2022) Multi-output regression using polygon generation and conditional generative adversarial networks. Expert Syst Appl 203:117288","journal-title":"Expert Syst Appl"},{"key":"9860_CR9","unstructured":"Gong Q, Jiang Y, Tian Y (2020) Simple is better: Training an end-to-end contract bridge bidding agent without human knowledge. https:\/\/openreview.net\/forum?id=SklViCEFPH"},{"key":"9860_CR10","first-page":"2672","volume":"3","author":"IJ Goodfellow","year":"2014","unstructured":"Goodfellow IJ, Pouget-Abadie J, Mirza M et al (2014) Generative adversarial networks. Adv Neural Inf Process Syst 3:2672\u20132680","journal-title":"Adv Neural Inf Process Syst"},{"key":"9860_CR11","unstructured":"He H, Boyd-Graber J, Kwok K, et\u00a0al (2016) Opponent modeling in deep reinforcement learning. In: Proceedings of the 33rd international conference on international conference on machine learning, p 1804\u20131813"},{"key":"9860_CR12","unstructured":"Ho CY, Lin HT (2015) Contract bridge bidding by learning. In: AAAI Workshop: Computer Poker and Imperfect Information, https:\/\/aaai.org\/papers\/aaaiw-ws0105-15-10162\/"},{"key":"9860_CR13","doi-asserted-by":"crossref","unstructured":"Jiang Q, Li K, Du B, et\u00a0al (2019) Deltadou: Expert-level doudizhu ai through self-play. In: Proceedings of the twenty-eighth international joint conference on artificial intelligence, pp 1265\u20131271","DOI":"10.24963\/ijcai.2019\/176"},{"key":"9860_CR14","unstructured":"Li J, Koyamada S, Ye Q, et\u00a0al (2020) Suphx: Mastering mahjong with deep reinforcement learning. arXiv:2003.13590"},{"key":"9860_CR15","doi-asserted-by":"publisher","first-page":"405","DOI":"10.1016\/j.ins.2022.08.101","volume":"615","author":"Y Ma","year":"2022","unstructured":"Ma Y, Shen M, Zhang N et al (2022) Om-tcn: a dynamic and agile opponent modeling approach for competitive games. Inf Sci 615:405\u2013414","journal-title":"Inf Sci"},{"issue":"1","key":"9860_CR16","doi-asserted-by":"publisher","first-page":"11","DOI":"10.1109\/TCIAIG.2015.2491611","volume":"9","author":"R Mealing","year":"2017","unstructured":"Mealing R, Shapiro JL (2017) Opponent modeling by expectation-maximization and sequence prediction in simplified poker. IEEE Trans Comput Intell AI Games 9(1):11\u201324","journal-title":"IEEE Trans Comput Intell AI Games"},{"issue":"2","key":"9860_CR17","doi-asserted-by":"publisher","first-page":"278","DOI":"10.1109\/TNN.2008.2005526","volume":"20","author":"K Mossakowski","year":"2009","unstructured":"Mossakowski K, Mandziuk J (2009) Learning without human expertise: a case study of the double dummy bridge problem. IEEE Trans Neural Netw 20(2):278\u2013299","journal-title":"IEEE Trans Neural Netw"},{"key":"9860_CR18","unstructured":"Ng AY, Harada D, Russell SJ (1999) Policy invariance under reward transformations: Theory and application to reward shaping. In: Proceedings of the sixteenth international conference on machine learning, pp 278\u2013287"},{"key":"9860_CR19","unstructured":"Raileanu R, Denton E, Szlam A, et\u00a0al (2018) Modeling others using oneself in multi-agent reinforcement learning. In: International conference on machine learning, pp 4257\u20134266"},{"key":"9860_CR20","doi-asserted-by":"crossref","unstructured":"Rebstock D, Solinas C, Buro M, et\u00a0al (2019) Policy based inference in trick-taking card games. In: 2019 IEEE conference on games, pp 1\u20138","DOI":"10.1109\/CIG.2019.8848029"},{"key":"9860_CR21","unstructured":"Rong J, Qin T, An B (2019) Competitive bridge bidding with deep neural networks. In: Proceedings of the 18th international conference on autonomous agents and multiagent systems, pp 16\u201324"},{"key":"9860_CR22","doi-asserted-by":"publisher","first-page":"484","DOI":"10.1038\/nature16961","volume":"529","author":"D Silver","year":"2016","unstructured":"Silver D, Huang A, Maddison CJ et al (2016) Mastering the game of go with deep neural networks and tree search. Nature 529:484\u2013489","journal-title":"Nature"},{"issue":"2","key":"9860_CR23","doi-asserted-by":"publisher","first-page":"287","DOI":"10.1007\/s00521-015-2056-z","volume":"28","author":"D Strnad","year":"2017","unstructured":"Strnad D, Nerat A, Kohek \u0160 (2017) Neural network models for group behavior prediction: a case of soccer match attendance. Neural Comput Appl 28(2):287\u2013300","journal-title":"Neural Comput Appl"},{"key":"9860_CR24","unstructured":"Synnaeve G, Lin Z, Gehring J, et\u00a0al (2018) Forward modeling for partial observation strategy games-a starcraft defogger. In: Advances in neural information processing systems, pp 10738\u201310748"},{"key":"9860_CR25","unstructured":"Tian Y, Gong Q, Jiang Y (2020a) Joint policy search for multi-agent collaboration with imperfect information. In: Advances in neural information processing systems, pp 19931\u201319942"},{"key":"9860_CR26","doi-asserted-by":"crossref","unstructured":"Tian Z, Wen Y, Gong Z, et\u00a0al (2019) A regularized opponent model with maximum entropy objective. In: Proceedings of the twenty-eighth international joint conference on artificial intelligence, pp 602\u2013608","DOI":"10.24963\/ijcai.2019\/85"},{"issue":"5","key":"9860_CR27","first-page":"7261","volume":"34","author":"Z Tian","year":"2020","unstructured":"Tian Z, Zou S, Davies I et al (2020) Learning to communicate implicitly by actions. Proc AAAI Conf Art Intell 34(5):7261\u20137268","journal-title":"Proc AAAI Conf Art Intell"},{"key":"9860_CR28","doi-asserted-by":"crossref","unstructured":"Yan X, Xia L, Yang J, et\u00a0al (2020) Opponent modeling in poker games. In: IEEE data driven control and learning systems conference, pp 1090\u20131097","DOI":"10.1109\/DDCLS49620.2020.9275228"},{"issue":"4","key":"9860_CR29","doi-asserted-by":"publisher","first-page":"365","DOI":"10.1109\/TG.2018.2866036","volume":"10","author":"CK Yeh","year":"2018","unstructured":"Yeh CK, Hsieh CY, Lin HT (2018) Automatic bridge bidding using deep reinforcement learning. IEEE Trans Games 10(4):365\u2013377","journal-title":"IEEE Trans Games"},{"key":"9860_CR30","unstructured":"Zha D, Xie J, Ma W, et\u00a0al (2021) Douzero: Mastering doudizhu with self-play deep reinforcement learning. In: Proceedings of the 38th international conference on machine learning, pp 12333\u201312344"},{"key":"9860_CR31","doi-asserted-by":"crossref","unstructured":"Zhang X, Liu W, Yang F (2020) A neural model for automatic bidding of contract bridge. IEEE 22nd international conference on high performance computing and communications pp 999\u20131005","DOI":"10.1109\/HPCC-SmartCity-DSS50907.2020.00134"},{"key":"9860_CR32","doi-asserted-by":"crossref","unstructured":"Zhao Y, Zhao J, Hu X, et\u00a0al (2022) Douzero+: improving doudizhu AI by opponent modeling and coach-guided learning. In: 2022 IEEE conference on games (CoG), pp 127\u2013134","DOI":"10.1109\/CoG51982.2022.9893710"}],"container-title":["Neural Computing and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00521-024-09860-2.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00521-024-09860-2\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00521-024-09860-2.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,23]],"date-time":"2024-08-23T18:10:33Z","timestamp":1724436633000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00521-024-09860-2"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,5,22]]},"references-count":32,"journal-issue":{"issue":"26","published-print":{"date-parts":[[2024,9]]}},"alternative-id":["9860"],"URL":"https:\/\/doi.org\/10.1007\/s00521-024-09860-2","relation":{},"ISSN":["0941-0643","1433-3058"],"issn-type":[{"value":"0941-0643","type":"print"},{"value":"1433-3058","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,5,22]]},"assertion":[{"value":"7 August 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"12 April 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"22 May 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}