{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,8]],"date-time":"2026-03-08T01:37:18Z","timestamp":1772933838798,"version":"3.50.1"},"reference-count":43,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,12,8]],"date-time":"2025-12-08T00:00:00Z","timestamp":1765152000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,8]],"date-time":"2025-12-08T00:00:00Z","timestamp":1765152000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,12,8]]},"DOI":"10.1109\/bigdata66926.2025.11401043","type":"proceedings-article","created":{"date-parts":[[2026,3,6]],"date-time":"2026-03-06T20:57:57Z","timestamp":1772830677000},"page":"1343-1350","source":"Crossref","is-referenced-by-count":0,"title":["CAREER1: Reasoning Models for Career Path Prediction via Reinforcement Learning"],"prefix":"10.1109","author":[{"given":"Shuzhang","family":"Cai","sequence":"first","affiliation":[{"name":"Naveen Jindal School of Management, The University of Texas at Dallas,Richardson,Texas,USA"}]},{"given":"Ying","family":"Xie","sequence":"additional","affiliation":[{"name":"Naveen Jindal School of Management, The University of Texas at Dallas,Richardson,Texas,USA"}]},{"given":"Shaojie","family":"Tang","sequence":"additional","affiliation":[{"name":"School of Management, The State University of New York at Buffalo,Buffalo,New York,USA"}]}],"member":"263","reference":[{"key":"ref1","author":"Shao","year":"2024","journal-title":"Deepseekmath: Pushing the limits of mathematical reasoning in open language models"},{"key":"ref2","author":"Guo","year":"2025","journal-title":"Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v30i1.9969"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1145\/3041021.3054200"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1145\/3292500.3330969"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/BigData50022.2020.9377992"},{"key":"ref7","first-page":"5","article-title":"Looking further into the future: Career pathway prediction","author":"Yamashita","year":"2022","journal-title":"WSDM Computational Jobs Marketplace"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1145\/3690624.3709329"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/SOLI48380.2019.8955009"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1145\/3379984"},{"key":"ref11","author":"Decorte","year":"2023","journal-title":"Career path prediction using resume representation learning and skillbased matching"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1145\/3442381.3449959"},{"key":"ref13","author":"Vafa","year":"2022","journal-title":"Career: A foundation model for labor sequence data"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1145\/3447548.3467388"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.2139\/ssrn.4464002"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1145\/3589334.3645358"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.52202\/079017-0839"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v39i12.33426"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1145\/3583780.3615077"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2024.3357498"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1145\/3534678.3539342"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19815-1_32"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1145\/3580305.3599253"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.52202\/079017-2314"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1145\/3583780.3614785"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1145\/3604915.3608779"},{"key":"ref27","author":"Schulman","year":"2017","journal-title":"Proximal policy optimization algorithms"},{"key":"ref28","author":"Luo","year":"2025","journal-title":"Ursa: Understanding and verifying chain-of-thought reasoning in multimodal mathematics"},{"key":"ref29","author":"Chen","year":"2025","journal-title":"Bridging supervised learning and reinforcement learning in math reasoning"},{"key":"ref30","author":"Bae","year":"2025","journal-title":"Online difficulty filtering for reasoning oriented reinforcement learning"},{"key":"ref31","author":"Zhu","year":"2024","journal-title":"Deepseek-coder-v2: Breaking the barrier of closed-source models in code intelligence"},{"key":"ref32","author":"Xie","year":"2025","journal-title":"Teaching language models to critique via reinforcement learning"},{"key":"ref33","author":"Huang","year":"2025","journal-title":"Vision-r1: Incentivizing reasoning capability in multimodal large language models"},{"key":"ref34","author":"Feng","year":"2025","journal-title":"Video-r1: Reinforcing video reasoning in mllms"},{"key":"ref35","author":"Wang","year":"2025","journal-title":"Crowdvlm-r1: Expanding r1 ability to vision language model for crowd counting using fuzzy group relative policy reward"},{"key":"ref36","author":"Liu","year":"2025","journal-title":"Segzero: Reasoning-chain guided segmentation via cognitive reinforcement"},{"key":"ref37","author":"Wang","year":"2025","journal-title":"Reasoningtrack: Chain-of-thought reasoning for long-term visionlanguage tracking"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.emnlp-main.22"},{"issue":"2","key":"ref39","first-page":"3","volume":"1","author":"Lu","year":"2025","journal-title":"Ui-r1: Enhancing action prediction of gui agents by reinforcement learning"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.24894\/sprengglossarium_r00438"},{"key":"ref41","author":"Gandhi","year":"2025","journal-title":"Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective stars"},{"key":"ref42","author":"Zeng","year":"2025","journal-title":"Simplerlzoo: Investigating and taming zero reinforcement learning for open base models in the wild"},{"key":"ref43","author":"Face","year":"2025","journal-title":"Open r1: A fully open reproduction of deepseek-r1"}],"event":{"name":"2025 IEEE International Conference on Big Data (BigData)","location":"Macau, China","start":{"date-parts":[[2025,12,8]]},"end":{"date-parts":[[2025,12,11]]}},"container-title":["2025 IEEE International Conference on Big Data (BigData)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11400704\/11400712\/11401043.pdf?arnumber=11401043","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,7]],"date-time":"2026-03-07T06:53:14Z","timestamp":1772866394000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11401043\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,8]]},"references-count":43,"URL":"https:\/\/doi.org\/10.1109\/bigdata66926.2025.11401043","relation":{},"subject":[],"published":{"date-parts":[[2025,12,8]]}}}