{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,14]],"date-time":"2026-04-14T16:06:05Z","timestamp":1776182765839,"version":"3.50.1"},"reference-count":68,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"2","license":[{"start":{"date-parts":[[2025,4,1]],"date-time":"2025-04-01T00:00:00Z","timestamp":1743465600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,4,1]],"date-time":"2025-04-01T00:00:00Z","timestamp":1743465600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,4,1]],"date-time":"2025-04-01T00:00:00Z","timestamp":1743465600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62402158"],"award-info":[{"award-number":["62402158"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["72188101"],"award-info":[{"award-number":["72188101"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["U22A2094"],"award-info":[{"award-number":["U22A2094"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Major Project of Anhui Province","award":["202203a05020011"],"award-info":[{"award-number":["202203a05020011"]}]},{"name":"Major Project of Anhui Province","award":["202423k09020001"],"award-info":[{"award-number":["202423k09020001"]}]},{"name":"Major Project 
of Anhui Province","award":["2408085J040"],"award-info":[{"award-number":["2408085J040"]}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","award":["JZ2024HGTG0309"],"award-info":[{"award-number":["JZ2024HGTG0309"]}],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","award":["JZ2024AHST0337"],"award-info":[{"award-number":["JZ2024AHST0337"]}],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","award":["JZ2023YQTD0072"],"award-info":[{"award-number":["JZ2023YQTD0072"]}],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Comput. Soc. 
Syst."],"published-print":{"date-parts":[[2025,4]]},"DOI":"10.1109\/tcss.2024.3497725","type":"journal-article","created":{"date-parts":[[2024,12,2]],"date-time":"2024-12-02T13:41:22Z","timestamp":1733146882000},"page":"539-551","source":"Crossref","is-referenced-by-count":21,"title":["PsycoLLM: Enhancing LLM for Psychological Understanding and Evaluation"],"prefix":"10.1109","volume":"12","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-4090-7494","authenticated-orcid":false,"given":"Jinpeng","family":"Hu","sequence":"first","affiliation":[{"name":"School of Computer Science and Information Engineering, Hefei University of Technology (HFUT), Hefei, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-3070-0142","authenticated-orcid":false,"given":"Tengteng","family":"Dong","sequence":"additional","affiliation":[{"name":"School of Computer Science and Information Engineering, Hefei University of Technology (HFUT), Hefei, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-9873-6233","authenticated-orcid":false,"given":"Gang","family":"Luo","sequence":"additional","affiliation":[{"name":"Key Laboratory of Brain Health Intelligent Evaluation and Intervention, Ministry of Education, Beijing Institute of Technology, Beijing, P. R. 
China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7146-8036","authenticated-orcid":false,"given":"Hui","family":"Ma","sequence":"additional","affiliation":[{"name":"School of Computer Science and Information Engineering, Hefei University of Technology (HFUT), Hefei, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-2452-3023","authenticated-orcid":false,"given":"Peng","family":"Zou","sequence":"additional","affiliation":[{"name":"Institute of Artificial Intelligence, Hefei Comprehensive National Science Center, Hefei, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9750-7032","authenticated-orcid":false,"given":"Xiao","family":"Sun","sequence":"additional","affiliation":[{"name":"School of Computer Science and Information Engineering, Hefei University of Technology (HFUT), Hefei, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2594-254X","authenticated-orcid":false,"given":"Dan","family":"Guo","sequence":"additional","affiliation":[{"name":"School of Computer Science and Information Engineering, Hefei University of Technology (HFUT), Hefei, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0201-1638","authenticated-orcid":false,"given":"Xun","family":"Yang","sequence":"additional","affiliation":[{"name":"School of Information Science and Technology, University of Science and Technology of China (USTC), Hefei, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3094-7735","authenticated-orcid":false,"given":"Meng","family":"Wang","sequence":"additional","affiliation":[{"name":"School of Computer Science and Information Engineering, Hefei University of Technology (HFUT), Hefei, China"}]}],"member":"263","reference":[{"issue":"240","key":"ref1","first-page":"1","article-title":"PALM: Scaling language modeling with pathways","volume":"24","author":"Chowdhery","year":"2023","journal-title":"J. Mach. Learn. 
Res."},{"key":"ref2","article-title":"Baichuan 2: Open large-scale language models","author":"Yang","year":"2023"},{"key":"ref3","article-title":"LLaMa: Open and efficient foundation language models","author":"Touvron","year":"2023"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.714"},{"key":"ref5","article-title":"Zero-shot information extraction via chatting with ChatGPT","author":"Wei","year":"2023"},{"key":"ref6","article-title":"DoctorGLM: Fine-tuning your Chinese doctor is not a herculean task","author":"Xiong","year":"2023"},{"key":"ref7","article-title":"Chatlaw: Open-source legal large language model with integrated external knowledge bases","author":"Cui","year":"2023"},{"key":"ref8","article-title":"BianQue: Balancing the questioning and suggestion ability of health LLMS with multi-turn health conversations polished by ChatGPT","author":"Chen","year":"2023"},{"key":"ref9","article-title":"Psy-LLM: Scaling up global mental health psychological services with AI-based large language models","author":"Lai","year":"2023"},{"key":"ref10","first-page":"1170","article-title":"SoulChat: Improving llms\u2019 empathy, listening, and comfort abilities through fine-tuning with multi-turn empathy conversations","volume-title":"Proc. Findings Assoc. Comput. 
Linguistics (EMNLP)","author":"Chen","year":"2023"},{"key":"ref11","article-title":"Smile: Single-turn to multi-turn inclusive language expansion via ChatGPT for mental health support","author":"Qiu","year":"2023"},{"key":"ref12","article-title":"Emollm","year":"2024"},{"key":"ref13","article-title":"PsyBench: a balanced and in-depth psychological Chinese evaluation benchmark for foundation models","author":"Zhang","year":"2023"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1810.04805"},{"key":"ref15","article-title":"Improving language understanding by generative pre-training","volume-title":"OpenAI Blog","author":"Radford","year":"2018"},{"issue":"8","key":"ref16","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"Radford","year":"2019","journal-title":"OpenAI Blog"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.4324\/9781003022022-6"},{"key":"ref18","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Brown","year":"2020"},{"key":"ref19","article-title":"Gpt-4 technical report","author":"Achiam","year":"2023"},{"key":"ref20","article-title":"Llama 2: Open foundation and fine-tuned chat models","author":"Touvron","year":"2023"},{"key":"ref21","article-title":"Alpaca: A strong, replicable instruction-following model","author":"Taori","year":"2023"},{"key":"ref22","article-title":"Mistral 7b","author":"Jiang","year":"2023"},{"key":"ref23","article-title":"ChatGLM: A family of large language models from GLM-130b to GLM-4 all tools","author":"GLM","year":"2024"},{"key":"ref24","first-page":"320","article-title":"GLM: General language model pretraining with autoregressive blank infilling","volume-title":"Proc. Annu. Meeting Assoc. Comput. Linguistics","author":"Du","year":"2022"},{"key":"ref25","article-title":"GLM-130b: An open bilingual pre-trained model","volume-title":"Proc. Int. Conf. Learn. 
Representations","author":"Zeng","year":"2023"},{"key":"ref26","article-title":"Yi: Open foundation models by 01. AI","author":"Young","year":"2024"},{"key":"ref27","article-title":"QWEN technical report","author":"Bai","year":"2023"},{"key":"ref28","article-title":"C-Eval: A multi-level multi-discipline Chinese evaluation suite for foundation models","volume-title":"Proc. 37th Conf. Neural Inf. Process. Syst. Datasets Benchmarks Track","author":"Huang","year":"2024"},{"key":"ref29","first-page":"2299","article-title":"AGIEVal: A human-centric benchmark for evaluating foundation models","volume-title":"Proc. Findings Assoc. Comput. Linguistics (NAACL)","author":"Zhong","year":"2024"},{"key":"ref30","article-title":"Measuring massive multitask language understanding","volume-title":"Proc. Int. Conf. Learn. Representations (ICLR)","author":"Hendrycks","year":"2021"},{"key":"ref31","first-page":"6184","article-title":"CMB: A comprehensive medical benchmark in Chinese","volume-title":"Proc. Conf. North Amer. Chapter Assoc. Comput. Linguistics: Human Lang. Technol.","author":"Wang","year":"2024"},{"key":"ref32","article-title":"LAiW: A Chinese legal large language models benchmark (a technical report)","author":"Dai","year":"2023"},{"key":"ref33","article-title":"PsyEval: A comprehensive large language model evaluation benchmark for mental health","author":"Jin","year":"2023"},{"key":"ref34","first-page":"1","article-title":"A benchmark for understanding dialogue safety in mental health support","volume-title":"Proc. CCF Int. Conf. Natural Lang. Process. Chin. Comput.","author":"Qiu","year":"2023"},{"key":"ref35","first-page":"4980","article-title":"Word graph guided summarization for radiology findings","volume-title":"Proc. Findings Assoc. Comput. 
Linguistics (ACL-IJCNLP)","author":"Hu","year":"2021"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-long.320"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.findings-emnlp.335"},{"key":"ref38","first-page":"2682","article-title":"MentSum: A resource for exploring summarization of mental health online posts","volume-title":"Proc. 13th Lang. Resour. Eval. Conf.","author":"Sotudeh","year":"2022"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1038\/s41598-020-68764-y"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1038\/s41598-022-11207-7"},{"key":"ref41","article-title":"Lawyer llama technical report","author":"Huang","year":"2023"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1093\/jamia\/ocae045"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.7759\/cureus.40895"},{"key":"ref44","article-title":"OWL: A large language model for IT operations","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Guo","year":"2024"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.725"},{"key":"ref46","article-title":"HuatuoGPT-II, one-stage training for medical adaption of LLMS","author":"Chen","year":"2023"},{"key":"ref47","article-title":"DISC-LawLLM: Fine-tuning large language models for intelligent legal 
services","author":"Yue","year":"2023"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/TCSS.2023.3283009"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/TCSS.2024.3350087"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/TCSS.2022.3154442"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.370"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1145\/3589334.3648137"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/TCSS.2024.3397403"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/TCSS.2023.3343689"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.nlp4science-1.8"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1115\/DETC2024-143961"},{"key":"ref57","article-title":"Sunnie: An anthropomorphic LLM-based conversational agent for mental well-being activity recommendation","author":"Wu","year":"2024"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.findings-acl.130"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1145\/3643540"},{"key":"ref60","article-title":"Attention is all you need","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Vaswani","year":"2017"},{"key":"ref61","first-page":"74","article-title":"ROUGE: A Package for automatic evaluation of summaries","author":"Lin","year":"2004","journal-title":"Text Summarization Branches Out"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.3115\/1073083.1073135"},{"key":"ref63","article-title":"BERTScore: Evaluating text generation with BERT","volume-title":"Proc. Int. Conf. Learn. 
Representations","author":"Zhang","year":"2019"},{"key":"ref64","article-title":"Llama3-chinese","author":"Zhang","year":"2024"},{"key":"ref65","article-title":"MindChat: Psychological large language model","author":"Yan","year":"2023"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-acl.671"},{"key":"ref67","article-title":"Training verifiers to solve math word problems","author":"Cobbe","year":"2021"},{"key":"ref68","first-page":"62991","article-title":"C-Eval: A multi-level multi-discipline Chinese evaluation suite for foundation models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Huang","year":"2023"}],"container-title":["IEEE Transactions on Computational Social Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6570650\/10948539\/10772313.pdf?arnumber=10772313","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,6]],"date-time":"2026-01-06T18:37:11Z","timestamp":1767724631000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10772313\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,4]]},"references-count":68,"journal-issue":{"issue":"2"},"URL":"https:\/\/doi.org\/10.1109\/tcss.2024.3497725","relation":{},"ISSN":["2329-924X","2373-7476"],"issn-type":[{"value":"2329-924X","type":"electronic"},{"value":"2373-7476","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,4]]}}}