{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,9]],"date-time":"2026-01-09T00:27:57Z","timestamp":1767918477389,"version":"3.49.0"},"publisher-location":"New York, NY, USA","reference-count":37,"publisher":"ACM","license":[{"start":{"date-parts":[[2023,2,27]],"date-time":"2023-02-27T00:00:00Z","timestamp":1677456000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"name":"National Natural Science Foundation of China","award":["62072323"],"award-info":[{"award-number":["62072323"]}]},{"name":"Science and Technology Commission of Shanghai Municipality Grant","award":["22511105902"],"award-info":[{"award-number":["22511105902"]}]},{"name":"Shanghai Municipal Science and Technology Major Project","award":["2021SHZDZX0103"],"award-info":[{"award-number":["2021SHZDZX0103"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2023,2,27]]},"DOI":"10.1145\/3539597.3570431","type":"proceedings-article","created":{"date-parts":[[2023,2,22]],"date-time":"2023-02-22T23:27:00Z","timestamp":1677108420000},"page":"465-480","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":9,"title":["Can Pre-trained Language Models Understand Chinese Humor?"],"prefix":"10.1145","author":[{"given":"Yuyan","family":"Chen","sequence":"first","affiliation":[{"name":"Shanghai Key Laboratory of Data Science, School of Computer Science, Fudan University, Shanghai, China"}]},{"given":"Zhixu","family":"Li","sequence":"additional","affiliation":[{"name":"Shanghai Key Laboratory of Data Science, School of Computer Science, Fudan University, Shanghai, China"}]},{"given":"Jiaqing","family":"Liang","sequence":"additional","affiliation":[{"name":"School of Data Science, Fudan University, Shanghai, China"}]},{"given":"Yanghua","family":"Xiao","sequence":"additional","affiliation":[{"name":"Shanghai Key Laboratory of Data Science, School of Computer Science, Fudan University &amp; Fudan-Aishu Cognitive Intelligence Joint Research Center, Shanghai, China"}]},{"given":"Bang","family":"Liu","sequence":"additional","affiliation":[{"name":"RALI &amp; Mila, Universit\u00e9 de Montr\u00e9al, Montr\u00e9al, Canada"}]},{"given":"Yunwen","family":"Chen","sequence":"additional","affiliation":[{"name":"DataGrand Inc., Shanghai, China"}]}],"member":"320","published-online":{"date-parts":[[2023,2,27]]},"reference":[{"key":"e_1_3_2_2_1_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P17-3012"},{"key":"e_1_3_2_2_2_1","unstructured":"Francesco Barbieri and Horacio Saggion. 2014. Automatic Detection of Irony and Humour in Twitter. In ICCC. 155--162."},{"key":"e_1_3_2_2_3_1","doi-asserted-by":"publisher","DOI":"10.1177\/000276487030003002"},{"key":"e_1_3_2_2_4_1","volume-title":"Proceedings of the 27th International Conference on Computational Linguistics. Association for Computational Lin- guistics","author":"Cattle Andrew","year":"2018","unstructured":"Andrew Cattle and Xiaojuan Ma. 2018. Recognizing Humour using Word Associ- ations and Humour Anchor Extraction. In Proceedings of the 27th International Conference on Computational Linguistics. Association for Computational Lin- guistics, Santa Fe, New Mexico, USA, 1849--1858. 
https:\/\/www.aclweb.org\/ anthology\/C18--1157"},{"key":"e_1_3_2_2_5_1","volume-title":"Predicting audience's laughter using convo- lutional neural network. arXiv preprint arXiv:1702.02584","author":"Chen Lei","year":"2017","unstructured":"Lei Chen and Chong MIn Lee. 2017. Predicting audience's laughter using convo- lutional neural network. arXiv preprint arXiv:1702.02584 (2017)."},{"key":"e_1_3_2_2_6_1","volume-title":"Proceedings of the 12th Language Resources and Evaluation Conference. European Language Resources Association","author":"Chiruzzo Luis","year":"2020","unstructured":"Luis Chiruzzo, Santiago Castro, and Aiala Ros\u00e1. 2020. HAHA 2019 Dataset: A Corpus for Humor Analysis in Spanish. In Proceedings of the 12th Language Resources and Evaluation Conference. European Language Resources Association, Marseille, France, 5106--5112. https:\/\/www.aclweb.org\/anthology\/2020.lrec- 1.628"},{"key":"e_1_3_2_2_7_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_2_8_1","volume-title":"Humor norms for 4,997 English words. Behavior research methods 50, 3","author":"Engelthaler Tomas","year":"2018","unstructured":"Tomas Engelthaler and Thomas T Hills. 2018. Humor norms for 4,997 English words. Behavior research methods 50, 3 (2018), 1116--1124."},{"key":"e_1_3_2_2_9_1","doi-asserted-by":"crossref","unstructured":"Giovannantonio Forabosco. 1992. Cognitive aspects of the humor process: The concept of incongruity. (1992).","DOI":"10.1515\/humr.1992.5.1-2.45"},{"key":"e_1_3_2_2_10_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.future.2021.01.024"},{"key":"e_1_3_2_2_11_1","volume-title":"Simcse: Simple contrastive learning of sentence embeddings. arXiv preprint arXiv:2104.08821","author":"Gao Tianyu","year":"2021","unstructured":"Tianyu Gao, Xingcheng Yao, and Danqi Chen. 2021. Simcse: Simple contrastive learning of sentence embeddings. arXiv preprint arXiv:2104.08821 (2021)."},{"key":"e_1_3_2_2_12_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2006.100"},{"key":"e_1_3_2_2_13_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_2_14_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N19-1172"},{"key":"e_1_3_2_2_15_1","volume-title":"Dataset and Analysis of Creative Text Editing for Humorous Headlines. arXiv preprint arXiv:1906.00274","author":"Hossain Nabil","year":"2019","unstructured":"Nabil Hossain, John Krumm, and Michael Gamon. 2019. \" President Vows to Hair\": Dataset and Analysis of Creative Text Editing for Humorous Headlines. arXiv preprint arXiv:1906.00274 (2019)."},{"key":"e_1_3_2_2_16_1","volume-title":"Backpropagation applied to handwritten zip code recognition. Neural computation 1, 4","author":"LeCun Yann","year":"1989","unstructured":"Yann LeCun, Bernhard Boser, John S Denker, Donnie Henderson, Richard E Howard, Wayne Hubbard, and Lawrence D Jackel. 1989. Backpropagation applied to handwritten zip code recognition. Neural computation 1, 4 (1989), 541--551."},{"key":"e_1_3_2_2_17_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.703"},{"key":"e_1_3_2_2_18_1","volume-title":"Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692","author":"Liu Yinhan","year":"2019","unstructured":"Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining approach. 
arXiv preprint arXiv:1907.11692 (2019)."},{"key":"e_1_3_2_2_19_1","volume-title":"Proceedings of Cognitive Science Conference. Citeseer, 1513--1518","author":"Mihalcea Rada","year":"2005","unstructured":"Rada Mihalcea and Carlo Strapparava. 2005. Computational laughing: Auto- matic recognition of humorous one-liners. In Proceedings of Cognitive Science Conference. Citeseer, 1513--1518."},{"key":"e_1_3_2_2_20_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D17-1051"},{"key":"e_1_3_2_2_21_1","doi-asserted-by":"publisher","DOI":"10.1515\/humor-2012-0007"},{"key":"e_1_3_2_2_22_1","volume-title":"Exploring the limits of transfer learning with a unified text-to-text transformer. arXiv preprint arXiv:1910.10683","author":"Raffel Colin","year":"2019","unstructured":"Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. 2019. Exploring the limits of transfer learning with a unified text-to-text transformer. arXiv preprint arXiv:1910.10683 (2019)."},{"key":"e_1_3_2_2_23_1","unstructured":"Mariano Rodriguez Reynier Ortega-Bueno and Paolo Rosso. 2021. RoMa at HAHA-2021: Deep Reinforcement Learning to Improve a Transformed-based Model for Humor Detection. (2021)."},{"key":"e_1_3_2_2_24_1","volume-title":"CPT: A Pre-Trained Unbalanced Transformer for Both Chi- nese Language Understanding and Generation. arXiv preprint arXiv:2109.05729","author":"Shao Yunfan","year":"2021","unstructured":"Yunfan Shao, Zhichao Geng, Yitao Liu, Junqi Dai, Fei Yang, Li Zhe, Hujun Bao, and Xipeng Qiu. 2021. CPT: A Pre-Trained Unbalanced Transformer for Both Chi- nese Language Understanding and Generation. arXiv preprint arXiv:2109.05729 (2021)."},{"key":"e_1_3_2_2_25_1","volume-title":"Proceedings of the 34th International Conference on Machine Learning, ICML 2017, Syd- ney, NSW, Australia, 6--11","volume":"3153","author":"Shrikumar Avanti","year":"2017","unstructured":"Avanti Shrikumar, Peyton Greenside, and Anshul Kundaje. 2017. Learning Im- portant Features Through Propagating Activation Differences. In Proceedings of the 34th International Conference on Machine Learning, ICML 2017, Syd- ney, NSW, Australia, 6--11 August 2017 (Proceedings of Machine Learning Re- search, Vol. 70), Doina Precup and Yee Whye Teh (Eds.). PMLR, 3145--3153. http:\/\/proceedings.mlr.press\/v70\/shrikumar17a.html"},{"key":"e_1_3_2_2_26_1","volume-title":"Deep inside convolutional networks: Visualising image classification models and saliency maps. arXiv preprint arXiv:1312.6034","author":"Simonyan Karen","year":"2013","unstructured":"Karen Simonyan, Andrea Vedaldi, and Andrew Zisserman. 2013. Deep inside convolutional networks: Visualising image classification models and saliency maps. arXiv preprint arXiv:1312.6034 (2013)."},{"key":"e_1_3_2_2_27_1","volume-title":"What s SO Bloody Funny?","author":"Spence Mary F","year":"2006","unstructured":"Mary F Spence. 2006. What s SO Bloody Funny? (2006)."},{"key":"e_1_3_2_2_28_1","volume-title":"Chinesebert: Chinese pretraining enhanced by glyph and pinyin information. arXiv preprint arXiv:2106.16038","author":"Sun Zijun","year":"2021","unstructured":"Zijun Sun, Xiaoya Li, Xiaofei Sun, Yuxian Meng, Xiang Ao, Qing He, Fei Wu, and Jiwei Li. 2021. Chinesebert: Chinese pretraining enhanced by glyph and pinyin information. 
arXiv preprint arXiv:2106.16038 (2021)."},{"key":"e_1_3_2_2_29_1","volume-title":"Proceedings of the Annual Meeting of the Cognitive Science Society","volume":"26","author":"Taylor Julia M","year":"2004","unstructured":"Julia M Taylor and Lawrence J Mazlack. 2004. Computationally recognizing wordplay in jokes. In Proceedings of the Annual Meeting of the Cognitive Science Society, Vol. 26."},{"key":"e_1_3_2_2_30_1","article-title":"A Study of American Verbal Humor in The Big Bang Theory from the Perspective of Cooperative Principle","volume":"4","author":"Yang Wen","year":"2021","unstructured":"Yang Wen et al. 2021. A Study of American Verbal Humor in The Big Bang Theory from the Perspective of Cooperative Principle. Academic Journal of Humanities & Social Sciences 4, 7 (2021).","journal-title":"Academic Journal of Humanities & Social Sciences"},{"key":"e_1_3_2_2_31_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D15-1284"},{"key":"e_1_3_2_2_32_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_2_33_1","volume-title":"IAS-BERT: An Information Gain Association Vector Semi-supervised BERT Model for Sentiment Analysis. In International Conference on Cloud Computing. Springer, 31--42","author":"Zhang Linkun","year":"2020","unstructured":"Linkun Zhang, Yuxia Lei, and Zhengyan Wang. 2020. IAS-BERT: An Information Gain Association Vector Semi-supervised BERT Model for Sentiment Analysis. In International Conference on Cloud Computing. Springer, 31--42."},{"key":"e_1_3_2_2_34_1","volume-title":"Long-Text Sentiment Analysis Based on Semantic Graph. In 2020 IEEE International Conference on Embedded Software and Systems (ICESS). IEEE, 1--6.","author":"Zhang Linkun","year":"2020","unstructured":"Linkun Zhang, Yuxia Lei, and Zhengyan Wang. 2020. Long-Text Sentiment Analysis Based on Semantic Graph. In 2020 IEEE International Conference on Embedded Software and Systems (ICESS). IEEE, 1--6."},{"key":"e_1_3_2_2_35_1","doi-asserted-by":"crossref","unstructured":"Zhengyan Zhang Yuxian Gu Xu Han Shengqi Chen Chaojun Xiao Zhenbo Sun Yuan Yao Fanchao Qi Jian Guan Pei Ke et al. 2021. CPM-2: Large-scale Cost-efficient Pre-trained Language Models.","DOI":"10.1016\/j.aiopen.2021.12.003"},{"key":"e_1_3_2_2_36_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.aiopen.2021.07.001"},{"key":"e_1_3_2_2_37_1","unstructured":"Jinlin Zhou Haixin Song Wendong Wang Yao Niu and Wenhao Rao. 2021. 
Takeaway Comments Sentiment Analysis Based on BERT."}],"event":{"name":"WSDM '23: The Sixteenth ACM International Conference on Web Search and Data Mining","location":"Singapore Singapore","acronym":"WSDM '23","sponsor":["SIGMOD ACM Special Interest Group on Management of Data","SIGWEB ACM Special Interest Group on Hypertext, Hypermedia, and Web","SIGKDD ACM Special Interest Group on Knowledge Discovery in Data","SIGIR ACM Special Interest Group on Information Retrieval"]},"container-title":["Proceedings of the Sixteenth ACM International Conference on Web Search and Data Mining"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3539597.3570431","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3539597.3570431","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T19:02:14Z","timestamp":1750186934000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3539597.3570431"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,2,27]]},"references-count":37,"alternative-id":["10.1145\/3539597.3570431","10.1145\/3539597"],"URL":"https:\/\/doi.org\/10.1145\/3539597.3570431","relation":{},"subject":[],"published":{"date-parts":[[2023,2,27]]},"assertion":[{"value":"2023-02-27","order":2,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}