{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,31]],"date-time":"2026-01-31T02:01:19Z","timestamp":1769824879736,"version":"3.49.0"},"publisher-location":"New York, NY, USA","reference-count":21,"publisher":"ACM",
"license":[{"start":{"date-parts":[[2024,12,19]],"date-time":"2024-12-19T00:00:00Z","timestamp":1734566400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,12,19]]},"DOI":"10.1145\/3704522.3704540","type":"proceedings-article","created":{"date-parts":[[2025,1,3]],"date-time":"2025-01-03T12:29:51Z","timestamp":1735907391000},"page":"24-31","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":5,
"title":["XLNet-CNN: Combining Global Context Understanding of XLNet with Local Context Capture through Convolution for Improved Multi-Label Text Classification"],"prefix":"10.1145",
"author":[{"ORCID":"https:\/\/orcid.org\/0009-0002-2022-7029","authenticated-orcid":false,"given":"Asif","family":"Shahriar","sequence":"first","affiliation":[{"name":"Computer Science and Engineering, BRAC University, Dhaka, Bangladesh"}]},
{"ORCID":"https:\/\/orcid.org\/0009-0003-6897-5381","authenticated-orcid":false,"given":"Debojit","family":"Pandit","sequence":"additional","affiliation":[{"name":"Computer Science and Engineering, BUET, Dhaka, Bangladesh, Bangladesh"}]},
{"ORCID":"https:\/\/orcid.org\/0000-0002-9887-4456","authenticated-orcid":false,"given":"M Saifur","family":"Rahman","sequence":"additional","affiliation":[{"name":"Computer Science and Engineering, BUET, Dhaka, Bangladesh, Bangladesh"}]}],
"member":"320","published-online":{"date-parts":[[2025,1,3]]},
"reference":[{"key":"e_1_3_3_1_2_2","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.woah-1.3"},
{"key":"e_1_3_3_1_3_2","doi-asserted-by":"publisher","unstructured":"Xinying Chen Peimin Cong and Shuo Lv. 2022. A Long-Text Classification Method of Chinese News Based on BERT and CNN. IEEE Access 10 (2022) 34046\u201334057. 10.1109\/ACCESS.2022.3162614","DOI":"10.1109\/ACCESS.2022.3162614"},
{"key":"e_1_3_3_1_4_2","first-page":"4171","volume-title":"Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies","volume":"1","author":"Devlin Jacob","year":"2019","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies , Vol.\u00a01. 4171\u20134186. https:\/\/arxiv.org\/pdf\/1810.04805.pdf"},
{"key":"e_1_3_3_1_5_2","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33016489"},
{"key":"e_1_3_3_1_6_2","doi-asserted-by":"publisher","unstructured":"Zahra Hanifelou Peyman Adibi Sayyed\u00a0Amirhassan Monadjemi and Hossein Karshenas. 2018. KNN-based multi-label twin support vector machine with priority of labels. Neurocomputing 322 (2018) 177\u2013186. 10.1016\/j.neucom.2018.09.044","DOI":"10.1016\/j.neucom.2018.09.044"},
{"key":"e_1_3_3_1_7_2","unstructured":"Diederik\u00a0P Kingma and Jimmy Ba. 2015. Adam: A Method for Stochastic Optimization. International Conference on Learning Representations (ICLR) (2015). https:\/\/arxiv.org\/abs\/1412.6980"},
{"key":"e_1_3_3_1_8_2","doi-asserted-by":"publisher","unstructured":"Anwesha Law and Ashish Ghosh. 2022. Multi-Label Classification Using Binary Tree of Classifiers. IEEE Transactions on Emerging Topics in Computational Intelligence 6 3 (2022) 677\u2013689. 10.1109\/TETCI.2021.3075717","DOI":"10.1109\/TETCI.2021.3075717"},
{"key":"e_1_3_3_1_9_2","doi-asserted-by":"publisher","unstructured":"Jinhyuk Lee Wonjin Yoon Sungdong Kim Donghyeon Kim Sunkyu Kim Chan\u00a0Ho So and Jaewoo Kang. 2020. BioBERT: a pre-trained biomedical language representation model for biomedical text mining. Bioinformatics 36 4 (2020) 1234\u20131240. 10.1093\/bioinformatics\/btz682","DOI":"10.1093\/bioinformatics\/btz682"},
{"key":"e_1_3_3_1_10_2","doi-asserted-by":"publisher","DOI":"10.1145\/3529836.3529912"},
{"key":"e_1_3_3_1_11_2","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i15.17566"},
{"key":"e_1_3_3_1_12_2","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i17.17745"},
{"key":"e_1_3_3_1_13_2","unstructured":"Masashi Nakamura Satoshi Kawazoe and Kenji Kurokawa. 2020. COVID-Twitter-BERT: A Natural Language Processing Model to Analyse COVID-19 Content on Twitter. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2005.07503 (2020). https:\/\/arxiv.org\/abs\/2005.07503"},
{"key":"e_1_3_3_1_14_2","doi-asserted-by":"crossref","unstructured":"Soham Poddar Siwei Luo and Long Xing. 2022. CAVES: A Dataset to facilitate Explainable Classification and Summarization of Concerns towards COVID Vaccines. (2022). https:\/\/arxiv.org\/abs\/2204.13746","DOI":"10.1145\/3477495.3531745"},
{"key":"e_1_3_3_1_15_2","unstructured":"Alec Radford Karthik Narasimhan Tim Salimans and Ilya Sutskever. 2018. Improving Language Understanding by Generative Pre-Training. (2018). https:\/\/s3-us-west-2.amazonaws.com\/openai-assets\/research-covers\/language-unsupervised\/language_understanding_paper.pdf"},
{"key":"e_1_3_3_1_16_2","doi-asserted-by":"crossref","unstructured":"Grigorios Tsoumakas and Ioannis Katakis. 2007. Multilabel classification: An overview. International Journal of Data Warehousing and Mining 3 3 (2007) 1\u201313.","DOI":"10.4018\/jdwm.2007070101"},
{"key":"e_1_3_3_1_17_2","first-page":"5998","volume-title":"Advances in Neural Information Processing Systems","author":"Vaswani Ashish","year":"2017","unstructured":"Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan\u00a0N. Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention Is All You Need. In Advances in Neural Information Processing Systems , Vol.\u00a030. Curran Associates, Inc., 5998\u20136008. https:\/\/arxiv.org\/abs\/1706.03762 https:\/\/dl.acm.org\/doi\/10.5555\/3295222.3295349"},
{"key":"e_1_3_3_1_18_2","doi-asserted-by":"publisher","unstructured":"Qingyao Wu Mingkui Tan Hengjie Song Jian Chen and Michael\u00a0K. Ng. 2016. ML-FOREST: A Multi-Label Tree Ensemble Method for Multi-Label Classification. IEEE Transactions on Knowledge and Data Engineering 28 10 (2016) 2665\u20132680. 10.1109\/TKDE.2016.2581161 https:\/\/dl.acm.org\/doi\/10.1109\/TKDE.2016.2581161","DOI":"10.1109\/TKDE.2016.2581161"},
{"key":"e_1_3_3_1_19_2","unstructured":"Baosong Yang Longyue Wang Derek Wong Lidia\u00a0S. Chao and Zhaopeng Tu. 2019. Convolutional Self-Attention Networks. arxiv:https:\/\/arXiv.org\/abs\/1904.03107\u00a0[cs.CL] https:\/\/arxiv.org\/abs\/1904.03107"},
{"key":"e_1_3_3_1_20_2","first-page":"5754","volume-title":"Advances in Neural Information Processing Systems","author":"Yang Zhilin","year":"2019","unstructured":"Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, and Quoc\u00a0V Le. 2019. XLNet: Generalized Autoregressive Pretraining for Language Understanding. In Advances in Neural Information Processing Systems. 5754\u20135764. https:\/\/arxiv.org\/abs\/1906.08237"},
{"key":"e_1_3_3_1_21_2","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-36204-1_37"},
{"key":"e_1_3_3_1_22_2","doi-asserted-by":"publisher","unstructured":"Xiaoyan Zhu Jiaxuan Li Jingtao Ren Jiayin Wang and Guangtao Wang. 2023. Dynamic ensemble learning for multi-label classification. Information Sciences 623 (2023) 94\u2013111. 10.1016\/j.ins.2022.12.022 https:\/\/dl.acm.org\/doi\/10.1016\/j.ins.2022.12.022","DOI":"10.1016\/j.ins.2022.12.022"}],
"event":{"name":"NSysS '24: 11th International Conference on Networking, Systems, and Security","location":"Khulna Karak Bangladesh","acronym":"NSysS '24"},"container-title":["Proceedings of the 11th International Conference on Networking, Systems, and Security"],"original-title":[],
"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3704522.3704540","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3704522.3704540","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],
"deposited":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T01:18:16Z","timestamp":1750295896000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3704522.3704540"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,19]]},"references-count":21,"alternative-id":["10.1145\/3704522.3704540","10.1145\/3704522"],"URL":"https:\/\/doi.org\/10.1145\/3704522.3704540","relation":{},"subject":[],"published":{"date-parts":[[2024,12,19]]},
"assertion":[{"value":"2025-01-03","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}