{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,27]],"date-time":"2026-01-27T19:57:30Z","timestamp":1769543850440,"version":"3.49.0"},"publisher-location":"New York, NY, USA","reference-count":22,"publisher":"ACM","funder":[{"name":"The Science and Technology Research Program of Chongqing Municipal Education Commission","award":["Grant No.KJQN202400642\u3001No.KJQN202100635"],"award-info":[{"award-number":["Grant No.KJQN202400642\u3001No.KJQN202100635"]}]},{"name":"Chongqing Research on the Talent Training Mode for Language Service Engineers in the \\\"Foreign Language + AI + Industry\\\" Context","award":["No. 03, 2024"],"award-info":[{"award-number":["No. 03, 2024"]}]},{"name":"Chongqing University of Posts and Telecommunications International Education Research Project","award":["No.GJJY20-1-01"],"award-info":[{"award-number":["No.GJJY20-1-01"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,11,7]]},"DOI":"10.1145\/3779153.3779177","type":"proceedings-article","created":{"date-parts":[[2026,1,27]],"date-time":"2026-01-27T07:35:51Z","timestamp":1769499351000},"page":"163-169","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["A Multi-Head and Multi-Level Attention Fusion Approach for Industrial Sci-Tech Achievement Classification with Large Language Model Enhancement"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0004-4439-0669","authenticated-orcid":false,"given":"Yilin","family":"Xie","sequence":"first","affiliation":[{"name":"Chongqing University of Posts and Telecommunications, Chongqing, Chongqing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0003-8320-3603","authenticated-orcid":false,"given":"Kun","family":"Ding","sequence":"additional","affiliation":[{"name":"Chongqing University of Posts and Telecommunications, Chongqing, Chongqing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-5726-9632","authenticated-orcid":false,"given":"Yumeng","family":"Fang","sequence":"additional","affiliation":[{"name":"Chongqing University of Posts and Telecommunications, Chongqing, Chongqing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1279-1845","authenticated-orcid":false,"given":"Jing","family":"Wang","sequence":"additional","affiliation":[{"name":"Chongqing University of Posts and Telecommunications, Chongqing, Chongqing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-9166-2052","authenticated-orcid":false,"given":"Bin","family":"Wang","sequence":"additional","affiliation":[{"name":"Chongqing University of Posts and Telecommunications, Chongqing, Chongqing, China"}]}],"member":"320","published-online":{"date-parts":[[2026,1,26]]},"reference":[{"key":"e_1_3_3_1_1_2","doi-asserted-by":"publisher","unstructured":"Ashish Vaswani Noam Shazeer Niki Parmar Jakob Uszkoreit Llion Jones Aidan N. Gomez Lukasz Kaiser and Illia Polosukhin. 2017. Attention Is All You Need. In Advances in Neural Information Processing Systems (NeurIPS\u201917) 5998\u20136008. https:\/\/doi.org\/10.48550\/arXiv.1706.03762","DOI":"10.48550\/arXiv.1706.03762"},{"key":"e_1_3_3_1_2_2","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2312.09287"},{"key":"e_1_3_3_1_3_2","doi-asserted-by":"publisher","unstructured":"Yingqian Cui Jie Ren Pengfei He Jiliang Tang and Yue Xing. 2024. Superiority of Multi-Head Attention in In-Context Linear Regression. 
https:\/\/doi.org\/10.48550\/arXiv.2401.17426","DOI":"10.48550\/arXiv.2401.17426"},{"key":"e_1_3_3_1_4_2","doi-asserted-by":"publisher","unstructured":"Da Xiao Qingye Meng Shengping Li and Xingyuan Yuan. 2024. Improving Transformers with Dynamically Composable Multi-Head Attention. https:\/\/doi.org\/10.48550\/arXiv.2405.08553","DOI":"10.48550\/arXiv.2405.08553"},{"key":"e_1_3_3_1_5_2","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N16-1174"},{"key":"e_1_3_3_1_6_2","doi-asserted-by":"publisher","unstructured":"Ilias Chalkidis Xiang Dai Manos Fergadiotis Prodromos Malakasiotis and Desmond Elliott. 2022. An Exploration of Hierarchical Attention Transformers for Efficient Long Document Classification. https:\/\/doi.org\/10.48550\/arXiv.2210.05529","DOI":"10.48550\/arXiv.2210.05529"},{"key":"e_1_3_3_1_7_2","doi-asserted-by":"publisher","unstructured":"Jinghui Lu Maeve Henchion Ivan Bacher and Brian Mac Namee. 2021. A Sentence-level Hierarchical BERT Model for Document Classification with Limited Labelled Data. https:\/\/doi.org\/10.48550\/arXiv.2106.06738","DOI":"10.48550\/arXiv.2106.06738"},{"key":"e_1_3_3_1_8_2","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.eacl-main.20"},{"key":"e_1_3_3_1_9_2","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2005.11401"},{"key":"e_1_3_3_1_10_2","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2002.08909"},{"key":"e_1_3_3_1_11_2","doi-asserted-by":"publisher","unstructured":"Edward J. Hu Yelong Shen Phillip Wallis et al. 2021. LoRA: Low-Rank Adaptation of Large Language Models. https:\/\/doi.org\/10.48550\/arXiv.2106.09685","DOI":"10.48550\/arXiv.2106.09685"},{"key":"e_1_3_3_1_12_2","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00254"},{"key":"e_1_3_3_1_13_2","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1812.08951"},{"key":"e_1_3_3_1_14_2","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1905.10650"},{"key":"e_1_3_3_1_15_2","doi-asserted-by":"publisher","unstructured":"Jean-Baptiste Cordonnier Andreas Loukas and Martin Jaggi. 2020. Multi-Head Attention: Collaborate Instead of Concatenate. 
https:\/\/doi.org\/10.48550\/arXiv.2006.16362","DOI":"10.48550\/arXiv.2006.16362"},{"key":"e_1_3_3_1_16_2","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1905.09418"},{"key":"e_1_3_3_1_17_2","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2004.05150"},{"key":"e_1_3_3_1_18_2","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2001.04451"},{"key":"e_1_3_3_1_19_2","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1905.05950"},{"key":"e_1_3_3_1_20_2","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1002"},{"key":"e_1_3_3_1_21_2","doi-asserted-by":"publisher","DOI":"10.1007\/s10489-023-04691-5"},{"key":"e_1_3_3_1_22_2","doi-asserted-by":"publisher","DOI":"10.1007\/978-981-97-5600-1_22"}],"event":{"name":"BDIOT 2025: 2025 9th International Conference on Big Data and Internet of Things","location":"Chongqing China","acronym":"BDIOT 2025"},"container-title":["Proceedings of the 2025 9th International Conference on Big Data and Internet of Things"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3779153.3779177","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,27]],"date-time":"2026-01-27T07:36:02Z","timestamp":1769499362000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3779153.3779177"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11,7]]},"references-count":22,"alternative-id":["10.1145\/3779153.3779177","10.1145\/3779153"],"URL":"https:\/\/doi.org\/10.1145\/3779153.3779177","relation":{},"subject":[],"published":{"date-parts":[[2025,11,7]]},"assertion":[{"value":"2026-01-26","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}