{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,11]],"date-time":"2025-09-11T18:50:45Z","timestamp":1757616645924,"version":"3.44.0"},"reference-count":38,"publisher":"Springer Science and Business Media LLC","issue":"3","license":[{"start":{"date-parts":[[2024,6,24]],"date-time":"2024-06-24T00:00:00Z","timestamp":1719187200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,6,24]],"date-time":"2024-06-24T00:00:00Z","timestamp":1719187200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61976124, 61976125, 62372077"],"award-info":[{"award-number":["61976124, 61976125, 62372077"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J Data Sci Anal"],"published-print":{"date-parts":[[2025,9]]},"DOI":"10.1007\/s41060-024-00588-9","type":"journal-article","created":{"date-parts":[[2024,6,24]],"date-time":"2024-06-24T00:01:38Z","timestamp":1719187298000},"page":"2163-2173","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Lcp-mixer: a lightweight model based on concept-level perception for NLP"],"prefix":"10.1007","volume":"20","author":[{"given":"Huanling","family":"Tang","sequence":"first","affiliation":[]},{"given":"Yulin","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Ruiquan","family":"Li","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,6,24]]},"reference":[{"unstructured":"Vaswani, A., Shazeer, N., 
Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, \u0141., Polosukhin, I.: Attention is all you need. Advances in Neural Information Processing Systems (2017).https:\/\/proceedings.neurips.cc\/paper\/2017\/hash\/3f5ee243547dee91fbd053c1c4a845aa-Abstract.html","key":"588_CR1"},{"doi-asserted-by":"publisher","unstructured":"Kitaev, N., Kaiser, L., Levskaya, A.: Reformer: The Efficient Transformer. International Conference on Learning Representations (2019). https:\/\/doi.org\/10.1145\/3503161.3548409","key":"588_CR2","DOI":"10.1145\/3503161.3548409"},{"unstructured":"Wu, Z., Liu, Z., Lin, J., Lin, Y., Han, S.: Lite Transformer with Long-Short Range Attention. International conference on learning representations (2019). https:\/\/openreview.net\/forum?id=ByeMPlHKPH","key":"588_CR3"},{"unstructured":"Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., Uszkoreit, J., Houlsby, N.: An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale. international conference on learning representations (2021). https:\/\/openreview.net\/forum?id=yicbfdntty","key":"588_CR4"},{"unstructured":"Tolstikhin, I.O., Houlsby, N., Kolesnikov, A., Beyer, L., Zhai, X., Unterthiner, T., Yung, J., Steiner, A., Keysers, D., Uszkoreit, J., et al.: MLP-Mixer: An all-MLP Architecture for Vision. Advances in Neural Information Processing Systems (2021). https:\/\/openreview.net\/forum?id=EI2KOXKdnP","key":"588_CR5"},{"issue":"4","key":"588_CR6","doi-asserted-by":"publisher","first-page":"5314","DOI":"10.1109\/TPAMI.2022.320614","volume":"45","author":"H Touvron","year":"2022","unstructured":"Touvron, H., Bojanowski, P., Caron, M., Cord, M., El-Nouby, A., Grave, E., Izacard, G., Joulin, A., Synnaeve, G., Verbeek, J., et al.: Resmlp: feedforward networks for image classification with data-efficient training. IEEE Trans. Pattern Anal. Mach. Intell. 45(4), 5314\u20135321 (2022). 
https:\/\/doi.org\/10.1109\/TPAMI.2022.3206148","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"unstructured":"Lian, D., Yu, Z., Sun, X., Gao, S.: AS-MLP: An Axial Shifted MLP Architecture for Vision. International conference on learning representations (2022). https:\/\/openreview.net\/forum?id=fvLLcIYmXb","key":"588_CR7"},{"issue":"12","key":"588_CR8","doi-asserted-by":"publisher","first-page":"14284","DOI":"10.1109\/tpami.2023.3303397","volume":"45","author":"S Chen","year":"2023","unstructured":"Chen, S., Xie, E., Ge, C., Chen, R., Liang, D., Luo, P.: Cyclemlp: A mlp-like architecture for dense visual predictions. IEEE Trans. Pattern Anal. Mach. Intell. 45(12), 14284\u201314300 (2023). https:\/\/doi.org\/10.1109\/tpami.2023.3303397","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"unstructured":"Liu, H., Dai, Z., So, D., Le, Q.V.: Pay attention to mlps. Advances in Neural Information Processing Systems (2021).https:\/\/proceedings.neurips.cc\/paper\/2021\/hash\/4cc05b35c2f937c5bd9e7d41d3686fff-Abstract.html","key":"588_CR9"},{"doi-asserted-by":"publisher","unstructured":"Fusco, F., Pascual, D., Staar, P., Antognini, D.: pNLP-Mixer: an Efficient all-MLP Architecture for Language. Association for Computational Linguistics, Toronto, Canada. Proceedings of the 61st annual meeting of the association for computational linguistics (Volume 5: Industry Track) (2023). https:\/\/doi.org\/10.18653\/v1\/2023.acl-industry.6","key":"588_CR10","DOI":"10.18653\/v1\/2023.acl-industry.6"},{"doi-asserted-by":"publisher","unstructured":"Mai, F., Pannatier, A., Fehr, F., Chen, H., Marelli, F., Fleuret, F., Henderson, J.: HyperMixer: An MLP-based Low Cost Alternative to Transformers. Association for Computational Linguistics, Toronto, Canada. Proceedings of the 61st annual meeting of the association for computational linguistics (Volume 1: Long Papers) (2023). 
https:\/\/doi.org\/10.18653\/v1\/2023.acl-long.871","key":"588_CR11","DOI":"10.18653\/v1\/2023.acl-long.871"},{"doi-asserted-by":"publisher","unstructured":"Devlin, J., Chang, M.-W., Lee, K., Toutanova, K.: Bert: Pre-training of deep bidirectional transformers for language understanding. 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (2018). https:\/\/doi.org\/10.18653\/v1\/N19-1423","key":"588_CR12","DOI":"10.18653\/v1\/N19-1423"},{"unstructured":"Yang, Z., Dai, Z., Yang, Y., Carbonell, J., Salakhutdinov, R.R., Le, Q.V.: Xlnet: Generalized autoregressive pretraining for language understanding. Advances in Neural Information Processing Systems (2019).https:\/\/proceedings.neurips.cc\/paper\/2019\/hash\/dc6a7e655d7e5840e66733e9ee67cc69-Abstract.html","key":"588_CR13"},{"unstructured":"Kaliamoorthi, P., Siddhant, A., Li, E., Johnson, M.: Distilling large language models into tiny and effective students using pqrnn. arXiv preprint arXiv:2101.08890 (2021) 10.48550\/arXiv.2101.08890","key":"588_CR14"},{"doi-asserted-by":"publisher","unstructured":"Li, H., Arora, A., Chen, S., Gupta, A., Gupta, S., Mehdad, Y.: MTOP: A Comprehensive Multilingual Task-Oriented Semantic Parsing Benchmark. Proceedings of the 16th Conference of the European chapter of the association for computational linguistics: main volume (2021). https:\/\/doi.org\/10.18653\/v1\/2021.eacl-main.257","key":"588_CR15","DOI":"10.18653\/v1\/2021.eacl-main.257"},{"unstructured":"Liu, Y., Ott, M., Goyal, N., Du, J., Joshi, M., Chen, D., Levy, O., Lewis, M., Zettlemoyer, L., Stoyanov, V.: Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692 (2019)","key":"588_CR16"},{"unstructured":"Radford, A., Narasimhan, K., Salimans, T., Sutskever, I., et al.: Improving language understanding by generative pre-training. 
OpenAI (2018).https:\/\/www.cs.ubc.ca\/~amuham01\/LING530\/papers\/radford2018improving.pdf","key":"588_CR17"},{"unstructured":"Beltagy, I., Peters, M.E., Cohan, A.: Longformer: The long-document transformer. arXiv preprint arXiv:2004.05150 (2020) 10.48550\/arXiv.2004.05150","key":"588_CR18"},{"doi-asserted-by":"publisher","unstructured":"Jiao, X., Yin, Y., Shang, L., Jiang, X., Chen, X., Li, L., Wang, F., Liu, Q.: TinyBERT: Distilling BERT for natural language understanding. association for computational linguistics, Online. findings of the association for computational linguistics: EMNLP 2020 (2020). https:\/\/doi.org\/10.18653\/v1\/2020.findings-emnlp.372","key":"588_CR19","DOI":"10.18653\/v1\/2020.findings-emnlp.372"},{"doi-asserted-by":"publisher","unstructured":"Sun, Z., Yu, H., Song, X., Liu, R., Yang, Y., Zhou, D.: MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices. Association for Computational Linguistics, Online. Proceedings of the 58th annual meeting of the association for computational linguistics (2020). https:\/\/doi.org\/10.18653\/v1\/2020.acl-main.195","key":"588_CR20","DOI":"10.18653\/v1\/2020.acl-main.195"},{"doi-asserted-by":"publisher","unstructured":"Kaliamoorthi, P., Ravi, S., Kozareva, Z.: PRADO: Projection attention networks for document classification on-device. Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th international joint conference on natural language processing (EMNLP-IJCNLP) (2019). https:\/\/doi.org\/10.18653\/v1\/d19-1506","key":"588_CR21","DOI":"10.18653\/v1\/d19-1506"},{"doi-asserted-by":"publisher","unstructured":"Guo, J., Tang, Y., Han, K., Chen, X., Wu, H., Xu, C., Xu, C., Wang, Y.: Hire-mlp: Vision mlp via hierarchical rearrangement. Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (2022). 
https:\/\/doi.org\/10.1109\/cvpr52688.2022.00090.https:\/\/openaccess.thecvf.com\/content\/CVPR2022\/html\/Guo_Hire-MLP_Vision_MLP_via_Hierarchical_Rearrangement_CVPR_2022_paper.html","key":"588_CR22","DOI":"10.1109\/cvpr52688.2022.00090"},{"doi-asserted-by":"publisher","unstructured":"Tang, Y., Han, K., Guo, J., Xu, C., Li, Y., Xu, C., Wang, Y.: An image patch is a wave: Phase-aware vision mlp. Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (2022). https:\/\/doi.org\/10.1109\/cvpr52688.2022.01066.https:\/\/openaccess.thecvf.com\/content\/CVPR2022\/html\/Tang_An_Image_Patch_Is_a_Wave_Phase-Aware_Vision_MLP_CVPR_2022_paper.html","key":"588_CR23","DOI":"10.1109\/cvpr52688.2022.01066"},{"doi-asserted-by":"publisher","unstructured":"Yu, T., Li, X., Cai, Y., Sun, M., Li, P.: S2-mlp: Spatial-shift mlp architecture for vision. Proceedings of the IEEE\/CVF winter conference on applications of computer vision (2022). https:\/\/doi.org\/10.1109\/wacv51458.2022.00367.https:\/\/openaccess.thecvf.com\/content\/WACV2022\/html\/Yu_S2-MLP_Spatial-Shift_MLP_Architecture_for_Vision_WACV_2022_paper.html","key":"588_CR24","DOI":"10.1109\/wacv51458.2022.00367"},{"doi-asserted-by":"publisher","unstructured":"Henderson, J.: The Unstoppable Rise of Computational Linguistics in Deep Learning. Association for Computational Linguistics, Online. Proceedings of the 58th annual meeting of the association for computational linguistics (2020). https:\/\/doi.org\/10.18653\/v1\/2020.acl-main.561","key":"588_CR25","DOI":"10.18653\/v1\/2020.acl-main.561"},{"key":"588_CR26","doi-asserted-by":"publisher","DOI":"10.1016\/j.engappai.2023.106471","volume":"123","author":"X Liu","year":"2023","unstructured":"Liu, X., Tang, H., Zhao, J., Dou, Q., Lu, M.: Tcamixer: a lightweight mixer based on a novel triple concepts attention mechanism for nlp. Eng. Appl. Artif. Intell. 123, 106471 (2023). https:\/\/doi.org\/10.1016\/j.engappai.2023.106471","journal-title":"Eng. Appl. 
Artif. Intell."},{"key":"588_CR27","doi-asserted-by":"publisher","DOI":"10.1016\/j.csl.2020.101182","volume":"68","author":"J Deng","year":"2021","unstructured":"Deng, J., Cheng, L., Wang, Z.: Attention-based bilstm fused cnn with gating mechanism model for chinese long text classification. Comput. Speech Lang. 68, 101182 (2021)","journal-title":"Comput. Speech Lang."},{"key":"588_CR28","doi-asserted-by":"publisher","first-page":"42","DOI":"10.1016\/j.neucom.2019.08.080","volume":"386","author":"J Xu","year":"2020","unstructured":"Xu, J., Cai, Y., Wu, X., Lei, X., Huang, Q., Leung, H.-F., Li, Q.: Incorporating context-relevant concepts into convolutional neural networks for short text classification. Neurocomputing 386, 42\u201353 (2020)","journal-title":"Neurocomputing"},{"unstructured":"Zaheer, M., Guruganesh, G., Dubey, K.A., Ainslie, J., Alberti, C., Ontanon, S., Pham, P., Ravula, A., Wang, Q., Yang, L., et al.: Big bird: Transformers for longer sequences. Advances in Neural Information Processing Systems (2020).https:\/\/proceedings.neurips.cc\/paper\/2020\/hash\/c8512d142a2d849725f31a9a7a361ab9-Abstract.html","key":"588_CR29"},{"doi-asserted-by":"publisher","unstructured":"Zhang, X., Wei, F., Zhou, M.: HIBERT: Document Level Pre-training of Hierarchical Bidirectional Transformers for Document Summarization. Association for Computational Linguistics, Florence, Italy. Proceedings of the 57th annual meeting of the association for computational linguistics (2019). https:\/\/doi.org\/10.18653\/v1\/P19-1499","key":"588_CR30","DOI":"10.18653\/v1\/P19-1499"},{"unstructured":"Pande, M., Budhraja, A., Nema, P., Kumar, P., Khapra, M.M.: On the importance of local information in transformer based models. 
arXiv preprint arXiv:2008.05828 (2020) 10.48550\/arXiv.2008.05828","key":"588_CR31"},{"doi-asserted-by":"publisher","unstructured":"Jacob, B., Kligys, S., Chen, B., Zhu, M., Tang, M., Howard, A., Adam, H., Kalenichenko, D.: Quantization and training of neural networks for efficient integer-arithmetic-only inference. Proceedings of the IEEE conference on computer vision and pattern recognition (2018). https:\/\/doi.org\/10.1109\/cvpr.2018.00286","key":"588_CR32","DOI":"10.1109\/cvpr.2018.00286"},{"unstructured":"Hendrycks, D., Gimpel, K.: Gaussian error linear units (gelus). arXiv preprint arXiv:1606.08415 (2016) 10.48550\/arXiv.1606.08415","key":"588_CR33"},{"unstructured":"Ba, J.L., Kiros, J.R., Hinton, G.E.: Layer normalization. arXiv preprint arXiv:1607.06450 (2016)","key":"588_CR34"},{"unstructured":"Lai, G., Oguz, B., Yang, Y., Stoyanov, V.: Bridging the domain gap in cross-lingual document classification. (2019) arXiv:1909.07009","key":"588_CR35"},{"doi-asserted-by":"publisher","unstructured":"Conneau, A., Khandelwal, K., Goyal, N., Chaudhary, V., Wenzek, G., Guzm\u00e1n, F., Grave, \u00c9., Ott, M., Zettlemoyer, L., Stoyanov, V.: Unsupervised Cross-lingual Representation Learning at Scale. Proceedings of the 58th annual meeting of the association for computational linguistics (2020). https:\/\/doi.org\/10.18653\/v1\/2020.acl-main.747","key":"588_CR36","DOI":"10.18653\/v1\/2020.acl-main.747"},{"doi-asserted-by":"publisher","unstructured":"Sun, C., Qiu, X., Xu, Y., Huang, X.: How to fine-tune bert for text classification? China national conference on Chinese computational linguistics (2019). https:\/\/doi.org\/10.1007\/978-3-030-32381-3_16","key":"588_CR37","DOI":"10.1007\/978-3-030-32381-3_16"},{"doi-asserted-by":"publisher","unstructured":"Lee-Thorp, J., Ainslie, J., Eckstein, I., Ontanon, S.: Fnet: Mixing tokens with fourier transforms. Association for Computational Linguistics, Seattle, United States. 
Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (2022). https:\/\/doi.org\/10.18653\/v1\/2022.naacl-main.319","key":"588_CR38","DOI":"10.18653\/v1\/2022.naacl-main.319"}],"container-title":["International Journal of Data Science and Analytics"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s41060-024-00588-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s41060-024-00588-9\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s41060-024-00588-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,5]],"date-time":"2025-09-05T20:16:56Z","timestamp":1757103416000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s41060-024-00588-9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,6,24]]},"references-count":38,"journal-issue":{"issue":"3","published-print":{"date-parts":[[2025,9]]}},"alternative-id":["588"],"URL":"https:\/\/doi.org\/10.1007\/s41060-024-00588-9","relation":{},"ISSN":["2364-415X","2364-4168"],"issn-type":[{"type":"print","value":"2364-415X"},{"type":"electronic","value":"2364-4168"}],"subject":[],"published":{"date-parts":[[2024,6,24]]},"assertion":[{"value":"23 March 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"10 June 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"24 June 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article 
History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no Conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"Informed consent was obtained from all individual participants included in the study.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethical and informed consent for data used"}}]}}