{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,25]],"date-time":"2026-02-25T20:35:39Z","timestamp":1772051739843,"version":"3.50.1"},"reference-count":36,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/access.2024.3354972","type":"journal-article","created":{"date-parts":[[2024,1,16]],"date-time":"2024-01-16T18:24:24Z","timestamp":1705429464000},"page":"10690-10698","source":"Crossref","is-referenced-by-count":8,"title":["LF-Transformer: Latent Factorizer Transformer for Tabular Learning"],"prefix":"10.1109","volume":"12","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-2529-4880","authenticated-orcid":false,"given":"Kwangtek","family":"Na","sequence":"first","affiliation":[{"name":"Department of Electrical and Computer Engineering, Inha University, Incheon, Republic of Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3141-6917","authenticated-orcid":false,"given":"Ju-Hong","family":"Lee","sequence":"additional","affiliation":[{"name":"Department of Electrical and Computer Engineering, Inha University, Incheon, Republic of Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3743-3550","authenticated-orcid":false,"given":"Eunchan","family":"Kim","sequence":"additional","affiliation":[{"name":"Department of Intelligence and Information, Seoul National University, Seoul, Republic of Korea"}]}],"member":"263","reference":[{"key":"ref1","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Brown"},
{"key":"ref2","article-title":"Pushing the limits of semi-supervised learning for automatic speech recognition","author":"Zhang","year":"2020","journal-title":"arXiv:2010.10504"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.32604\/iasc.2023.032783"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00135"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.3837\/tiis.2023.02.016"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i8.16826"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1145\/2939672.2939785"},{"key":"ref8","article-title":"LightGBM: A highly efficient gradient boosting decision tree","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"30","author":"Ke"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2021.11.011"},{"key":"ref10","article-title":"CatBoost: Unbiased boosting with categorical features","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"31","author":"Prokhorenkova"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.2307\/2699986"},{"key":"ref12","first-page":"18932","article-title":"Revisiting deep learning models for tabular data","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Gorishniy"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/MC.2009.263"},{"issue":"2","key":"ref14","doi-asserted-by":"crossref","first-page":"139","DOI":"10.1023\/A:1007607513941","article-title":"An experimental comparison of three methods for constructing ensembles of decision trees: Bagging, boosting, and randomization","volume":"40","author":"Dietterich","year":"2000","journal-title":"Mach. Learn."},
{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2016.04.001"},{"key":"ref16","article-title":"Neural oblivious decision ensembles for deep learning on tabular data","author":"Popov","year":"2019","journal-title":"arXiv:1909.06312"},{"key":"ref17","article-title":"Net-DNF: Effective deep modeling of tabular data","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Katzir"},{"key":"ref18","article-title":"Attention is all you need","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"30","author":"Vaswani"},{"key":"ref19","article-title":"SAINT: Improved neural networks for tabular data via row attention and contrastive pre-training","author":"Somepalli","year":"2021","journal-title":"arXiv:2106.01342"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/p19-1176"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1016\/S0167-7152(96)00140-X"},{"key":"ref22","first-page":"202","article-title":"Scaling up the accuracy of Naive-Bayes classifiers: A decision-tree hybrid","volume-title":"Proc. 2nd Int. Conf. Knowl. Discovery Data Mining","volume":"96","author":"Kohavi"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-05318-5_10"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1038\/ncomms5308"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1023\/B:VISI.0000042993.50813.60"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1145\/2020408.2020421"},{"key":"ref27","article-title":"The million song dataset","volume-title":"Proc. 12th Int. Soc. Music Inf. Retr. Conf. (ISMIR)","author":"Bertin-Mahieux"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1016\/S0168-1699(99)00046-0"},{"key":"ref29","first-page":"1","article-title":"Yahoo! learning to rank challenge overview","volume-title":"Proc. Learn. Rank Challenge","author":"Chapelle"},
{"key":"ref30","article-title":"Introducing LETOR 4.0 datasets","author":"Qin","year":"2013","journal-title":"arXiv:1306.2597"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.385"},{"key":"ref32","article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","author":"Dosovitskiy","year":"2020","journal-title":"arXiv:2010.11929"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1145\/2641190.2641198"},{"key":"ref34","article-title":"Axial attention in multidimensional transformers","author":"Ho","year":"2019","journal-title":"arXiv:1912.12180"},{"key":"ref35","first-page":"8844","article-title":"MSA transformer","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Rao"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.naacl-main.270"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6287639\/10380310\/10401112.pdf?arnumber=10401112","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,2,1]],"date-time":"2024-02-01T01:51:45Z","timestamp":1706752305000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10401112\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":36,"URL":"https:\/\/doi.org\/10.1109\/access.2024.3354972","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]}}}