{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,23]],"date-time":"2025-10-23T11:24:17Z","timestamp":1761218657393,"version":"3.28.0"},"reference-count":41,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,12,7]],"date-time":"2023-12-07T00:00:00Z","timestamp":1701907200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,12,7]],"date-time":"2023-12-07T00:00:00Z","timestamp":1701907200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,12,7]]},"DOI":"10.1109\/aics60730.2023.10470628","type":"proceedings-article","created":{"date-parts":[[2024,3,20]],"date-time":"2024-03-20T18:15:31Z","timestamp":1710958531000},"page":"1-8","source":"Crossref","is-referenced-by-count":2,"title":["Query-Focused Submodular Demonstration Selection for In-Context Learning in Large Language Models"],"prefix":"10.1109","author":[{"given":"Paul","family":"Trust","sequence":"first","affiliation":[{"name":"School of Computer Science and Information Technology, University College Cork,Cork,Ireland"}]},{"given":"Rosane","family":"Minghim","sequence":"additional","affiliation":[{"name":"School of Computer Science and Information Technology, University College Cork,Cork,Ireland"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Bert: Pretraining of deep bidirectional transformers for language understanding","author":"Devlin","year":"2018","journal-title":"arXiv preprint"},{"key":"ref2","first-page":"1877","article-title":"Language models are few-shot learners","volume":"33","author":"Brown","year":"2020","journal-title":"Advances in neural information processing systems"},{"issue":"1","key":"ref3","first-page":"3","article-title":"Supervised machine learning: A review of classification techniques","volume":"160","author":"Kotsiantis","year":"2007","journal-title":"Emerging artificial intelligence applications in computer engineering"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1023\/A:1012487302797"},{"key":"ref5","article-title":"A neural probabilistic language model","volume":"13","author":"Bengio","year":"2000","journal-title":"Advances in neural information processing systems"},{"issue":"ARTICLE","key":"ref6","first-page":"2493","article-title":"Natural language processing (almost) from scratch","volume":"12","author":"Collobert","year":"2011","journal-title":"Journal of machine learning research"},{"key":"ref7","first-page":"4171","article-title":"BERT: Pretraining of deep bidirectional transformers for language understanding","volume-title":"Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)","author":"Devlin","year":"2019"},{"key":"ref8","article-title":"Bloom: A 176b-parameter open-access multilingual language model","author":"Scao","year":"2022","journal-title":"arXiv preprint"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3560815"},{"key":"ref10","first-page":"2300","article-title":"Do prompt-based models really understand the meaning of their prompts?","volume-title":"Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies","author":"Webson"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2020.3003836"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1561\/9781601987570"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/s10107-018-1248-6"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/WACV.2019.00054"},{"key":"ref15","first-page":"18 685","article-title":"Similar: Sub-modular information measures based active learning in realistic scenarios","volume":"34","author":"Kothawade","year":"2021","journal-title":"Advances in Neural Information Processing Systems"},{"volume-title":"Training compute-optimal large language models","year":"2022","author":"Hoffmann","key":"ref16"},{"key":"ref17","article-title":"Opt: Open pretrained transformer language models","author":"Zhang","year":"2022","journal-title":"arXiv preprint"},{"key":"ref18","doi-asserted-by":"crossref","first-page":"4195","DOI":"10.18653\/v1\/2021.findings-emnlp.354","article-title":"Want to reduce labeling cost? GPT-3 can help","volume-title":"Findings of the Association for Computational Linguistics: EMNLP 2021","author":"Wang","year":"2021"},{"key":"ref19","first-page":"20 841","article-title":"Black-box tuning for language-model-as-a-service","volume-title":"International Conference on Machine Learning","author":"Sun","year":"2022"},{"key":"ref20","first-page":"2791","article-title":"MetaICL: Learning to learn in context","volume-title":"Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies","author":"Min"},{"key":"ref21","article-title":"On the relation between sensitivity and accuracy in in-context learning","author":"Chen","year":"2022","journal-title":"arXiv preprint"},{"key":"ref22","first-page":"100","article-title":"What makes good in-context examples for GPT-3?","volume-title":"Proceedings of Deep Learning Inside Out (DeeLIO 2022): The 3rd Workshop on Knowledge Extraction and Integration for Deep Learning Architectures","author":"Liu"},{"key":"ref23","first-page":"12 697","article-title":"Calibrate before use: Improving few-shot performance of language models","volume-title":"International Conference on Machine Learning","author":"Zhao","year":"2021"},{"key":"ref24","article-title":"Visualizing and understanding recurrent networks","author":"Karpathy","year":"2015","journal-title":"arXiv preprint"},{"key":"ref25","article-title":"Visualizing and understanding neural models in nlp","author":"Li","year":"2015","journal-title":"arXiv preprint"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/VAST.2017.8585721"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/MCG.2018.2878902"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2017.2744158"},{"key":"ref29","article-title":"Efficient estimation of word representations in vector space","author":"Mikolov","year":"2013","journal-title":"arXiv preprint"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1410"},{"key":"ref31","first-page":"187","article-title":"exBERT: A Visual Analysis Tool to Explore Learned Representations in Transformer Models","volume-title":"Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: System Demonstrations","author":"Hoover"},{"key":"ref32","article-title":"Lmdiff: A visual diff tool to compare language models","author":"Strobelt","year":"2021","journal-title":"arXiv preprint"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/p19-3019"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2022.3209479"},{"key":"ref35","first-page":"3982","article-title":"Sentence-BERT: Sentence embeddings using Siamese BERT-networks","volume-title":"Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)","author":"Reimers"},{"key":"ref36","first-page":"1631","article-title":"Recursive deep models for semantic compositionality over a sentiment treebank","volume-title":"Proceedings of the 2013 conference on empirical methods in natural language processing","author":"Socher","year":"2013"},{"key":"ref37","article-title":"Character-level convolutional networks for text classification","volume":"28","author":"Zhang","year":"2015","journal-title":"Advances in neural information processing systems"},{"issue":"8","key":"ref38","first-page":"9","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"Radford","year":"2019","journal-title":"OpenAI blog"},{"key":"ref39","article-title":"The pile: An 800gb dataset of diverse text for language modeling","author":"Gao","year":"2020","journal-title":"arXiv preprint"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.11"},{"key":"ref41","article-title":"The bigscience roots corpus: A 1.6 tb composite multilingual dataset","author":"Lauren\u00e7on","year":"2023","journal-title":"arXiv preprint"}],"event":{"name":"2023 31st Irish Conference on Artificial Intelligence and Cognitive Science (AICS)","start":{"date-parts":[[2023,12,7]]},"location":"Letterkenny, Ireland","end":{"date-parts":[[2023,12,8]]}},"container-title":["2023 31st Irish Conference on Artificial Intelligence and Cognitive Science (AICS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10470453\/10470478\/10470628.pdf?arnumber=10470628","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,26]],"date-time":"2024-03-26T19:37:14Z","timestamp":1711481834000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10470628\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,12,7]]},"references-count":41,"URL":"https:\/\/doi.org\/10.1109\/aics60730.2023.10470628","relation":{},"subject":[],"published":{"date-parts":[[2023,12,7]]}}}