{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,7]],"date-time":"2026-02-07T19:19:59Z","timestamp":1770491999962,"version":"3.49.0"},"publisher-location":"New York, NY, USA","reference-count":115,"publisher":"ACM","license":[{"start":{"date-parts":[[2022,6,20]],"date-time":"2022-06-20T00:00:00Z","timestamp":1655683200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2022,6,21]]},"DOI":"10.1145\/3531146.3533206","type":"proceedings-article","created":{"date-parts":[[2022,6,20]],"date-time":"2022-06-20T14:27:10Z","timestamp":1655735230000},"page":"1516-1527","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":5,"title":["Should attention be all we need? The epistemic and ethical implications of unification in machine learning"],"prefix":"10.1145","author":[{"given":"Nic","family":"Fishman","sequence":"first","affiliation":[{"name":"University of Oxford, United Kingdom"}]},{"given":"Leif","family":"Hancox-Li","sequence":"additional","affiliation":[{"name":"Capital One, USA"}]}],"member":"320","published-online":{"date-parts":[[2022,6,20]]},"reference":[{"key":"e_1_3_2_1_1_1","unstructured":"Mart\u00edn Abadi Ashish Agarwal Paul Barham Eugene Brevdo Zhifeng Chen Craig Citro Greg\u00a0S. 
Corrado Andy Davis Jeffrey Dean Matthieu Devin Sanjay Ghemawat Ian Goodfellow Andrew Harp Geoffrey Irving Michael Isard Yangqing Jia Rafal Jozefowicz Lukasz Kaiser Manjunath Kudlur Josh Levenberg Dandelion Man\u00e9 Rajat Monga Sherry Moore Derek Murray Chris Olah Mike Schuster Jonathon Shlens Benoit Steiner Ilya Sutskever Kunal Talwar Paul Tucker Vincent Vanhoucke Vijay Vasudevan Fernanda Vi\u00e9gas Oriol Vinyals Pete Warden Martin Wattenberg Martin Wicke Yuan Yu and Xiaoqiang Zheng. 2015. TensorFlow: Large-Scale Machine Learning on Heterogeneous Systems. https:\/\/www.tensorflow.org\/ Software available from tensorflow.org."},{"key":"e_1_3_2_1_2_1","volume-title":"A learning algorithm for Boltzmann machines. Cognitive science 9, 1","author":"Ackley H","year":"1985","unstructured":"David\u00a0H Ackley, Geoffrey\u00a0E Hinton, and Terrence\u00a0J Sejnowski. 1985. A learning algorithm for Boltzmann machines. Cognitive science 9, 1 (1985), 147\u2013169."},{"key":"e_1_3_2_1_3_1","volume-title":"What is the Point of Equality?Ethics 109, 2","author":"Anderson S","year":"1999","unstructured":"Elizabeth\u00a0S Anderson. 1999. What is the Point of Equality?Ethics 109, 2 (1999), 287\u2013337."},{"key":"e_1_3_2_1_4_1","doi-asserted-by":"publisher","DOI":"10.1147\/JRD.2019.2942288"},{"key":"e_1_3_2_1_5_1","volume-title":"Finite-time analysis of the multiarmed bandit problem. Machine learning 47, 2","author":"Auer Peter","year":"2002","unstructured":"Peter Auer, Nicolo Cesa-Bianchi, and Paul Fischer. 2002. Finite-time analysis of the multiarmed bandit problem. Machine learning 47, 2 (2002), 235\u2013256."},{"key":"e_1_3_2_1_6_1","unstructured":"Alexis\u00a0T Baria and Keith Cross. 2021. The brain is a computer is a brain: neuroscience\u2019s internal debate and the social significance of the Computational Metaphor. 
arXiv preprint arXiv:2107.14042(2021)."},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.1145\/3442188.3445922"},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.1162\/artl_a_00336"},{"key":"e_1_3_2_1_9_1","volume-title":"Idealism and the Sociology of Knowledge. Social studies of science 26, 4","author":"Bloor David","year":"1996","unstructured":"David Bloor. 1996. Idealism and the Sociology of Knowledge. Social studies of science 26, 4 (1996), 839\u2013856."},{"key":"e_1_3_2_1_10_1","unstructured":"John Bohannon and Sam Charrington. 2022. Trends in NLP with John Bohannon. https:\/\/twimlai.com\/trends-in-nlp-with-john-bohannon\/"},{"key":"e_1_3_2_1_11_1","unstructured":"Rishi Bommasani Drew\u00a0A Hudson Ehsan Adeli Russ Altman Simran Arora Sydney von Arx Michael\u00a0S Bernstein Jeannette Bohg Antoine Bosselut Emma Brunskill 2021. On the opportunities and risks of foundation models. arXiv preprint arXiv:2108.07258(2021)."},{"key":"e_1_3_2_1_12_1","volume-title":"Superintelligence: Paths, Dangers, Strategies","author":"Bostrom Nick","year":"2017","unstructured":"Nick Bostrom. 2017. Superintelligence: Paths, Dangers, Strategies. Oxford University Press."},{"key":"e_1_3_2_1_13_1","volume-title":"Artificial intelligence safety and security, Roman\u00a0V","author":"Bostrom Nick","unstructured":"Nick Bostrom and Eliezer Yudkowsky. 2018. The ethics of artificial intelligence. In Artificial intelligence safety and security, Roman\u00a0V. Yampolskiy (Ed.). Chapman and Hall\/CRC, Boca Raton, Florida, 57\u201369."},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","unstructured":"Jeffrey Bowers Gaurav Malhotra Marin Dujmovi\u0107 Milton\u00a0Llera Montero Christian Tsvetkov Valerio Biscione Guillermo Puebla Federico Adolfi John Hummel Rachel\u00a0Flood Heaton Benjamin Evans Jeff Mitchell and Ryan Blything. 2022. Deep Problems with Neural Network Models of Human Vision. 
https:\/\/doi.org\/10.31234\/osf.io\/5zf4s","DOI":"10.31234\/osf.io\/5zf4s"},{"key":"e_1_3_2_1_15_1","unstructured":"James Bradbury Roy Frostig Peter Hawkins Matthew\u00a0James Johnson Chris Leary Dougal Maclaurin George Necula Adam Paszke Jake VanderPlas Skye Wanderman-Milne and Qiao Zhang. 2018. JAX: composable transformations of Python+NumPy programs. http:\/\/github.com\/google\/jax Accessed May 2 2022."},{"key":"e_1_3_2_1_16_1","volume-title":"Attention Approximates Sparse Distributed Memory. Advances in Neural Information Processing Systems 34","author":"Bricken Trenton","year":"2021","unstructured":"Trenton Bricken and Cengiz Pehlevan. 2021. Attention Approximates Sparse Distributed Memory. Advances in Neural Information Processing Systems 34 (2021)."},{"key":"e_1_3_2_1_17_1","unstructured":"Alex Campolo Madelyn Sanfilippo Meredith Whittaker and Kate Crawford. 2017. AI Now 2017 Report. https:\/\/ainowinstitute.org\/AI_Now_2017_Report.pdf. Accessed: 2022-01-21."},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.1007\/s11229-021-03368-1"},{"key":"e_1_3_2_1_19_1","volume-title":"The dappled world: A study of the boundaries of science","author":"Nancy Cartwright","unstructured":"Nancy Cartwright 1999. The dappled world: A study of the boundaries of science. Cambridge University Press."},{"key":"e_1_3_2_1_20_1","volume-title":"The Stanford Encyclopedia of Philosophy (Spring 2022 ed.), Edward\u00a0N","author":"Cat Jordi","unstructured":"Jordi Cat. 2022. The Unity of Science. In The Stanford Encyclopedia of Philosophy (Spring 2022 ed.), Edward\u00a0N. Zalta (Ed.). Metaphysics Research Lab, Stanford University."},{"key":"e_1_3_2_1_21_1","unstructured":"Sam Charrington and Georgia Gkioxari. 2022. Trends in Computer Vision with Georgia Gkioxari. 
https:\/\/twimlai.com\/trends-in-computer-vision-with-georgia-gkioxari\/"},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.1145\/3465055"},{"key":"e_1_3_2_1_23_1","unstructured":"Kathleen Creel and Deborah Hellman. forthcoming. The Algorithmic Leviathan: Arbitrariness Fairness and Opportunity in Algorithmic Decision Making Systems. Canadian Journal of Philosophy(forthcoming)."},{"key":"e_1_3_2_1_24_1","unstructured":"DeepLearning.AI. 2021. The Batch. https:\/\/read.deeplearning.ai\/the-batch\/issue-123\/ \u201cOriginally developed for natural language processing transformers are becoming the Swiss Army Knife of deep learning.\u201d."},{"key":"e_1_3_2_1_25_1","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pone.0216125"},{"key":"e_1_3_2_1_26_1","volume-title":"Human Help Wanted: Why AI Is Terrible at Content Moderation. https:\/\/www.pcmag.com\/opinions\/human-help-wanted-why-ai-is-terrible-at-content-moderation Accessed","author":"Dickson Ben","year":"2022","unstructured":"Ben Dickson. 2019. Human Help Wanted: Why AI Is Terrible at Content Moderation. https:\/\/www.pcmag.com\/opinions\/human-help-wanted-why-ai-is-terrible-at-content-moderation Accessed Jan 18, 2022."},{"key":"e_1_3_2_1_27_1","unstructured":"Alexey Dosovitskiy Lucas Beyer Alexander Kolesnikov Dirk Weissenborn Xiaohua Zhai Thomas Unterthiner Mostafa Dehghani Matthias Minderer Georg Heigold Sylvain Gelly 2020. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929(2020)."},{"key":"e_1_3_2_1_28_1","volume-title":"Is chess the drosophila of artificial intelligence? A social history of an algorithm. Social studies of science 42, 1","author":"Ensmenger Nathan","year":"2012","unstructured":"Nathan Ensmenger. 2012. Is chess the drosophila of artificial intelligence? A social history of an algorithm. 
Social studies of science 42, 1 (2012), 5\u201330."},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"crossref","unstructured":"Tom Everitt Gary Lea and Marcus Hutter. 2018. AGI safety literature review. arXiv preprint arXiv:1805.01109(2018).","DOI":"10.24963\/ijcai.2018\/768"},{"key":"e_1_3_2_1_30_1","volume-title":"n.d.. huggingface (Hugging Face). https:\/\/huggingface.co\/huggingface Accessed","author":"Face Hugging","year":"2022","unstructured":"Hugging Face. n.d.. huggingface (Hugging Face). https:\/\/huggingface.co\/huggingface Accessed Jan 17, 2022."},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"publisher","DOI":"10.1177\/2053951719860542"},{"key":"e_1_3_2_1_32_1","doi-asserted-by":"publisher","DOI":"10.2307\/2024924"},{"key":"e_1_3_2_1_33_1","doi-asserted-by":"publisher","DOI":"10.1007\/BF00344251"},{"key":"e_1_3_2_1_34_1","volume-title":"Image and logic: A material culture of microphysics","author":"Peter Galison","unstructured":"Peter Galison 1997. Image and logic: A material culture of microphysics. University of Chicago Press."},{"key":"e_1_3_2_1_35_1","doi-asserted-by":"publisher","DOI":"10.1145\/3458723"},{"key":"e_1_3_2_1_36_1","doi-asserted-by":"publisher","DOI":"10.1016\/S2589-7500(21)00208-9"},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33013681"},{"key":"e_1_3_2_1_38_1","doi-asserted-by":"publisher","DOI":"10.1109\/DSAA.2018.00018"},{"key":"e_1_3_2_1_39_1","doi-asserted-by":"crossref","unstructured":"Lisa Gitelman (Ed.). 2013. Raw data is an oxymoron. MIT Press Cambridge Massachusetts.","DOI":"10.7551\/mitpress\/9302.001.0001"},{"key":"e_1_3_2_1_40_1","volume-title":"Deep Learning","author":"Goodfellow Ian","unstructured":"Ian Goodfellow, Yoshua Bengio, and Aaron Courville. 2016. Deep Learning. MIT Press, Cambridge, Massachusetts. http:\/\/www.deeplearningbook.org."},{"key":"e_1_3_2_1_41_1","unstructured":"Anirudh Goyal and Yoshua Bengio. 2020. Inductive biases for deep learning of higher-level cognition. 
arXiv preprint arXiv:2011.15091(2020)."},{"key":"e_1_3_2_1_42_1","doi-asserted-by":"publisher","DOI":"10.23919\/SpringSim.2019.8732892"},{"key":"e_1_3_2_1_43_1","doi-asserted-by":"publisher","DOI":"10.1086\/701070"},{"key":"e_1_3_2_1_44_1","volume-title":"Situated knowledges: The science question in feminism and the privilege of partial perspective. Feminist studies 14, 3","author":"Haraway Donna","year":"1988","unstructured":"Donna Haraway. 1988. Situated knowledges: The science question in feminism and the privilege of partial perspective. Feminist studies 14, 3 (1988), 575\u2013599."},{"key":"e_1_3_2_1_45_1","doi-asserted-by":"publisher","DOI":"10.1007\/BF01064504"},{"key":"e_1_3_2_1_46_1","doi-asserted-by":"publisher","DOI":"10.1007\/s11229-016-1294-7"},{"key":"e_1_3_2_1_47_1","doi-asserted-by":"publisher","DOI":"10.1049\/ip-sen:20020208"},{"key":"e_1_3_2_1_48_1","doi-asserted-by":"publisher","DOI":"10.1073\/pnas.0403723101"},{"key":"e_1_3_2_1_49_1","doi-asserted-by":"publisher","DOI":"10.1145\/3467017"},{"key":"e_1_3_2_1_50_1","doi-asserted-by":"publisher","DOI":"10.1145\/3178876.3186044"},{"key":"e_1_3_2_1_51_1","unstructured":"Ilenna\u00a0Simone Jones and Konrad\u00a0Paul Kording. 2020. Can single neurons solve MNIST? The computational power of biological dendritic trees. arXiv preprint arXiv:2009.01269(2020)."},{"key":"e_1_3_2_1_52_1","unstructured":"@karpathy (Andrej\u00a0Karpathy). 2021. 
The ongoing consolidation in AI is incredible....https:\/\/twitter.com\/karpathy\/status\/1468370605229547522?s=20&t=go3X-8IlBf_X-ekQ07oh7g Accessed: 2022-05-01."},{"key":"e_1_3_2_1_53_1","doi-asserted-by":"publisher","DOI":"10.1186\/s12916-019-1426-2"},{"key":"e_1_3_2_1_54_1","doi-asserted-by":"publisher","DOI":"10.1086\/289019"},{"key":"e_1_3_2_1_55_1","doi-asserted-by":"publisher","DOI":"10.1073\/pnas.2018340118"},{"key":"e_1_3_2_1_56_1","doi-asserted-by":"publisher","DOI":"10.1086\/708249"},{"key":"e_1_3_2_1_57_1","volume-title":"Building machines that learn and think like people. Behavioral and Brain Sciences 40","author":"Lake M","year":"2017","unstructured":"Brenden\u00a0M Lake, Tomer\u00a0D Ullman, Joshua\u00a0B Tenenbaum, and Samuel\u00a0J Gershman. 2017. Building machines that learn and think like people. Behavioral and Brain Sciences 40 (2017)."},{"key":"e_1_3_2_1_58_1","unstructured":"Sage Lazzaro. 2021. Are AI ethics teams doomed to be a facade? Women who pioneered them weigh in. https:\/\/venturebeat.com\/2021\/09\/30\/are-ai-ethics-teams-doomed-to-be-a-facade-the-women-who-pioneered-them-weigh-in\/ Accessed Jan 18 2022."},{"key":"e_1_3_2_1_59_1","unstructured":"Stefan Lee Senthil Purushwalkam Michael Cogswell David Crandall and Dhruv Batra. 2015. Why m heads are better than one: Training a diverse ensemble of deep networks. arXiv preprint arXiv:1511.06314(2015)."},{"key":"e_1_3_2_1_60_1","first-page":"205","article-title":"Path dependence, lock-in, and history","volume":"11","author":"Liebowitz J","year":"1995","unstructured":"Stan\u00a0J Liebowitz and Stephen\u00a0E Margolis. 1995. Path dependence, lock-in, and history. Journal of Law, Economics, & Organization 11 (1995), 205\u2013226. 
Issue 1.","journal-title":"Journal of Law, Economics, & Organization"},{"key":"e_1_3_2_1_61_1","volume-title":"A ConvNet for the","author":"Liu Zhuang","year":"2020","unstructured":"Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, and Saining Xie. 2022. A ConvNet for the 2020s. arXiv preprint arXiv:2201.03545(2022)."},{"key":"e_1_3_2_1_62_1","doi-asserted-by":"publisher","DOI":"10.1023\/A:1007113830879"},{"key":"e_1_3_2_1_63_1","doi-asserted-by":"publisher","DOI":"10.1177\/004839310103100402"},{"key":"e_1_3_2_1_64_1","doi-asserted-by":"publisher","DOI":"10.1073\/pnas.1910416117"},{"key":"e_1_3_2_1_65_1","volume-title":"Parallel distributed processing. Vol.\u00a02","author":"McClelland L","unstructured":"James\u00a0L McClelland, David\u00a0E Rumelhart, PDP\u00a0Research Group, 1986. Parallel distributed processing. Vol.\u00a02. MIT Press, Cambridge, Massachusetts."},{"key":"e_1_3_2_1_66_1","volume-title":"A logical calculus of the ideas immanent in nervous activity. Bulletin of mathematical biology 52, 1","author":"McCulloch S","year":"1990","unstructured":"Warren\u00a0S McCulloch and Walter Pitts. 1990. A logical calculus of the ideas immanent in nervous activity. Bulletin of mathematical biology 52, 1 (1990), 99\u2013115."},{"key":"e_1_3_2_1_67_1","doi-asserted-by":"publisher","DOI":"10.1080\/15265161.2020.1819470"},{"key":"e_1_3_2_1_68_1","volume-title":"\u201cI Told You So","author":"Melanson Mike","year":"2022","unstructured":"Mike Melanson. 2021. Log4j Is One Big \u201cI Told You So\u201d for Open Source Communities. https:\/\/thenewstack.io\/log4j-is-one-big-i-told-you-so-for-open-source-communities\/ Accessed Jan 17, 2022."},{"key":"e_1_3_2_1_69_1","volume-title":"\u201cTransformers [...] took the machine learning world by storm","author":"John.\u00a0P. Mellow\u00a0Jr.2022. State of AI Report: Transformers Are Taking the AI World by Storm. 
https:\/\/exchange.scale.com\/public\/blogs\/state-of-ai-report-2021-transformers-taking-ai-world-by-storm-nathan-benaich","year":"2021","unstructured":"John.\u00a0P. Mellow\u00a0Jr.2022. State of AI Report: Transformers Are Taking the AI World by Storm. https:\/\/exchange.scale.com\/public\/blogs\/state-of-ai-report-2021-transformers-taking-ai-world-by-storm-nathan-benaich \u201cTransformers [...] took the machine learning world by storm in 2021. Originally designed to work with natural language processing models, the technology has blasted out of NLP over the last 12 months to emerge as a general-purpose architecture for ML.\u201d Accessed May 9, 2022."},{"key":"e_1_3_2_1_70_1","doi-asserted-by":"publisher","DOI":"10.1145\/1015330.1015385"},{"key":"e_1_3_2_1_71_1","doi-asserted-by":"publisher","DOI":"10.7551\/mitpress\/11301.001.0001"},{"key":"e_1_3_2_1_72_1","doi-asserted-by":"crossref","unstructured":"Melanie Mitchell. 2021. Why AI is harder than we think. arXiv preprint arXiv:2104.12871(2021).","DOI":"10.1145\/3449639.3465421"},{"key":"e_1_3_2_1_73_1","volume-title":"Fundamental issues of artificial intelligence","author":"M\u00fcller C","unstructured":"Vincent\u00a0C M\u00fcller and Nick Bostrom. 2016. Future progress in artificial intelligence: A survey of expert opinion. In Fundamental issues of artificial intelligence. Springer, 555\u2013572."},{"key":"e_1_3_2_1_74_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58595-2_41"},{"key":"e_1_3_2_1_75_1","doi-asserted-by":"publisher","DOI":"10.1086\/375475"},{"key":"e_1_3_2_1_76_1","volume-title":"The view from nowhere","author":"Nagel Thomas","unstructured":"Thomas Nagel. 1989. The view from nowhere. Oxford University Press."},{"key":"e_1_3_2_1_77_1","unstructured":"Behnam Neyshabur Ryota Tomioka and Nathan Srebro. 2014. In search of the real inductive bias: On the role of implicit regularization in deep learning. 
arXiv preprint arXiv:1412.6614(2014)."},{"key":"e_1_3_2_1_78_1","volume-title":"Algorithms of oppression","author":"Noble Safiya\u00a0Umoja","unstructured":"Safiya\u00a0Umoja Noble. 2018. Algorithms of oppression. New York University Press."},{"key":"e_1_3_2_1_79_1","doi-asserted-by":"publisher","DOI":"10.1177\/030631296026003005"},{"key":"e_1_3_2_1_80_1","volume-title":"Six principles for biologically based computational models of cortical cognition. Trends in cognitive sciences 2, 11","author":"O\u2019Reilly C","year":"1998","unstructured":"Randall\u00a0C O\u2019Reilly. 1998. Six principles for biologically based computational models of cortical cognition. Trends in cognitive sciences 2, 11 (1998), 455\u2013462."},{"key":"e_1_3_2_1_81_1","volume-title":"Accessed","author":"Ornes Stephen","year":"2022","unstructured":"Stephen Ornes. 2022. Will Transformers Take Over Artificial Intelligence?https:\/\/www.quantamagazine.org\/will-transformers-take-over-artificial-intelligence-20220310\/ \u201cWang, for example, thinks the transformer may be a big step toward achieving a kind of convergence of neural net architectures, resulting in a universal approach to computer vision \u2014 and perhaps to other AI tasks as well.\u201d Accessed May 9, 2022."},{"key":"e_1_3_2_1_82_1","volume-title":"Image Classification on ImageNet. https:\/\/paperswithcode.com\/sota\/image-classification-on-imagenet Accessed","author":"Code Papers With","year":"2022","unstructured":"Papers With Code. 2022. Image Classification on ImageNet. https:\/\/paperswithcode.com\/sota\/image-classification-on-imagenet Accessed Apr 22, 2022."},{"key":"e_1_3_2_1_83_1","volume-title":"Machine Translation on WMT2014 English-German. https:\/\/paperswithcode.com\/sota\/machine-translation-on-wmt2014-english-german Accessed","author":"Code Papers With","year":"2022","unstructured":"Papers With Code. 2022. Machine Translation on WMT2014 English-German. 
https:\/\/paperswithcode.com\/sota\/machine-translation-on-wmt2014-english-german Accessed Apr 22, 2022."},{"key":"e_1_3_2_1_84_1","volume-title":"Object Detection on COCO test-dev. https:\/\/paperswithcode.com\/sota\/object-detection-on-coco Accessed","author":"Code Papers With","year":"2022","unstructured":"Papers With Code. 2022. Object Detection on COCO test-dev. https:\/\/paperswithcode.com\/sota\/object-detection-on-coco Accessed Apr 22, 2022."},{"key":"e_1_3_2_1_85_1","volume-title":"Question Answering on SQuAD1.1. https:\/\/paperswithcode.com\/sota\/question-answering-on-squad11 Accessed","author":"Code Papers With","year":"2022","unstructured":"Papers With Code. 2022. Question Answering on SQuAD1.1. https:\/\/paperswithcode.com\/sota\/question-answering-on-squad11 Accessed Apr 22, 2022."},{"key":"e_1_3_2_1_86_1","volume-title":"Semantic Segmentation on ADE20K. https:\/\/paperswithcode.com\/sota\/semantic-segmentation-on-ade20k Accessed","author":"Code Papers With","year":"2022","unstructured":"Papers With Code. 2022. Semantic Segmentation on ADE20K. https:\/\/paperswithcode.com\/sota\/semantic-segmentation-on-ade20k Accessed Apr 22, 2022."},{"key":"e_1_3_2_1_87_1","volume-title":"Sentiment Analysis on SST-2 Binary classification. https:\/\/paperswithcode.com\/sota\/sentiment-analysis-on-sst-2-binary Accessed","author":"Code Papers With","year":"2022","unstructured":"Papers With Code. 2022. Sentiment Analysis on SST-2 Binary classification. https:\/\/paperswithcode.com\/sota\/sentiment-analysis-on-sst-2-binary Accessed Apr 22, 2022."},{"key":"e_1_3_2_1_88_1","volume-title":"Speech Recognition on LibriSpeech test-clean. https:\/\/paperswithcode.com\/sota\/speech-recognition-on-librispeech-test-clean Accessed","author":"Code Papers With","year":"2022","unstructured":"Papers With Code. 2022. Speech Recognition on LibriSpeech test-clean. 
https:\/\/paperswithcode.com\/sota\/speech-recognition-on-librispeech-test-clean Accessed Apr 22, 2022."},{"key":"e_1_3_2_1_89_1","doi-asserted-by":"crossref","unstructured":"Seymour Papert. 1988. One AI or many?Daedalus 117(1988) 1\u201314. Issue 1.","DOI":"10.1080\/07435808809036344"},{"key":"e_1_3_2_1_90_1","volume-title":"PyTorch: An Imperative Style","author":"Paszke Adam","unstructured":"Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. 2019. PyTorch: An Imperative Style, High-Performance Deep Learning Library. In Advances in Neural Information Processing Systems 32, H.\u00a0Wallach, H.\u00a0Larochelle, A.\u00a0Beygelzimer, F.\u00a0d'Alch\u00e9-Buc, E.\u00a0Fox, and R.\u00a0Garnett (Eds.). Curran Associates, Inc., 8024\u20138035. http:\/\/papers.neurips.cc\/paper\/9015-pytorch-an-imperative-style-high-performance-deep-learning-library.pdf"},{"key":"e_1_3_2_1_91_1","volume-title":"Explanatory unification and the early synthesis. The British journal for the philosophy of science 56, 3","author":"Plutynski Anya","year":"2005","unstructured":"Anya Plutynski. 2005. Explanatory unification and the early synthesis. The British journal for the philosophy of science 56, 3 (2005), 595\u2013609."},{"key":"e_1_3_2_1_92_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00700"},{"key":"e_1_3_2_1_93_1","volume-title":"Conference on Neural Information Processing Systems (NeurIPS 2021)","author":"Raji Inioluwa\u00a0Deborah","year":"2021","unstructured":"Inioluwa\u00a0Deborah Raji, Emily\u00a0M Bender, Amandalynne Paullada, Emily Denton, and Alex Hanna. 2021. AI and the everything in the whole wide world benchmark. 
Conference on Neural Information Processing Systems (NeurIPS 2021) Track on Datasets and Benchmarks(2021)."},{"key":"e_1_3_2_1_94_1","doi-asserted-by":"publisher","DOI":"10.1101\/2020.12.15.422761"},{"key":"e_1_3_2_1_95_1","volume-title":"The morality of freedom","author":"Raz Joseph","unstructured":"Joseph Raz. 1986. The morality of freedom. Clarendon Press, Oxford."},{"key":"e_1_3_2_1_96_1","volume-title":"\u201cUnity is a common goal in many disciplines. [...] In natural language processing (NLP), the current dominant modeling network is Transformer","author":"Research Microsoft","year":"2022","unstructured":"Microsoft Research. 2021. Five reasons to embrace Transformer in computer vision. https:\/\/www.microsoft.com\/en-us\/research\/lab\/microsoft-research-asia\/articles\/five-reasons-to-embrace-transformer-in-computer-vision\/ \u201cUnity is a common goal in many disciplines. [...] In natural language processing (NLP), the current dominant modeling network is Transformer; in computer vision (CV), for a long time, the dominant architecture was convolutional neural networks (CNN); in social computing field, the dominant architecture is graph networks; and so on. However, the situation has changed since the end of last year, when Transformers began to demonstrate revolutionary performance improvements for a variety of computer vision tasks.\u201d Accessed May 9, 2022."},{"key":"e_1_3_2_1_97_1","volume-title":"A deep learning framework for neuroscience. Nature neuroscience 22, 11","author":"Richards A","year":"2019","unstructured":"Blake\u00a0A Richards, Timothy\u00a0P Lillicrap, Philippe Beaudoin, Yoshua Bengio, Rafal Bogacz, Amelia Christensen, Claudia Clopath, Rui\u00a0Ponte Costa, Archy de Berker, Surya Ganguli, 2019. A deep learning framework for neuroscience. 
Nature neuroscience 22, 11 (2019), 1761\u20131770."},{"key":"e_1_3_2_1_98_1","volume-title":"The Stanford Encyclopedia of Philosophy (Winter 2021 ed.), Edward\u00a0N","author":"Robeyns Ingrid","unstructured":"Ingrid Robeyns and Morten\u00a0Fibieger Byskov. 2021. The Capability Approach. In The Stanford Encyclopedia of Philosophy (Winter 2021 ed.), Edward\u00a0N. Zalta (Ed.). Metaphysics Research Lab, Stanford University."},{"key":"e_1_3_2_1_100_1","volume-title":"Towards spike-based machine intelligence with neuromorphic computing. Nature 575, 7784","author":"Roy Kaushik","year":"2019","unstructured":"Kaushik Roy, Akhilesh Jaiswal, and Priyadarshini Panda. 2019. Towards spike-based machine intelligence with neuromorphic computing. Nature 575, 7784 (2019), 607\u2013617."},{"key":"e_1_3_2_1_101_1","doi-asserted-by":"publisher","DOI":"10.1038\/323533a0"},{"key":"e_1_3_2_1_102_1","volume-title":"chess and shogi by planning with a learned model. Nature 588, 7839","author":"Schrittwieser Julian","year":"2020","unstructured":"Julian Schrittwieser, Ioannis Antonoglou, Thomas Hubert, Karen Simonyan, Laurent Sifre, Simon Schmitt, Arthur Guez, Edward Lockhart, Demis Hassabis, Thore Graepel, 2020. Mastering Atari, Go, chess and shogi by planning with a learned model. Nature 588, 7839 (2020), 604\u2013609."},{"key":"e_1_3_2_1_103_1","doi-asserted-by":"publisher","DOI":"10.1073\/pnas.1907373117"},{"key":"e_1_3_2_1_104_1","doi-asserted-by":"publisher","DOI":"10.1145\/3287560.3287598"},{"key":"e_1_3_2_1_105_1","doi-asserted-by":"publisher","DOI":"10.15252\/embr.201949177"},{"key":"e_1_3_2_1_106_1","doi-asserted-by":"publisher","DOI":"10.1086\/392725"},{"key":"e_1_3_2_1_107_1","volume-title":"The Vienna Circle and logical empiricism","author":"Sober Elliott","unstructured":"Elliott Sober. 2003. Two uses of unification. In The Vienna Circle and logical empiricism, Friedrich Stadler (Ed.). 
Kluwer Academic Publishers, New York, 205\u2013216."},{"key":"e_1_3_2_1_108_1","volume-title":"Distributed artificial intelligence","author":"Star Susan\u00a0Leigh","unstructured":"Susan\u00a0Leigh Star. 1989. The structure of ill-structured solutions: Boundary objects and heterogeneous distributed problem solving. In Distributed artificial intelligence. Elsevier, 37\u201354."},{"key":"e_1_3_2_1_109_1","doi-asserted-by":"publisher","DOI":"10.1086\/681768"},{"key":"e_1_3_2_1_110_1","unstructured":"Lav\u00a0R Varshney Nitish\u00a0Shirish Keskar and Richard Socher. 2019. Pretrained AI models: performativity mobility and change. arXiv preprint arXiv:1909.03290(2019)."},{"key":"e_1_3_2_1_111_1","unstructured":"Shikhar Vashishth Shyam Upadhyay Gaurav\u00a0Singh Tomar and Manaal Faruqui. 2019. Attention interpretability across NLP tasks. arXiv preprint arXiv:1909.11218(2019)."},{"key":"e_1_3_2_1_112_1","unstructured":"Ashish Vaswani Noam Shazeer Niki Parmar Jakob Uszkoreit Llion Jones Aidan\u00a0N Gomez \u0141ukasz Kaiser and Illia Polosukhin. 2017. Attention is all you need. Advances in Neural Information Processing Systems 30."},{"key":"e_1_3_2_1_113_1","doi-asserted-by":"publisher","DOI":"10.1145\/3488666"},{"key":"e_1_3_2_1_114_1","doi-asserted-by":"publisher","DOI":"10.1109\/4235.585893"},{"key":"e_1_3_2_1_115_1","volume-title":"Training Feedback Spiking Neural Networks by Implicit Differentiation on the Equilibrium State. Advances in Neural Information Processing Systems 34","author":"Xiao Mingqing","year":"2021","unstructured":"Mingqing Xiao, Qingyan Meng, Zongpeng Zhang, Yisen Wang, and Zhouchen Lin. 2021. Training Feedback Spiking Neural Networks by Implicit Differentiation on the Equilibrium State. 
Advances in Neural Information Processing Systems 34 (2021)."},{"key":"e_1_3_2_1_116_1","doi-asserted-by":"publisher","DOI":"10.1007\/s10670-009-9194-6"}],"event":{"name":"FAccT '22: 2022 ACM Conference on Fairness, Accountability, and Transparency","location":"Seoul Republic of Korea","acronym":"FAccT '22","sponsor":["ACM Association for Computing Machinery"]},"container-title":["2022 ACM Conference on Fairness Accountability and Transparency"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3531146.3533206","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3531146.3533206","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T19:31:30Z","timestamp":1750188690000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3531146.3533206"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,6,20]]},"references-count":115,"alternative-id":["10.1145\/3531146.3533206","10.1145\/3531146"],"URL":"https:\/\/doi.org\/10.1145\/3531146.3533206","relation":{},"subject":[],"published":{"date-parts":[[2022,6,20]]},"assertion":[{"value":"2022-06-20","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}