{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,20]],"date-time":"2026-02-20T18:41:52Z","timestamp":1771612912835,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":78,"publisher":"ACM","license":[{"start":{"date-parts":[[2025,6,20]],"date-time":"2025-06-20T00:00:00Z","timestamp":1750377600000},"content-version":"vor","delay-in-days":0,"URL":"http:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"DOI":"10.13039\/100000001","name":"NSF (National Science Foundation)","doi-asserted-by":"publisher","award":["2402804, 2402805"],"award-info":[{"award-number":["2402804, 2402805"]}],"id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,6,21]]},"DOI":"10.1145\/3695053.3731082","type":"proceedings-article","created":{"date-parts":[[2025,6,20]],"date-time":"2025-06-20T16:43:11Z","timestamp":1750437791000},"page":"1524-1538","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":2,"title":["TrioSim: A Lightweight Simulator for Large-Scale DNN Workloads on Multi-GPU Systems"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0005-2737-0583","authenticated-orcid":false,"given":"Ying","family":"Li","sequence":"first","affiliation":[{"name":"William &amp; Mary, Williamsburg, VA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6308-3841","authenticated-orcid":false,"given":"Yuhui","family":"Bao","sequence":"additional","affiliation":[{"name":"Northeastern University, Boston, MA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-4800-7781","authenticated-orcid":false,"given":"Gongyu","family":"Wang","sequence":"additional","affiliation":[{"name":"Lightmatter, Boston, MA, 
USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1046-5269","authenticated-orcid":false,"given":"Xinxin","family":"Mei","sequence":"additional","affiliation":[{"name":"Jefferson Lab, Newport News, VA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-7249-7574","authenticated-orcid":false,"given":"Pranav","family":"Vaid","sequence":"additional","affiliation":[{"name":"Stanford University, Palo Alto, CA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-7294-7634","authenticated-orcid":false,"given":"Anandaroop","family":"Ghosh","sequence":"additional","affiliation":[{"name":"Lightmatter, Boston, MA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5525-7204","authenticated-orcid":false,"given":"Adwait","family":"Jog","sequence":"additional","affiliation":[{"name":"University of Virginia, Charlottesville, VA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8218-5656","authenticated-orcid":false,"given":"Darius","family":"Bunandar","sequence":"additional","affiliation":[{"name":"Lightmatter, Boston, MA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3256-9942","authenticated-orcid":false,"given":"Ajay","family":"Joshi","sequence":"additional","affiliation":[{"name":"Lightmatter\/Boston University, Boston, MA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3532-6521","authenticated-orcid":false,"given":"Yifan","family":"Sun","sequence":"additional","affiliation":[{"name":"William &amp; Mary, Williamsburg, VA, USA"}]}],"member":"320","published-online":{"date-parts":[[2025,6,20]]},"reference":[{"key":"e_1_3_3_2_2_2","doi-asserted-by":"publisher","DOI":"10.1145\/3466752.3480100"},{"key":"e_1_3_3_2_3_2","doi-asserted-by":"publisher","DOI":"10.1145\/3146347.3146356"},{"key":"e_1_3_3_2_4_2","unstructured":"Jehyeon Bang Yujeong Choi Myeongwoo Kim Yongdeok Kim and Minsoo Rhu. 2023. vtrain: A simulation framework for evaluating cost-effective and compute-optimal large language model training. 
arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2312.12391 (2023)."},{"key":"e_1_3_3_2_5_2","doi-asserted-by":"publisher","DOI":"10.1145\/3559009.3569666"},{"key":"e_1_3_3_2_6_2","volume-title":"Tensor Parallel","year":"2024","unstructured":"BlackSamorez. 2024. Tensor Parallel. Retrieved July 20, 2024 from https:\/\/github.com\/BlackSamorez\/tensor_parallel"},{"key":"e_1_3_3_2_7_2","doi-asserted-by":"crossref","unstructured":"Eric Chung Jeremy Fowers Kalin Ovtcharov Michael Papamichael Adrian Caulfield Todd Massengill Ming Liu Daniel Lo Shlomi Alkalay Michael Haselman et\u00a0al. 2018. Serving dnns in real time at datacenter scale with project brainwave. IEEE Micro 38 2 (2018) 8\u201320.","DOI":"10.1109\/MM.2018.022071131"},{"key":"e_1_3_3_2_8_2","unstructured":"Hyung\u00a0Won Chung Le Hou Shayne Longpre Barret Zoph Yi Tay William Fedus Yunxuan Li Xuezhi Wang Mostafa Dehghani Siddhartha Brahma et\u00a0al. 2024. Scaling instruction-finetuned language models. Journal of Machine Learning Research 25 70 (2024) 1\u201353."},{"key":"e_1_3_3_2_9_2","doi-asserted-by":"publisher","DOI":"10.1109\/CGO57630.2024.10444862"},{"key":"e_1_3_3_2_10_2","unstructured":"Jacob Devlin Ming-Wei Chang Kenton Lee and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/1810.04805 (2018)."},{"key":"e_1_3_3_2_11_2","doi-asserted-by":"crossref","unstructured":"Aditya Dhakal Sameer\u00a0G Kulkarni and KK Ramakrishnan. 2024. D-STACK: High Throughput DNN Inference by Effective Multiplexing and Spatio-Temporal Scheduling of GPUs. IEEE Transactions on Cloud Computing (2024).","DOI":"10.1109\/TCC.2024.3476210"},{"key":"e_1_3_3_2_12_2","unstructured":"Weiguang Ding Ruoyan Wang Fei Mao and Graham Taylor. 2014. Theano-based large-scale visual recognition with multiple gpus. 
arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/1412.2302 (2014)."},{"key":"e_1_3_3_2_13_2","doi-asserted-by":"publisher","DOI":"10.1109\/EURCON.2007.4400581"},{"key":"e_1_3_3_2_14_2","unstructured":"Abhimanyu Dubey Abhinav Jauhri Abhinav Pandey Abhishek Kadian Ahmad Al-Dahle Aiesha Letman Akhil Mathur Alan Schelten Amy Yang Angela Fan et\u00a0al. 2024. The llama 3 herd of models. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2407.21783 (2024)."},{"key":"e_1_3_3_2_15_2","doi-asserted-by":"publisher","DOI":"10.5555\/2606265.2606953"},{"key":"e_1_3_3_2_16_2","doi-asserted-by":"publisher","DOI":"10.1109\/CICC.2011.6055363"},{"key":"e_1_3_3_2_17_2","unstructured":"Thomas\u00a0J Giuli and Mary Baker. 2002. Narses: A scalable flow-based network simulator. arXiv preprint cs\/0211024 (2002)."},{"key":"e_1_3_3_2_18_2","doi-asserted-by":"publisher","DOI":"10.5555\/2015039.2015535"},{"key":"e_1_3_3_2_19_2","doi-asserted-by":"publisher","DOI":"10.1109\/HPCA47549.2020.00047"},{"key":"e_1_3_3_2_20_2","doi-asserted-by":"publisher","DOI":"10.1145\/3578244.3583736"},{"key":"e_1_3_3_2_21_2","unstructured":"Yueming Hao Xu Zhao Bin Bao David Berard Will Constable Adnan Aziz and Xu Liu. 2023. TorchBench: Benchmarking PyTorch with High API Surface Coverage. arxiv:https:\/\/arXiv.org\/abs\/2304.14226\u00a0[cs.LG]"},{"key":"e_1_3_3_2_22_2","doi-asserted-by":"publisher","DOI":"10.1109\/HCS55958.2022.9895610"},{"key":"e_1_3_3_2_23_2","doi-asserted-by":"publisher","DOI":"10.1109\/HPCA.2018.00059"},{"key":"e_1_3_3_2_24_2","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"e_1_3_3_2_25_2","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.243"},{"key":"e_1_3_3_2_26_2","unstructured":"Yanping Huang Youlong Cheng Ankur Bapna Orhan Firat Dehao Chen Mia Chen HyoukJoong Lee Jiquan Ngiam Quoc\u00a0V Le Yonghui Wu et\u00a0al. 2019. Gpipe: Efficient training of giant neural networks using pipeline parallelism. 
Advances in neural information processing systems 32 (2019)."},{"key":"e_1_3_3_2_27_2","unstructured":"Nan Jiang George Michelogiannakis Daniel Becker Brian Towles and William\u00a0J Dally. 2010. Booksim 2.0 user\u2019s guide. Stanford University (2010) q1."},{"key":"e_1_3_3_2_28_2","doi-asserted-by":"publisher","DOI":"10.1145\/3079856.3080246"},{"key":"e_1_3_3_2_29_2","doi-asserted-by":"publisher","DOI":"10.1109\/ISCA45697.2020.00047"},{"key":"e_1_3_3_2_30_2","doi-asserted-by":"publisher","DOI":"10.1109\/ISPASS.2017.7975270"},{"key":"e_1_3_3_2_31_2","doi-asserted-by":"crossref","unstructured":"Ashok\u00a0V Krishnamoorthy Ron Ho Xuezhe Zheng Herb Schwetman Jon Lexau Pranay Koka GuoLiang Li Ivan Shubin and John\u00a0E Cunningham. 2009. Computer systems based on silicon photonic interconnects. Proc. IEEE 97 7 (2009) 1337\u20131361.","DOI":"10.1109\/JPROC.2009.2020712"},{"key":"e_1_3_3_2_32_2","unstructured":"Mandy La and Andrew Chien. 2020. Cerebras Systems: Journey to the Wafer-Scale Engine. University of Chicago Tech. Rep (2020)."},{"key":"e_1_3_3_2_33_2","unstructured":"Seonho Lee Amar Phanishayee and Divya Mahajan. 2024. Data-driven Forecasting of Deep Learning Performance on GPUs. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2407.13853 (2024)."},{"key":"e_1_3_3_2_34_2","unstructured":"Shen Li Yanli Zhao Rohan Varma Omkar Salpekar Pieter Noordhuis Teng Li Adam Paszke Jeff Smith Brian Vaughan Pritam Damania et\u00a0al. 2020. Pytorch distributed: Experiences on accelerating data parallel training. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2006.15704 (2020)."},{"key":"e_1_3_3_2_35_2","doi-asserted-by":"publisher","DOI":"10.1145\/3613424.3614277"},{"key":"e_1_3_3_2_36_2","doi-asserted-by":"publisher","DOI":"10.1109\/ISPASS57527.2023.00047"},{"key":"e_1_3_3_2_37_2","doi-asserted-by":"crossref","unstructured":"Yuan Li Ke Wang Hao Zheng Ahmed Louri and Avinash Karanth. 2022. 
Ascend: A scalable and energy-efficient deep neural network accelerator with photonic interconnects. IEEE Transactions on Circuits and Systems I: Regular Papers 69 7 (2022) 2730\u20132741.","DOI":"10.1109\/TCSI.2022.3169953"},{"key":"e_1_3_3_2_38_2","doi-asserted-by":"publisher","DOI":"10.1109\/HPCA56546.2023.10071043"},{"key":"e_1_3_3_2_39_2","doi-asserted-by":"publisher","DOI":"10.1109\/HiPC56025.2022.00019"},{"key":"e_1_3_3_2_40_2","doi-asserted-by":"crossref","unstructured":"John\u00a0DC Little and Stephen\u00a0C Graves. 2008. Little\u2019s law. Building intuition: insights from basic operations management models and principles (2008) 81\u2013100.","DOI":"10.1007\/978-0-387-73699-0_5"},{"key":"e_1_3_3_2_41_2","doi-asserted-by":"publisher","DOI":"10.1145\/3613424.3623773"},{"key":"e_1_3_3_2_42_2","doi-asserted-by":"publisher","DOI":"10.1145\/3587135.3592200"},{"key":"e_1_3_3_2_43_2","doi-asserted-by":"publisher","DOI":"10.1145\/3297858.3304009"},{"key":"e_1_3_3_2_44_2","doi-asserted-by":"crossref","unstructured":"Christopher Monroe Robert Raussendorf Alex Ruthven Kenneth\u00a0R Brown Peter Maunz L-M Duan and Jungsang Kim. 2014. Large-scale modular quantum-computer architecture with atomic memory and photonic interconnects. Physical Review A 89 2 (2014) 022317.","DOI":"10.1103\/PhysRevA.89.022317"},{"key":"e_1_3_3_2_45_2","doi-asserted-by":"publisher","DOI":"10.1109\/MICRO61859.2024.00063"},{"key":"e_1_3_3_2_46_2","doi-asserted-by":"publisher","DOI":"10.1109\/ISPASS57527.2023.00030"},{"key":"e_1_3_3_2_47_2","doi-asserted-by":"publisher","DOI":"10.1145\/3458817.3476209"},{"key":"e_1_3_3_2_48_2","doi-asserted-by":"crossref","unstructured":"Cristobal\u00a0A Navarro Nancy Hitschfeld-Kahler and Luis Mateu. 2014. A survey on parallel computing and its applications in data-parallel problems using GPU architectures. 
Communications in Computational Physics 15 2 (2014) 285\u2013329.","DOI":"10.4208\/cicp.110113.010813a"},{"key":"e_1_3_3_2_49_2","volume-title":"NCCL Tests","year":"2024","unstructured":"NVIDIA. 2024. NCCL Tests. Retrieved July 20, 2024 from https:\/\/github.com\/NVIDIA\/nccl-tests"},{"key":"e_1_3_3_2_50_2","volume-title":"NVSwitch Technical Overview","author":"Corporation NVIDIA","year":"2018","unstructured":"NVIDIA Corporation. 2018. NVSwitch Technical Overview. Retrieved July 25, 2024 from https:\/\/images.nvidia.com\/content\/pdf\/nvswitch-technical-overview.pdf"},{"key":"e_1_3_3_2_51_2","volume-title":"NVIDIA Collective Communications Library (NCCL)","author":"Corporation NVIDIA","year":"2024","unstructured":"NVIDIA Corporation. 2024. NVIDIA Collective Communications Library (NCCL). Retrieved November 15, 2024 from https:\/\/developer.nvidia.com\/nccl"},{"key":"e_1_3_3_2_52_2","volume-title":"NVIDIA Nsight Compute","author":"Corporation NVIDIA","year":"2024","unstructured":"NVIDIA Corporation. 2024. NVIDIA Nsight Compute. Retrieved November 15, 2024 from https:\/\/developer.nvidia.com\/nsight-compute"},{"key":"e_1_3_3_2_53_2","volume-title":"NVIDIA NVLink","author":"Corporation NVIDIA","year":"2024","unstructured":"NVIDIA Corporation. 2024. NVIDIA NVLink. Retrieved July 18, 2024 from https:\/\/www.nvidia.com\/en-us\/data-center\/nvlink\/"},{"key":"e_1_3_3_2_54_2","doi-asserted-by":"crossref","unstructured":"John\u00a0D Owens Mike Houston David Luebke Simon Green John\u00a0E Stone and James\u00a0C Phillips. 2008. GPU computing. Proc. IEEE 96 5 (2008) 879\u2013899.","DOI":"10.1109\/JPROC.2008.917757"},{"key":"e_1_3_3_2_55_2","doi-asserted-by":"crossref","unstructured":"Saptadeep Pal Eiman Ebrahimi Arslan Zulfiqar Yaosheng Fu Victor Zhang Szymon Migacz David Nellans and Puneet Gupta. 2019. Optimizing multi-GPU parallelization strategies for deep learning training. 
IEEE Micro 39 5 (2019) 91\u2013101.","DOI":"10.1109\/MM.2019.2935967"},{"key":"e_1_3_3_2_56_2","unstructured":"Adam Paszke Sam Gross Francisco Massa Adam Lerer James Bradbury Gregory Chanan Trevor Killeen Zeming Lin Natalia Gimelshein Luca Antiga et\u00a0al. 2019. Pytorch: An imperative style high-performance deep learning library. Advances in neural information processing systems 32 (2019)."},{"key":"e_1_3_3_2_57_2","volume-title":"PCI Express Base Specification","year":"2024","unstructured":"PCI-SIG. 2024. PCI Express Base Specification. Retrieved July 18, 2024 from https:\/\/pcisig.com\/specifications"},{"key":"e_1_3_3_2_58_2","volume-title":"Distributed Pipelining","author":"Contributors PyTorch","year":"2024","unstructured":"PyTorch Contributors. 2024. Distributed Pipelining. Retrieved July 27, 2024 from https:\/\/pytorch.org\/docs\/main\/distributed.pipelining.html"},{"key":"e_1_3_3_2_59_2","unstructured":"Alec Radford Jeffrey Wu Rewon Child David Luan Dario Amodei Ilya Sutskever et\u00a0al. 2019. Language models are unsupervised multitask learners. OpenAI blog 1 8 (2019) 9."},{"key":"e_1_3_3_2_60_2","unstructured":"Colin Raffel Noam Shazeer Adam Roberts Katherine Lee Sharan Narang Michael Matena Yanqi Zhou Wei Li and Peter\u00a0J Liu. 2020. Exploring the limits of transfer learning with a unified text-to-text transformer. Journal of machine learning research 21 140 (2020) 1\u201367."},{"key":"e_1_3_3_2_61_2","doi-asserted-by":"publisher","DOI":"10.1109\/ISPASS48437.2020.00018"},{"key":"e_1_3_3_2_62_2","first-page":"551","volume-title":"2021 USENIX Annual Technical Conference (USENIX ATC 21)","author":"Ren Jie","year":"2021","unstructured":"Jie Ren, Samyam Rajbhandari, Reza\u00a0Yazdani Aminabadi, Olatunji Ruwase, Shuangyan Yang, Minjia Zhang, Dong Li, and Yuxiong He. 2021. { Zero-offload} : Democratizing { billion-scale} model training. In 2021 USENIX Annual Technical Conference (USENIX ATC 21). 
551\u2013564."},{"key":"e_1_3_3_2_63_2","volume-title":"gem5 Users Workshop","author":"Roarty Kyle","year":"2020","unstructured":"Kyle Roarty and Matthew\u00a0D Sinclair. 2020. Modeling modern gpu applications in gem5. In gem5 Users Workshop."},{"key":"e_1_3_3_2_64_2","doi-asserted-by":"publisher","DOI":"10.1145\/2517349.2522715"},{"key":"e_1_3_3_2_65_2","doi-asserted-by":"publisher","DOI":"10.1109\/HPCA57654.2024.00088"},{"key":"e_1_3_3_2_66_2","unstructured":"Mohammad Shoeybi Mostofa Patwary Raul Puri Patrick LeGresley Jared Casper and Bryan Catanzaro. 2019. Megatron-lm: Training multi-billion parameter language models using model parallelism. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/1909.08053 (2019)."},{"key":"e_1_3_3_2_67_2","unstructured":"Karen Simonyan and Andrew Zisserman. 2014. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/1409.1556 (2014)."},{"key":"e_1_3_3_2_68_2","doi-asserted-by":"publisher","DOI":"10.1145\/3307650.3322230"},{"key":"e_1_3_3_2_69_2","doi-asserted-by":"publisher","DOI":"10.1111\/cgf.14303"},{"key":"e_1_3_3_2_70_2","doi-asserted-by":"publisher","DOI":"10.1145\/3146347.3146358"},{"key":"e_1_3_3_2_71_2","unstructured":"Hugo Touvron Thibaut Lavril Gautier Izacard Xavier Martinet Marie-Anne Lachaux Timoth\u00e9e Lacroix Baptiste Rozi\u00e8re Naman Goyal Eric Hambro Faisal Azhar et\u00a0al. 2023. Llama: Open and efficient foundation language models. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2302.13971 (2023)."},{"key":"e_1_3_3_2_72_2","unstructured":"Hugo Touvron Louis Martin Kevin Stone Peter Albert Amjad Almahairi Yasmine Babaei Nikolay Bashlykov Soumya Batra Prajjwal Bhargava Shruti Bhosale et\u00a0al. 2023. Llama 2: Open foundation and fine-tuned chat models. 
arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2307.09288 (2023)."},{"key":"e_1_3_3_2_73_2","doi-asserted-by":"publisher","DOI":"10.1145\/2370816.2370865"},{"key":"e_1_3_3_2_74_2","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-demos.6"},{"key":"e_1_3_3_2_75_2","doi-asserted-by":"publisher","DOI":"10.1109\/ISPASS57527.2023.00035"},{"key":"e_1_3_3_2_76_2","doi-asserted-by":"publisher","DOI":"10.1145\/2783258.2783323"},{"key":"e_1_3_3_2_77_2","unstructured":"Yongkang Zhang Haoxuan Yu Chenxia Han Cheng Wang Baotong Lu Yang Li Xiaowen Chu and Huaicheng Li. 2024. Missile: Fine-Grained Hardware-Level GPU Resource Isolation for Multi-Tenant DNN Inference. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2407.13996 (2024)."},{"key":"e_1_3_3_2_78_2","doi-asserted-by":"publisher","DOI":"10.1109\/SC41405.2020.00093"},{"key":"e_1_3_3_2_79_2","doi-asserted-by":"publisher","DOI":"10.1145\/3503222.3507708"}],"event":{"name":"ISCA '25: Proceedings of the 52nd Annual International Symposium on Computer Architecture","location":"Tokyo Japan","acronym":"SIGARCH '25","sponsor":["SIGARCH ACM Special Interest Group on Computer Architecture"]},"container-title":["Proceedings of the 52nd Annual International Symposium on Computer 
Architecture"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3695053.3731082","content-type":"application\/pdf","content-version":"vor","intended-application":"syndication"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3695053.3731082","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,21]],"date-time":"2025-06-21T11:08:15Z","timestamp":1750504095000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3695053.3731082"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,20]]},"references-count":78,"alternative-id":["10.1145\/3695053.3731082","10.1145\/3695053"],"URL":"https:\/\/doi.org\/10.1145\/3695053.3731082","relation":{},"subject":[],"published":{"date-parts":[[2025,6,20]]},"assertion":[{"value":"2025-06-20","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}