{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,23]],"date-time":"2026-04-23T07:58:14Z","timestamp":1776931094816,"version":"3.51.2"},"publisher-location":"New York, NY, USA","reference-count":42,"publisher":"ACM","content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2026,3,23]]},"DOI":"10.1145\/3742413.3789104","type":"proceedings-article","created":{"date-parts":[[2026,3,3]],"date-time":"2026-03-03T11:32:24Z","timestamp":1772537544000},"page":"1340-1361","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["Agents in Concert: A Case-Study of Bringing AI to the Stage in Practice"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0005-3182-1498","authenticated-orcid":false,"given":"Stephen","family":"Brade","sequence":"first","affiliation":[{"name":"Electrical Engineering and Computer Science, Massachusetts Institute of Technology, Cambridge, Massachusetts, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-9254-4106","authenticated-orcid":false,"given":"Teng","family":"Ma","sequence":"additional","affiliation":[{"name":"Georgia Institute of Technology, Atlanta, Georgia, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1580-3116","authenticated-orcid":false,"given":"Lancelot","family":"Blanchard","sequence":"additional","affiliation":[{"name":"MIT Media Lab, Cambridge, Massachusetts, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0139-7436","authenticated-orcid":false,"given":"Kimaya","family":"Lecamwasam","sequence":"additional","affiliation":[{"name":"Massachusetts Institute of Technology, Cambridge, Massachusetts, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-0965-5881","authenticated-orcid":false,"given":"Carlos Mariano","family":"Salcedo","sequence":"additional","affiliation":[{"name":"Massachusetts Institute of Technology, Boston, Massachusetts, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-2591-4776","authenticated-orcid":false,"given":"Suwan","family":"Kim","sequence":"additional","affiliation":[{"name":"Massachusetts Institute of Technology, Cambridge, Massachusetts, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0003-4246-9755","authenticated-orcid":false,"given":"Perry","family":"Naseck","sequence":"additional","affiliation":[{"name":"MIT Media Lab, Cambridge, Massachusetts, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1594-8518","authenticated-orcid":false,"given":"Andrew","family":"Li","sequence":"additional","affiliation":[{"name":"Massachusetts Institute of Technology, Cambridge, Massachusetts, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4324-3435","authenticated-orcid":false,"given":"Matthew","family":"R Michalek","sequence":"additional","affiliation":[{"name":"Department of Materials Science and Engineering, Massachusetts Institute of Technology, Cambridge, Massachusetts, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-8040-2045","authenticated-orcid":false,"given":"Sebastian","family":"Franjou","sequence":"additional","affiliation":[{"name":"Media Lab, MIT, Cambridge, Massachusetts, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8440-2378","authenticated-orcid":false,"given":"Anna","family":"Huang","sequence":"additional","affiliation":[{"name":"Massachusetts Institute of Technology, Cambridge, Massachusetts, USA"}]}],"member":"320","published-online":{"date-parts":[[2026,3,22]]},"reference":[{"key":"e_1_3_3_2_2_2","doi-asserted-by":"publisher","DOI":"10.1145\/3290605.3300233"},{"key":"e_1_3_3_2_3_2","doi-asserted-by":"publisher","DOI":"10.1145\/1178723.1178742"},{"key":"e_1_3_3_2_4_2","volume-title":"Proceedings of the First MiniCon Conference","author":"Authors Anonymous","year":"2025","unstructured":"Anonymous Authors. 2025. The jam_bot, a Real-Time System for Collaborative Free Improvisation with Music Language Models. In Proceedings of the First MiniCon Conference. URL-REDACTED ISMIR 2025 Hybrid Conference."},{"key":"e_1_3_3_2_5_2","doi-asserted-by":"publisher","DOI":"10.1145\/3600211.3604686"},{"key":"e_1_3_3_2_6_2","unstructured":"Christodoulos Benetatos Joseph VanderStel and Zhiyao Duan. 2020. BachDuet: A Deep Learning System for Human-Machine Counterpoint Improvisation. Proceedings of the International Conference on New Interfaces for Musical Expression (July 2020). https:\/\/par.nsf.gov\/biblio\/10191375-bachduet-deep-learning-system-human-machine-counterpoint-improvisation"},{"key":"e_1_3_3_2_7_2","unstructured":"Joakim Borg G\u00e9rard Assayag and Mikhail Malt. 2022. Somax 2 a Reactive Multi-Agent Environment for Co-Improvisation. https:\/\/hal.science\/hal-04001271 Published: SMC 2022 - Sound Music & Computing."},{"key":"e_1_3_3_2_8_2","doi-asserted-by":"publisher","DOI":"10.1145\/3586183.3606725"},{"key":"e_1_3_3_2_9_2","doi-asserted-by":"publisher","unstructured":"Antoine Caillon and Philippe Esling. 2021. RAVE: A variational autoencoder for fast and high-quality neural audio synthesis. 10.48550\/arXiv.2111.05011arXiv:https:\/\/arXiv.org\/abs\/2111.05011 [cs].","DOI":"10.48550\/arXiv.2111.05011"},{"key":"e_1_3_3_2_10_2","doi-asserted-by":"publisher","unstructured":"Drew Edwards Simon Dixon and Emmanouil Benetos. 2023. PiJAMA: Piano Jazz with Automatic MIDI Annotations | Transactions of the International Society for Music Information Retrieval. (Jan. 2023). 10.5334\/tismir.162","DOI":"10.5334\/tismir.162"},{"key":"e_1_3_3_2_11_2","doi-asserted-by":"crossref","unstructured":"Nicholas Evans Behzad Haki and Sergi Jord\u00e0. 2025. Repurposing a Rhythm Accompaniment System for Pipe Organ Performance. 116\u2013120. https:\/\/www.nime.org\/proc\/nime2025_16\/index.html ISSN: 2220-4806.","DOI":"10.1049\/icp.2025.3670"},{"key":"e_1_3_3_2_12_2","unstructured":"Austin Franklin. 2024. The Robo-Cajon : An Example of Live Performance with Musical Robotics. International Computer Music Association 270\u2013273. https:\/\/urn.kb.se\/resolve?urn=urn:nbn:se:mdh:diva-70059"},{"key":"e_1_3_3_2_13_2","doi-asserted-by":"publisher","DOI":"10.1145\/302979.303030"},{"key":"e_1_3_3_2_14_2","unstructured":"Cheng-Zhi\u00a0Anna Huang Hendrik\u00a0Vincent Koops Ed Newton-Rex Monica Dinculescu and Carrie\u00a0J Cai. 2020. AI song contest: Human-AI co-creation in songwriting. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2010.05388 (2020)."},{"key":"e_1_3_3_2_15_2","doi-asserted-by":"publisher","DOI":"10.1145\/2908805.2913018"},{"key":"e_1_3_3_2_16_2","doi-asserted-by":"publisher","DOI":"10.1145\/2702123.2702362"},{"key":"e_1_3_3_2_17_2","volume-title":"New Interfaces for Musical Expression (NIME)","author":"Jourdan Th\u00e9o","year":"2023","unstructured":"Th\u00e9o Jourdan and Baptiste Caramiaux. 2023. Machine learning for musical expression: A systematic literature review. In New Interfaces for Musical Expression (NIME)."},{"key":"e_1_3_3_2_18_2","doi-asserted-by":"publisher","DOI":"10.1145\/3706598.3713818"},{"key":"e_1_3_3_2_19_2","doi-asserted-by":"crossref","unstructured":"Maaike Kleinsmann and Rianne Valkenburg. 2008. Barriers and enablers for creating shared understanding in co-design projects. Design studies 29 4 (2008) 369\u2013386.","DOI":"10.1016\/j.destud.2008.03.003"},{"key":"e_1_3_3_2_20_2","doi-asserted-by":"publisher","DOI":"10.1145\/3706598.3713894"},{"key":"e_1_3_3_2_21_2","doi-asserted-by":"publisher","DOI":"10.1145\/3544549.3577061"},{"key":"e_1_3_3_2_22_2","doi-asserted-by":"publisher","unstructured":"George\u00a0E. Lewis. 2000. Too Many Notes: Computers Complexity and Culture in Voyager. Leonardo Music Journal 10 (Dec. 2000) 33\u201339. 10.1162\/096112100570585","DOI":"10.1162\/096112100570585"},{"key":"e_1_3_3_2_23_2","unstructured":"Soroush Mehri Kundan Kumar Ishaan Gulrajani Rithesh Kumar Shubham Jain Jose Sotelo Aaron Courville and Yoshua Bengio. 2017. SampleRNN: An Unconditional End-to-End Neural Audio Generation Model. https:\/\/openreview.net\/forum?id=SkxKPDv5xl"},{"key":"e_1_3_3_2_24_2","doi-asserted-by":"crossref","unstructured":"Fabio Morreale. 2021. Where does the buck stop? Ethical and political issues with AI in music creation. Transactions of the International Society for Music Information Retrieval 4 1 (2021).","DOI":"10.5334\/tismir.86"},{"key":"e_1_3_3_2_25_2","doi-asserted-by":"publisher","DOI":"10.1145\/3658852.3659072"},{"key":"e_1_3_3_2_26_2","first-page":"80","volume-title":"ISMIR","author":"Newman Michele","year":"2023","unstructured":"Michele Newman, Lidia Morris, and Jin\u00a0Ha Lee. 2023. Human-AI Music Creation: Understanding the Perceptions and Experiences of Music Creators for Ethical and Productive Collaboration.. In ISMIR. 80\u201388."},{"key":"e_1_3_3_2_27_2","doi-asserted-by":"publisher","unstructured":"J\u00e9r\u00f4me Nika Marc Chemillier and G\u00e9rard Assayag. 2017. ImproteK: Introducing Scenarios into Human-Computer Music Improvisation. Comput. Entertain. 14 2 (Jan. 2017) 4:1\u20134:27. 10.1145\/3022635","DOI":"10.1145\/3022635"},{"key":"e_1_3_3_2_28_2","doi-asserted-by":"publisher","unstructured":"Fran\u00e7ois Pachet. 2003. The Continuator: Musical Interaction With Style. Journal of New Music Research 32 3 (Sept. 2003) 333\u2013341. 10.1076\/jnmr.32.3.333.16861Publisher: Routledge _eprint: https:\/\/www.tandfonline.com\/doi\/pdf\/10.1076\/jnmr.32.3.333.16861.","DOI":"10.1076\/jnmr.32.3.333.16861"},{"key":"e_1_3_3_2_29_2","doi-asserted-by":"publisher","DOI":"10.1145\/2470654.2481303"},{"key":"e_1_3_3_2_30_2","doi-asserted-by":"publisher","unstructured":"Teresa Pelinski Andrew McPherson and Rebecca Fiebrink. 2024. Ways of knowing ways of writing: technical practice research in new musical instrument design. Journal of New Music Research 53 1-2 (March 2024) 79\u201392. 10.1080\/09298215.2024.2442348Publisher: Routledge _eprint: https:\/\/doi.org\/10.1080\/09298215.2024.2442348.","DOI":"10.1080\/09298215.2024.2442348"},{"key":"e_1_3_3_2_31_2","doi-asserted-by":"crossref","unstructured":"Elizabeth B-N Sanders and Pieter\u00a0Jan Stappers. 2008. Co-creation and the new landscapes of design. Co-design 4 1 (2008) 5\u201318.","DOI":"10.1080\/15710880701875068"},{"key":"e_1_3_3_2_32_2","doi-asserted-by":"publisher","unstructured":"Victor Shepardson Jack Armitage and Thor Magnusson. 2022. Notochord: a Flexible Probabilistic Model for Real-Time MIDI Performance. (Sept. 2022). 10.5281\/zenodo.7088404arXiv:https:\/\/arXiv.org\/abs\/2403.12000 [cs].","DOI":"10.5281\/zenodo.7088404"},{"key":"e_1_3_3_2_33_2","doi-asserted-by":"publisher","unstructured":"Christopher Small. 1999. Musicking \u2014 the meanings of performing and listening. A lecture. Music Education Research 1 1 (March 1999) 9\u201322. 10.1080\/1461380990010102Publisher: Routledge _eprint: https:\/\/doi.org\/10.1080\/1461380990010102.","DOI":"10.1080\/1461380990010102"},{"key":"e_1_3_3_2_34_2","doi-asserted-by":"crossref","unstructured":"Marc Steen. 2013. Co-design as a process of joint inquiry and imagination. Design issues 29 2 (2013) 16\u201328.","DOI":"10.1162\/DESI_a_00207"},{"key":"e_1_3_3_2_35_2","first-page":"115","volume-title":"Arts","author":"Sturm Bob\u00a0LT","year":"2019","unstructured":"Bob\u00a0LT Sturm, Maria Iglesias, Oded Ben-Tal, Marius Miron, and Emilia G\u00f3mez. 2019. Artificial intelligence and music: open questions of copyright law and engineering praxis. In Arts , Vol.\u00a08. MDPI, 115."},{"key":"e_1_3_3_2_36_2","doi-asserted-by":"publisher","DOI":"10.1145\/3640543.3645206"},{"key":"e_1_3_3_2_37_2","unstructured":"John Thickstun David Leo\u00a0Wright Hall Chris Donahue and Percy Liang. 2023. Anticipatory Music Transformer. Transactions on Machine Learning Research (Nov. 2023). https:\/\/openreview.net\/forum?id=EBNJ33Fcrl"},{"key":"e_1_3_3_2_38_2","doi-asserted-by":"publisher","DOI":"10.1145\/3643834.3661522"},{"key":"e_1_3_3_2_39_2","volume-title":"Proceedings of the First MiniCon Conference","author":"University)* Hugo F. Flores Garcia\u00a0(Northwestern","year":"2023","unstructured":"Hugo F. Flores Garcia\u00a0(Northwestern University)*, Prem Seetharaman\u00a0(Northwestern University), Rithesh Kumar\u00a0(Descript), and Bryan Pardo\u00a0(Northwestern University). 2023. VampNet: Music Generation via Masked Acoustic Token Modeling. In Proceedings of the First MiniCon Conference. https:\/\/ismir2023program.ismir.net\/poster_125.html Conference Name: Ismir 2023 Hybrid Conference."},{"key":"e_1_3_3_2_40_2","doi-asserted-by":"publisher","unstructured":"Barry\u00a0L. Vercoe. 2005. Synthetic listeners and synthetic performers. The Journal of the Acoustical Society of America 88 S1 (Aug. 2005) S70. 10.1121\/1.2029123","DOI":"10.1121\/1.2029123"},{"key":"e_1_3_3_2_41_2","doi-asserted-by":"publisher","DOI":"10.1145\/3334480.3381069"},{"key":"e_1_3_3_2_42_2","doi-asserted-by":"publisher","unstructured":"Rui Zhang Nathan\u00a0J. McNeese Guo Freeman and Geoff Musick. 2021. \"An Ideal Human\": Expectations of AI Teammates in Human-AI Teaming. Proc. ACM Hum.-Comput. Interact. 4 CSCW3 Article 246 (Jan. 2021) 25\u00a0pages. 10.1145\/3432945","DOI":"10.1145\/3432945"},{"key":"e_1_3_3_2_43_2","unstructured":"Fr\u00e4nk Zimmer. 2024. Holly+ Holly Herndon\u2019s digital AI voice twin | Sounding Future. https:\/\/www.soundingfuture.com\/en\/article\/holly-holly-herndons-digital-ai-voice-twin"}],"event":{"name":"IUI '26: 31st International Conference on Intelligent User Interfaces","location":"Paphos Cyprus","acronym":"IUI '26","sponsor":["SIGCHI ACM Special Interest Group on Computer-Human Interaction","SIGAI ACM Special Interest Group on Artificial Intelligence"]},"container-title":["Proceedings of the 31st International Conference on Intelligent User Interfaces"],"original-title":[],"deposited":{"date-parts":[[2026,3,14]],"date-time":"2026-03-14T13:03:37Z","timestamp":1773493417000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3742413.3789104"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,3,22]]},"references-count":42,"alternative-id":["10.1145\/3742413.3789104","10.1145\/3742413"],"URL":"https:\/\/doi.org\/10.1145\/3742413.3789104","relation":{},"subject":[],"published":{"date-parts":[[2026,3,22]]},"assertion":[{"value":"2026-03-22","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}