{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,8,23]],"date-time":"2025-08-23T05:10:23Z","timestamp":1755925823585,"version":"3.44.0"},"reference-count":44,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,8,4]],"date-time":"2025-08-04T00:00:00Z","timestamp":1754265600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,8,4]],"date-time":"2025-08-04T00:00:00Z","timestamp":1754265600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,8,4]]},"DOI":"10.1109\/coins65080.2025.11125776","type":"proceedings-article","created":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T23:57:49Z","timestamp":1755907069000},"page":"1-8","source":"Crossref","is-referenced-by-count":0,"title":["Bio-Inspired Drone Control: A Reinforcement Learning-Trained Spiking Neural Networks for Agile Navigation in Dynamic Environment"],"prefix":"10.1109","author":[{"given":"Yin-Ching","family":"Lee","sequence":"first","affiliation":[{"name":"Boston University,Department of Computer Science,Boston,USA"}]},{"given":"Sebastiano","family":"Mengozzi","sequence":"additional","affiliation":[{"name":"DEI University of Bologna,Bologna,Italy"}]},{"given":"Luca","family":"Zanatta","sequence":"additional","affiliation":[{"name":"ITK Norwegian University of Science and Technology,Trondheim,Norway"}]},{"given":"Andrea","family":"Bartolini","sequence":"additional","affiliation":[{"name":"DEI University of Bologna,Bologna,Italy"}]},{"given":"Andrea","family":"Acquaviva","sequence":"additional","affiliation":[{"name":"DEI University of Bologna,Bologna,Italy"}]},{"given":"Francesco","family":"Barchi","sequence":"additional","affiliation":[{"name":"DEI University of Bologna,Bologna,Italy"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1126\/scirobotics.abg5810"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1142\/S0129065709002002"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2018.12.002"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1126\/scirobotics.adi0591"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/SiPS52927.2021.00053"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2022.3141602"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1126\/scirobotics.abh1221"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2016.7487274"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA46639.2022.9811564"},{"key":"ref10","article-title":"Datt: Deep adaptive trajectory tracking for quadrotor control","author":"Huang","year":"2023"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2017.2720851"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.13140\/RG.2.2.18893.74727"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref14","first-page":"91","article-title":"Learning to walk in minutes using massively parallel deep reinforcement learning","volume-title":"Conference on Robot Learning","author":"Rudin"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.12794\/metadc1505267"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-023-06419-4"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/COINS61597.2024.10622558"},{"key":"ref18","article-title":"Measuring scheduling efficiency of rnns for nlp applications","author":"Thakker","year":"2019"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/EMC249363.2019.00013"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1038\/s41598-024-77779-8"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.3389\/fnins.2018.00331"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1038\/s41467-020-17236-y"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.3389\/fncom.2015.00099"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.3389\/fnins.2020.00088"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8460482"},{"key":"ref26","first-page":"1","article-title":"NengoDL: Combining deep learning and neuromorphic modelling methods","author":"Rasmussen","year":"2018"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1088\/1748-3190\/ac290c"},{"key":"ref28","article-title":"Slayer: Spike layer error reassignment in time","volume":"31","author":"Shrestha","year":"2018","journal-title":"Advances in neural information processing systems"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/MetroInd4.0IoT51437.2021.9488476"},{"key":"ref30","article-title":"Deep reinforcement learning with spiking q-learning","author":"Chen","year":"2022"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2017.7966127"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2017.7966242"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2019.08.009"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i11.17180"},{"key":"ref35","article-title":"Openai gym","author":"Brockman","year":"2016"},{"key":"ref36","first-page":"2016","article-title":"Deep reinforcement learning with population-coded spiking neural network for continuous control","volume-title":"Conference on Robot Learning","author":"Tang"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2023.126885"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2024.3400838"},{"key":"ref39","article-title":"Evolving spiking neural networks to mimic pid control for autonomous blimps","author":"Burgers","year":"2023"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2025.3553418"},{"issue":"254","key":"ref41","first-page":"1","article-title":"skrl: Modular and flexible library for reinforcement learning","volume-title":"Journal of Machine Learning Research","volume":"24","author":"Serrano-Mu\u00f1oz","year":"2023"},{"key":"ref42","article-title":"Isaac gym: High performance gpu-based physics simulation for robot learning","author":"Makoviychuk","year":"2021"},{"key":"ref43","article-title":"rl-games: A high-performance framework for reinforcement learning","author":"Makoviichuk","year":"2021"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/MM.2018.112130359"}],"event":{"name":"2025 IEEE International Conference on Omni-layer Intelligent Systems (COINS)","location":"Madison, WI, USA","start":{"date-parts":[[2025,8,4]]},"end":{"date-parts":[[2025,8,6]]}},"container-title":["2025 IEEE International Conference on Omni-layer Intelligent Systems (COINS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11125678\/11125719\/11125776.pdf?arnumber=11125776","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,23]],"date-time":"2025-08-23T04:34:16Z","timestamp":1755923656000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11125776\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,8,4]]},"references-count":44,"URL":"https:\/\/doi.org\/10.1109\/coins65080.2025.11125776","relation":{},"subject":[],"published":{"date-parts":[[2025,8,4]]}}}