{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,27]],"date-time":"2026-03-27T02:23:45Z","timestamp":1774578225027,"version":"3.50.1"},"reference-count":12,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,12,8]],"date-time":"2024-12-08T00:00:00Z","timestamp":1733616000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,12,8]],"date-time":"2024-12-08T00:00:00Z","timestamp":1733616000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,12,8]]},"DOI":"10.1109\/globecom52923.2024.10901287","type":"proceedings-article","created":{"date-parts":[[2025,3,11]],"date-time":"2025-03-11T17:30:35Z","timestamp":1741714235000},"page":"1269-1274","source":"Crossref","is-referenced-by-count":5,"title":["Self-Supervised Radio Pre-training: Toward Foundational Models for Spectrogram Learning"],"prefix":"10.1109","author":[{"given":"Ahmed","family":"Aboulfotouh","sequence":"first","affiliation":[{"name":"University of Calgary,Department of Electrical and Software Engineering,Canada"}]},{"given":"Ashkan","family":"Eshaghbeigi","sequence":"additional","affiliation":[{"name":"Qoherent Inc,Toronto,Canada"}]},{"given":"Dimitrios","family":"Karslidis","sequence":"additional","affiliation":[{"name":"Qoherent Inc,Toronto,Canada"}]},{"given":"Hatem","family":"Abou-Zeid","sequence":"additional","affiliation":[{"name":"University of Calgary,Department of Electrical and Software Engineering,Canada"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2021.3090866"},{"key":"ref2","article-title":"BERT: pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2018","journal-title":"CoRR"},{"key":"ref3","article-title":"Roberta: A robustly optimized bert pretraining approach","author":"Liu","year":"2019"},{"key":"ref4","article-title":"Self-supervised pretraining of visual features in the wild","author":"Goyal","year":"2021"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TCCN.2017.2758370"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TVT.2018.2868698"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TCOMM.2020.3027027"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TCOMM.2022.3164060"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/MCOM.001.2300364"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10095691"},{"key":"ref11","article-title":"Spectrum sensing with deep learning to identify 5g and lte signals"},{"key":"ref12","article-title":"Convolutional lstm network: A machine learning approach for precipitation nowcasting","volume-title":"Advances in Neural Information Processing Systems","volume":"28","author":"Shi","year":"2015"}],"event":{"name":"GLOBECOM 2024 - 2024 IEEE Global Communications Conference","location":"Cape Town, South Africa","start":{"date-parts":[[2024,12,8]]},"end":{"date-parts":[[2024,12,12]]}},"container-title":["GLOBECOM 2024 - 2024 IEEE Global Communications Conference"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10900933\/10900934\/10901287.pdf?arnumber=10901287","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,3,12]],"date-time":"2025-03-12T05:34:47Z","timestamp":1741757687000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10901287\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,8]]},"references-count":12,"URL":"https:\/\/doi.org\/10.1109\/globecom52923.2024.10901287","relation":{},"subject":[],"published":{"date-parts":[[2024,12,8]]}}}