{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,15]],"date-time":"2026-04-15T18:33:58Z","timestamp":1776278038453,"version":"3.50.1"},"reference-count":38,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"4","license":[{"start":{"date-parts":[[2022,10,1]],"date-time":"2022-10-01T00:00:00Z","timestamp":1664582400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2022,10,1]],"date-time":"2022-10-01T00:00:00Z","timestamp":1664582400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,10,1]],"date-time":"2022-10-01T00:00:00Z","timestamp":1664582400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100003725","name":"National Research Foundation of Korea","doi-asserted-by":"publisher","award":["NRF-2020M3F6A1109603"],"award-info":[{"award-number":["NRF-2020M3F6A1109603"]}],"id":[{"id":"10.13039\/501100003725","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Ministry of Science and ICT, Korea","award":["RS-2022-00156345"],"award-info":[{"award-number":["RS-2022-00156345"]}]},{"name":"Institute for Information and Communications Technology Planning and Evaluation"},{"name":"Institute for Information and Communications Technology Planning and Evaluation","award":["2021-0-02067"],"award-info":[{"award-number":["2021-0-02067"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Robot. Autom. Lett."],"published-print":{"date-parts":[[2022,10]]},"DOI":"10.1109\/lra.2022.3196781","type":"journal-article","created":{"date-parts":[[2022,8,5]],"date-time":"2022-08-05T19:27:05Z","timestamp":1659727625000},"page":"10969-10976","source":"Crossref","is-referenced-by-count":41,"title":["TransDSSL: Transformer Based Depth Estimation via Self-Supervised Learning"],"prefix":"10.1109","volume":"7","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-2139-7719","authenticated-orcid":false,"given":"Daechan","family":"Han","sequence":"first","affiliation":[{"name":"School of Intelligent Mechatronic Engineering, Sejong University, Seoul, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7591-665X","authenticated-orcid":false,"given":"Jeongmin","family":"Shin","sequence":"additional","affiliation":[{"name":"School of Intelligent Mechatronic Engineering, Sejong University, Seoul, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3388-678X","authenticated-orcid":false,"given":"Namil","family":"Kim","sequence":"additional","affiliation":[{"name":"Naver Labs, Gyeonggi-do, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1499-3253","authenticated-orcid":false,"given":"Soonmin","family":"Hwang","sequence":"additional","affiliation":[{"name":"Carnegie Mellon University, Pittsburgh, PA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9970-0132","authenticated-orcid":false,"given":"Yukyung","family":"Choi","sequence":"additional","affiliation":[{"name":"School of Intelligent Mechatronic Engineering, Sejong University, Seoul, South Korea"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00481"},{"key":"ref2","first-page":"2366","article-title":"Depth map prediction from a single image using a multi-scale deep network","volume-title":"Proc. Conf. Neural Inf. Process. Syst.","author":"Eigen","year":"2014"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00393"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2021.3074306"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.699"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00256"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2017.2699184"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.660"},{"key":"ref9","first-page":"3828","article-title":"Digging into self-supervised monocular depth estimation","volume-title":"Proc. IEEE\/CVF Int. Conf. Comput. Vis.","author":"Ranftl","year":"2021"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/iccv48922.2021.00986"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1177\/0278364913491297"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr.2017.700"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58529-7_34"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48506.2021.9561441"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00742"},{"key":"ref16","article-title":"Semantically-guided representation learning for self-supervised monocular depth","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Guizilini","year":"2020"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2021.3101049"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01596"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-97672-9_30"},{"key":"ref20","article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Dosovitskiy","year":"2021"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00090"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00707"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/3DV53792.2021.00056"},{"key":"ref24","first-page":"30392","article-title":"Early convolutions help transformers see better","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Xiao","year":"2021"},{"key":"ref25","first-page":"9355","article-title":"Twins: Revisiting the design of spatial attention in vision transformers","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Chu","year":"2021"},{"key":"ref26","article-title":"Self-supervised monocular depth estimation with internal feature fusion","volume-title":"Proc. Brit. Mach. Vis. Conf.","author":"Zhou","year":"2021"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01173"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00695"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1007\/s41095-022-0274-8"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00681"},{"key":"ref31","article-title":"Attention is all you need","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Vaswani","year":"2017"},{"key":"ref32","first-page":"12077","article-title":"SegFormer: Simple and efficient design for semantic segmentation with transformers","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Xie","year":"2021"},{"key":"ref33","first-page":"7281","article-title":"HRFormer: High-resolution vision transformer for dense predict","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Yuan","year":"2021"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i3.16329"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00864"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.463"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.5220\/0010884000003124"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00062"}],"container-title":["IEEE Robotics and Automation Letters"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7083369\/9831196\/09851497.pdf?arnumber=9851497","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,2,1]],"date-time":"2024-02-01T11:44:38Z","timestamp":1706787878000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9851497\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,10]]},"references-count":38,"journal-issue":{"issue":"4"},"URL":"https:\/\/doi.org\/10.1109\/lra.2022.3196781","relation":{},"ISSN":["2377-3766","2377-3774"],"issn-type":[{"value":"2377-3766","type":"electronic"},{"value":"2377-3774","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,10]]}}}