{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,8]],"date-time":"2026-04-08T00:15:43Z","timestamp":1775607343712,"version":"3.50.1"},"reference-count":26,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,5]]},"DOI":"10.1109\/icra40945.2020.9197031","type":"proceedings-article","created":{"date-parts":[[2020,9,15]],"date-time":"2020-09-15T21:25:46Z","timestamp":1600205146000},"page":"101-107","source":"Crossref","is-referenced-by-count":81,"title":["FADNet: A Fast and Accurate Network for Disparity Estimation"],"prefix":"10.1109","author":[{"given":"Qiang","family":"Wang","sequence":"first","affiliation":[]},{"given":"Shaohuai","family":"Shi","sequence":"additional","affiliation":[]},{"given":"Shizhen","family":"Zheng","sequence":"additional","affiliation":[]},{"given":"Kaiyong","family":"Zhao","sequence":"additional","affiliation":[]},{"given":"Xiaowen","family":"Chu","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.316"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.179"},{"key":"ref12","article-title":"Occlusions, motion and depth boundaries with a generic network for disparity, optical flow or scene flow estimation","author":"ilg","year":"2018","journal-title":"the European Conference on Computer Vision (ECCV)"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00297"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref15","article-title":"Skip connections eliminate singularities","author":"orhan","year":"2017"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8793573"},{"key":"ref17","article-title":"Amnet: Deep atrous multiscale stereo disparity estimation networks","author":"du","year":"2019"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.17"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00340"},{"key":"ref4","doi-asserted-by":"crossref","DOI":"10.1109\/ICCV.2015.316","article-title":"Flownet: Learning optical flow with convolutional networks","author":"dosovitskiy","year":"2015","journal-title":"The IEEE International Conference on Computer Vision (ICCV)"},{"key":"ref3","first-page":"2","article-title":"Stereo matching by training a convolutional neural network to compare image patches","volume":"17","author":"zbontar","year":"2016","journal-title":"Journal of Machine Learning Research"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00567"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.438"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00190"},{"key":"ref7","article-title":"Ganet: Guided aggregation net for end-to-end stereo matching","author":"zhang","year":"2019","journal-title":"The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)"},{"key":"ref2","first-page":"4353","article-title":"Learning to compare image patches via convolutional neural networks","author":"zagoruyko","year":"2015","journal-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition"},{"key":"ref9","article-title":"Automl: A survey of the state-of-the-art","author":"he","year":"2019"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2007.1166"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00024"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.179"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW.2017.108"},{"key":"ref24","article-title":"Cascade residual learning: A two-stage convolutional neural network for stereo matching","author":"pang","year":"2017","journal-title":"The IEEE International Conference on Computer Vision (ICCV) Workshops"},{"key":"ref23","doi-asserted-by":"crossref","first-page":"3207","DOI":"10.1109\/ICRA.2018.8463172","article-title":"Fast disparity estimation using dense networks","author":"atienza","year":"2018","journal-title":"2018 IEEE International Conference on Robotics and Automation (ICRA)"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.5194\/isprsannals-II-3-W5-427-2015"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"}],"event":{"name":"2020 IEEE International Conference on Robotics and Automation (ICRA)","location":"Paris, France","start":{"date-parts":[[2020,5,31]]},"end":{"date-parts":[[2020,8,31]]}},"container-title":["2020 IEEE International Conference on Robotics and Automation (ICRA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9187508\/9196508\/09197031.pdf?arnumber=9197031","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,28]],"date-time":"2022-06-28T00:27:22Z","timestamp":1656376042000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9197031\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,5]]},"references-count":26,"URL":"https:\/\/doi.org\/10.1109\/icra40945.2020.9197031","relation":{},"subject":[],"published":{"date-parts":[[2020,5]]}}}