{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,21]],"date-time":"2025-09-21T18:03:05Z","timestamp":1758477785830,"version":"3.40.3"},"publisher-location":"Cham","reference-count":17,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031439957"},{"type":"electronic","value":"9783031439964"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-43996-4_14","type":"book-chapter","created":{"date-parts":[[2023,9,30]],"date-time":"2023-09-30T23:07:48Z","timestamp":1696115268000},"page":"144-153","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":4,"title":["TCL: Triplet Consistent Learning for Odometry Estimation of Monocular Endoscope"],"prefix":"10.1007","author":[{"given":"Hao","family":"Yue","sequence":"first","affiliation":[]},{"given":"Yun","family":"Gu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,10,1]]},"reference":[{"key":"14_CR1","unstructured":"Allan, M., et al.: Stereo correspondence and reconstruction of endoscopic data challenge. arXiv preprint arXiv:2101.01133 (2021)"},{"issue":"9","key":"14_CR2","doi-asserted-by":"publisher","first-page":"2548","DOI":"10.1007\/s11263-021-01484-6","volume":"129","author":"JW Bian","year":"2021","unstructured":"Bian, J.W., et al.: Unsupervised scale-consistent depth learning from video. Int. J. Comput. Vision 129(9), 2548\u20132564 (2021)","journal-title":"Int. J. Comput. Vision"},{"key":"14_CR3","doi-asserted-by":"publisher","first-page":"102302","DOI":"10.1016\/j.media.2021.102302","volume":"76","author":"PE Edwards","year":"2022","unstructured":"Edwards, P.E., Psychogyios, D., Speidel, S., Maier-Hein, L., Stoyanov, D.: SERV-CT: a disparity dataset from cone-beam CT for validation of endoscopic 3D reconstruction. Med. Image Anal. 76, 102302 (2022)","journal-title":"Med. Image Anal."},{"key":"14_CR4","doi-asserted-by":"publisher","first-page":"1231","DOI":"10.1177\/0278364913491297","volume":"32","author":"A Geiger","year":"2013","unstructured":"Geiger, A., Lenz, P., Stiller, C., Urtasun, R.: Vision meets robotics: the kitti dataset. Int. J. Robot. Res. (IJRR) 32, 1231\u20131237 (2013)","journal-title":"Int. J. Robot. Res. (IJRR)"},{"key":"14_CR5","doi-asserted-by":"crossref","unstructured":"Godard, C., Mac Aodha, O., Firman, M., Brostow, G.J.: Digging into self-supervised monocular depth estimation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 3828\u20133838 (2019)","DOI":"10.1109\/ICCV.2019.00393"},{"key":"14_CR6","doi-asserted-by":"crossref","unstructured":"Kar, O.F., Yeo, T., Atanov, A., Zamir, A.: 3D common corruptions and data augmentation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 18963\u201318974 (2022)","DOI":"10.1109\/CVPR52688.2022.01839"},{"key":"14_CR7","unstructured":"Kingma, D.P., Ba, J.: Adam: a method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014)"},{"issue":"5","key":"14_CR8","doi-asserted-by":"publisher","first-page":"1438","DOI":"10.1109\/TMI.2019.2950936","volume":"39","author":"X Liu","year":"2019","unstructured":"Liu, X., et al.: Dense depth estimation in monocular endoscopy with self-supervised learning methods. IEEE Trans. Med. Imaging 39(5), 1438\u20131447 (2019)","journal-title":"IEEE Trans. Med. Imaging"},{"key":"14_CR9","doi-asserted-by":"publisher","first-page":"102100","DOI":"10.1016\/j.media.2021.102100","volume":"72","author":"R Ma","year":"2021","unstructured":"Ma, R., et al.: RNNSLAM: reconstructing the 3D colon to visualize missing regions during a colonoscopy. Med. Image Anal. 72, 102100 (2021)","journal-title":"Med. Image Anal."},{"key":"14_CR10","doi-asserted-by":"publisher","first-page":"102058","DOI":"10.1016\/j.media.2021.102058","volume":"71","author":"KB Ozyoruk","year":"2021","unstructured":"Ozyoruk, K.B., et al.: EndoSLAM dataset and an unsupervised monocular visual odometry and depth estimation approach for endoscopic videos. Med. Image Anal. 71, 102058 (2021)","journal-title":"Med. Image Anal."},{"key":"14_CR11","unstructured":"Paszke, A., et al.: Automatic differentiation in pytorch (2017)"},{"key":"14_CR12","doi-asserted-by":"publisher","first-page":"102338","DOI":"10.1016\/j.media.2021.102338","volume":"77","author":"S Shao","year":"2022","unstructured":"Shao, S., et al.: Self-supervised monocular depth and ego-motion estimation in endoscopy: appearance flow to the rescue. Med. Image Anal. 77, 102338 (2022)","journal-title":"Med. Image Anal."},{"issue":"4","key":"14_CR13","doi-asserted-by":"publisher","first-page":"600","DOI":"10.1109\/TIP.2003.819861","volume":"13","author":"Z Wang","year":"2004","unstructured":"Wang, Z., Bovik, A.C., Sheikh, H.R., Simoncelli, E.P.: Image quality assessment: from error visibility to structural similarity. IEEE Trans. Image Process. 13(4), 600\u2013612 (2004)","journal-title":"IEEE Trans. Image Process."},{"key":"14_CR14","doi-asserted-by":"crossref","unstructured":"Watson, J., Aodha, O.M., Prisacariu, V., Brostow, G., Firman, M.: The temporal opportunist: self-supervised multi-frame monocular depth. In: Computer Vision and Pattern Recognition (CVPR) (2021)","DOI":"10.1109\/CVPR46437.2021.00122"},{"key":"14_CR15","doi-asserted-by":"crossref","unstructured":"Zhao, W., Liu, S., Shu, Y., Liu, Y.J.: Towards better generalization: joint depth-pose learning without posenet. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 9151\u20139161 (2020)","DOI":"10.1109\/CVPR42600.2020.00917"},{"key":"14_CR16","doi-asserted-by":"crossref","unstructured":"Zhao, Y., Kong, S., Fowlkes, C.: Camera pose matters: improving depth prediction by mitigating pose distribution bias. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 15759\u201315768 (2021)","DOI":"10.1109\/CVPR46437.2021.01550"},{"key":"14_CR17","doi-asserted-by":"crossref","unstructured":"Zhou, T., Brown, M., Snavely, N., Lowe, D.G.: Unsupervised learning of depth and ego-motion from video. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1851\u20131858 (2017)","DOI":"10.1109\/CVPR.2017.700"}],"container-title":["Lecture Notes in Computer Science","Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2023"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-43996-4_14","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,7,4]],"date-time":"2024-07-04T16:04:41Z","timestamp":1720109081000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-43996-4_14"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031439957","9783031439964"],"references-count":17,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-43996-4_14","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"1 October 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"MICCAI","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Medical Image Computing and Computer-Assisted Intervention","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Vancouver, BC","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Canada","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"8 October 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"12 October 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"miccai2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/conferences.miccai.org\/2023\/en\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2250","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"730","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"32% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}