{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T13:25:28Z","timestamp":1740144328511,"version":"3.37.3"},"reference-count":26,"publisher":"Springer Science and Business Media LLC","license":[{"start":{"date-parts":[[2022,10,22]],"date-time":"2022-10-22T00:00:00Z","timestamp":1666396800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,10,22]],"date-time":"2022-10-22T00:00:00Z","timestamp":1666396800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"funder":[{"DOI":"10.13039\/501100003382","name":"Core Research for Evolutional Science and Technology","doi-asserted-by":"publisher","award":["JPMJCR20D5"],"award-info":[{"award-number":["JPMJCR20D5"]}],"id":[{"id":"10.13039\/501100003382","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001691","name":"Japan Society for the Promotion of Science","doi-asserted-by":"publisher","award":["17H00867"],"award-info":[{"award-number":["17H00867"]}],"id":[{"id":"10.13039\/501100001691","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100000646","name":"Japan Society for the Promotion of Science London","doi-asserted-by":"publisher","award":["Bilateral Joint Research Project"],"award-info":[{"award-number":["Bilateral Joint Research Project"]}],"id":[{"id":"10.13039\/501100000646","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J CARS"],"DOI":"10.1007\/s11548-022-02773-2","type":"journal-article","created":{"date-parts":[[2022,10,22]],"date-time":"2022-10-22T03:40:27Z","timestamp":1666410027000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Class-wise confidence-aware active learning for laparoscopic images 
segmentation"],"prefix":"10.1007","author":[{"given":"Jie","family":"Qiu","sequence":"first","affiliation":[]},{"given":"Yuichiro","family":"Hayashi","sequence":"additional","affiliation":[]},{"given":"Masahiro","family":"Oda","sequence":"additional","affiliation":[]},{"given":"Takayuki","family":"Kitasaka","sequence":"additional","affiliation":[]},{"given":"Kensaku","family":"Mori","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,10,22]]},"reference":[{"key":"2773_CR1","doi-asserted-by":"crossref","unstructured":"Chen L-C, Zhu Y, Papandreou G, Schroff F, Adam H (2018) Encoder-decoder with atrous separable convolution for semantic image segmentation. In: Proceedings of the European conference on computer vision, pp. 801\u2013818","DOI":"10.1007\/978-3-030-01234-2_49"},{"key":"2773_CR2","doi-asserted-by":"crossref","unstructured":"Ronneberger O, Fischer P, Brox T (2015) U-net: Convolutional networks for biomedical image segmentation. In: International conference on medical image computing and computer-assisted intervention, pp. 234\u2013241 . Springer","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"2773_CR3","unstructured":"Settles B (2009) Active learning literature survey"},{"issue":"3","key":"2773_CR4","doi-asserted-by":"publisher","first-page":"379","DOI":"10.1002\/j.1538-7305.1948.tb01338.x","volume":"27","author":"CE Shannon","year":"1948","unstructured":"Shannon CE (1948) A mathematical theory of communication. Bell Syst Tech J 27(3):379\u2013423","journal-title":"Bell Syst Tech J"},{"key":"2773_CR5","unstructured":"Gal Y, Islam R, Ghahramani Z (2017) Deep Bayesian active learning with image data. In: International conference on machine learning, pp. 1183\u20131192"},{"key":"2773_CR6","doi-asserted-by":"crossref","unstructured":"Xie S, Feng Z, Chen Y, Sun S, Ma C, Song M (2020) Deal: difficulty-aware active learning for semantic segmentation. In: Proceedings of the Asian conference on computer vision, pp. 
672\u2013688","DOI":"10.1007\/978-3-030-69525-5_40"},{"key":"2773_CR7","doi-asserted-by":"crossref","unstructured":"Yang L, Zhang Y, Chen J, Zhang S, Chen DZ (2017) Suggestive annotation: a deep active learning framework for biomedical image segmentation. In: International conference on medical image computing and computer-assisted intervention, pp. 399\u2013407. Springer","DOI":"10.1007\/978-3-319-66179-7_46"},{"key":"2773_CR8","unstructured":"Sener O, Savarese S (2018) Active learning for convolutional neural networks: a core-set approach. In: International conference on learning representations"},{"key":"2773_CR9","unstructured":"Houlsby N, Husz\u00e1r F, Ghahramani Z, Lengyel M (2011) Bayesian active learning for gaussian process classification. In: NIPS Workshop on Bayesian optimization, experimental design and bandits: theory and applications"},{"key":"2773_CR10","unstructured":"Gal Y, Ghahramani Z (2016) Dropout as a Bayesian approximation: representing model uncertainty in deep learning. In: International conference on machine learning, pp. 1050\u20131059"},{"key":"2773_CR11","unstructured":"Kendall A, Gal Y (2017) What uncertainties do we need in Bayesian deep learning for computer vision?. In: Advances in neural information processing systems, pp. 5574\u20135584"},{"key":"2773_CR12","doi-asserted-by":"crossref","unstructured":"Olsson V, Tranheden W, Pinto J, Svensson L (2021) Classmix: segmentation-based data augmentation for semi-supervised learning. In: Proceedings of the IEEE\/CVF winter conference on applications of computer vision, pp. 1369\u20131378","DOI":"10.1109\/WACV48630.2021.00141"},{"key":"2773_CR13","unstructured":"Allan M, Shvets A, Kurmann T, Zhang Z, Duggal R, Su Y-H, Rieke N, Laina I, Kalavakonda N, Bodenstedt S, Garcia-Peraza-Herrera L, Li W, Iglovikov V, Luo H, Yang J, Stoyanov D, Maier-Hein L, Speidel S, Azizian M (2017) robotic instrument segmentation challenge. 
arXiv preprint arXiv:1902.06426 (2019)"},{"key":"2773_CR14","unstructured":"Hong W-Y, Kao C-L, Kuo Y-H, Wang J-R, Chang W-L, Shih C-S (2020) Cholecseg8k: a semantic segmentation dataset for laparoscopic cholecystectomy based on cholec80. arXiv preprint arXiv:2012.12453"},{"key":"2773_CR15","unstructured":"Hu H, Wei F, Hu H, Ye Q, Cui J, Wang L (2021) Semi-supervised semantic segmentation via adaptive equalization learning. Adv Neural Inf Process Syst 34:22106\u201322118. https:\/\/proceedings.neurips.cc\/paper\/2021\/hash\/b98249b38337c5088bbc660d8f872d6a-Abstract.html"},{"key":"2773_CR16","unstructured":"Krizhevsky A, Sutskever I, Hinton GE (2012) Imagenet classification with deep convolutional neural networks. Adv Neural Inf Process Syst 25:1097\u20131105. https:\/\/papers.nips.cc\/paper\/2012\/hash\/c399862d3b9d6b76c8436e924a68c45b-Abstract.html"},{"key":"2773_CR17","doi-asserted-by":"crossref","unstructured":"Yun S, Han D, Oh SJ, Chun S, Choe J, Yoo Y (2019) Cutmix: regularization strategy to train strong classifiers with localizable features. In: Proceedings of the IEEE\/CVF international conference on computer vision, pp. 6023\u20136032","DOI":"10.1109\/ICCV.2019.00612"},{"key":"2773_CR18","unstructured":"French G, Laine S, Aila T, Mackiewicz M, Finlayson G (2020) Semi-supervised semantic segmentation needs strong, varied perturbations. In: British machine vision conference"},{"key":"2773_CR19","unstructured":"Laine S, Aila T (2017) Temporal ensembling for semi-supervised learning. In: International conference on learning representations"},{"issue":"1","key":"2773_CR20","doi-asserted-by":"publisher","first-page":"79","DOI":"10.1214\/aoms\/1177729694","volume":"22","author":"S Kullback","year":"1951","unstructured":"Kullback S, Leibler RA (1951) On information and sufficiency. 
Ann Math Stat 22(1):79\u201386","journal-title":"Ann Math Stat"},{"issue":"1","key":"2773_CR21","doi-asserted-by":"publisher","first-page":"86","DOI":"10.1109\/TMI.2016.2593957","volume":"36","author":"AP Twinanda","year":"2016","unstructured":"Twinanda AP, Shehata S, Mutter D, Marescaux J, De Mathelin M, Padoy N (2016) Endonet: a deep architecture for recognition tasks on laparoscopic videos. IEEE Trans Med Imaging 36(1):86\u201397","journal-title":"IEEE Trans Med Imaging"},{"key":"2773_CR22","doi-asserted-by":"crossref","unstructured":"He K, Zhang X, Ren S, Sun J (2016) Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770\u2013778","DOI":"10.1109\/CVPR.2016.90"},{"key":"2773_CR23","doi-asserted-by":"crossref","unstructured":"Scheffer T, Decomain C, Wrobel S (2001) Active hidden markov models for information extraction. In: International symposium on intelligent data analysis, pp. 309\u2013318 . Springer","DOI":"10.1007\/3-540-44816-0_31"},{"key":"2773_CR24","unstructured":"Chen T, Kornblith S, Norouzi M, Hinton G (2020) A simple framework for contrastive learning of visual representations. In: International conference on machine learning, pp. 1597\u20131607 . PMLR"},{"key":"2773_CR25","unstructured":"Van\u00a0Amersfoort J, Smith L, Teh YW, Gal Y (2020) Uncertainty estimation using a single deep deterministic neural network. In: International conference on machine learning, pp. 9690\u20139700. PMLR"},{"key":"2773_CR26","unstructured":"Zhang B, Wang Y, Hou W, Wu H, Wang J, Okumura M, Shinozaki T (2021) Flexmatch: boosting semi-supervised learning with curriculum pseudo labeling. Adv Neural Inf Process Syst 34 :18408\u201318419. 
https:\/\/proceedings.neurips.cc\/paper\/2021\/hash\/995693c15f439e3d189b06e89d145dd5-Abstract.html"}],"container-title":["International Journal of Computer Assisted Radiology and Surgery"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11548-022-02773-2.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11548-022-02773-2\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11548-022-02773-2.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,10,22]],"date-time":"2022-10-22T04:03:54Z","timestamp":1666411434000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11548-022-02773-2"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,10,22]]},"references-count":26,"alternative-id":["2773"],"URL":"https:\/\/doi.org\/10.1007\/s11548-022-02773-2","relation":{},"ISSN":["1861-6429"],"issn-type":[{"type":"electronic","value":"1861-6429"}],"subject":[],"published":{"date-parts":[[2022,10,22]]},"assertion":[{"value":"15 January 2022","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"5 October 2022","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"22 October 2022","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"This article 
does not contain any studies with human participants or animals performed by any of the authors.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethical approval"}}]}}