{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,19]],"date-time":"2025-09-19T11:16:41Z","timestamp":1758280601704,"version":"3.40.4"},"reference-count":37,"publisher":"Springer Science and Business Media LLC","issue":"7","license":[{"start":{"date-parts":[[2024,11,26]],"date-time":"2024-11-26T00:00:00Z","timestamp":1732579200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,26]],"date-time":"2024-11-26T00:00:00Z","timestamp":1732579200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"crossref","award":["71973106"],"award-info":[{"award-number":["71973106"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]},{"name":"Shandong Social Science Planning and Research Project","award":["22CSDJ36"],"award-info":[{"award-number":["22CSDJ36"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Vis Comput"],"published-print":{"date-parts":[[2025,5]]},"DOI":"10.1007\/s00371-024-03715-6","type":"journal-article","created":{"date-parts":[[2024,11,26]],"date-time":"2024-11-26T01:23:46Z","timestamp":1732584226000},"page":"5189-5203","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":5,"title":["An enhanced underwater fish segmentation method in complex scenes using Swin transformer with cross-scale feature 
fusion"],"prefix":"10.1007","volume":"41","author":[{"given":"Shue","family":"Liu","sequence":"first","affiliation":[]},{"given":"Siwei","family":"Zhao","sequence":"additional","affiliation":[]},{"given":"Yiying","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Jiaming","family":"Xin","sequence":"additional","affiliation":[]},{"given":"Dashe","family":"Li","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,26]]},"reference":[{"key":"3715_CR1","doi-asserted-by":"publisher","DOI":"10.1016\/j.addma.2020.101696","volume":"37","author":"Z Jin","year":"2021","unstructured":"Jin, Z., Zhang, Z., Ott, J., Grace, X.G.: Precise localization and semantic segmentation detection of printing conditions in fused filament fabrication technologies using machine learning. Addit. Manuf. 37, 101696 (2021). https:\/\/doi.org\/10.1016\/j.addma.2020.101696","journal-title":"Addit. Manuf."},{"key":"3715_CR2","doi-asserted-by":"publisher","DOI":"10.1016\/j.adapen.2021.100057","volume":"4","author":"P Li","year":"2021","unstructured":"Li, P., Zhang, H., Guo, Z., Lyu, S., Chen, J., Li, W., Song, X., Shibasaki, R., Yan, J.: Understanding rooftop pv panel semantic segmentation of satellite and aerial images for better using machine learning. Adv. Appl. Energy 4, 100057 (2021). https:\/\/doi.org\/10.1016\/j.adapen.2021.100057","journal-title":"Adv. Appl. Energy"},{"key":"3715_CR3","doi-asserted-by":"publisher","DOI":"10.1016\/j.sigpro.2021.108017","volume":"183","author":"W Tingting","year":"2021","unstructured":"Tingting, W., Xiaoyu, G., Wang, Y., Zeng, T.: Adaptive total variation based image segmentation with semi-proximal alternating minimization. Signal Process. 183, 108017 (2021). 
https:\/\/doi.org\/10.1016\/j.sigpro.2021.108017","journal-title":"Signal Process."},{"key":"3715_CR4","doi-asserted-by":"publisher","unstructured":"Ronneberger, O., Fischer, P., Brox, T.: U-net: convolutional networks for biomedical image segmentation. In: Medical Image Computing and Computer-Assisted Intervention\u2014MICCAI2015\u201418th International Conference Munich, Germany, October 5\u20139, 2015, Proceedings, Part III, vol. 9351, pp. 234\u2013241. Springer (2015). https:\/\/doi.org\/10.1007\/978-3-319-24574-4_28","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"3715_CR5","doi-asserted-by":"publisher","unstructured":"Chen, L.-C., Zhu, Y., Papandreou, G., Schroff, F., Adam, H.: Encoder-decoder with atrous separable convolution for semantic image segmentation. In: Computer Vision\u2014ECCV 2018\u201415th European Conference, Munich,Germany, September 8-14, 2018,Proceedings, Part VII, vol. 11211, pp. 833\u2013851. Springer (2018). https:\/\/doi.org\/10.1007\/978-3-030-01234-2_49","DOI":"10.1007\/978-3-030-01234-2_49"},{"key":"3715_CR6","doi-asserted-by":"publisher","first-page":"7192","DOI":"10.1109\/TIP.2020.2999854","volume":"29","author":"A Nazir","year":"2020","unstructured":"Nazir, A., Cheema, M.N., Sheng, B., Li, H., Li, P., Yang, P., Jung, Y., Qin, J., Kim, J., Feng, D.D.: Off-enet: an optimally fused fully end-to-end network for automatic dense volumetric 3d intracranial blood vessels segmentation. IEEE Trans. Image Process. 29, 7192\u20137202 (2020). https:\/\/doi.org\/10.1109\/TIP.2020.2999854","journal-title":"IEEE Trans. Image Process."},{"key":"3715_CR7","doi-asserted-by":"publisher","unstructured":"Sun, K., Xiao, B., Liu, D., Wang, J.: Deep high-resolution representation learning for human pose estimation. In: IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2019, Long Beach, CA, USA, June 16\u201320, 2019, pp. 5693\u20135703. Computer Vision Foundation\/IEEE (2019). 
https:\/\/doi.org\/10.1109\/CVPR.2019.00584","DOI":"10.1109\/CVPR.2019.00584"},{"key":"3715_CR8","doi-asserted-by":"publisher","DOI":"10.1016\/J.COMPAG.2021.106169","volume":"185","author":"Yu Xiaoning","year":"2021","unstructured":"Xiaoning, Yu., Wang, Y., An, D., Wei, Y.: Identification methodology of special behaviors for fish school based on spatial behavior characteristics. Comput. Electron. Agric. 185, 106169 (2021). https:\/\/doi.org\/10.1016\/J.COMPAG.2021.106169","journal-title":"Comput. Electron. Agric."},{"key":"3715_CR9","doi-asserted-by":"publisher","DOI":"10.1016\/j.aquaeng.2021.102215","volume":"96","author":"Yu Xiaoning","year":"2022","unstructured":"Xiaoning, Yu., Wang, Y., An, D., Wei, Y.: Counting method for cultured fishes based on multi-modules and attention mechanism. Aquacult. Eng. 96, 102215 (2022). https:\/\/doi.org\/10.1016\/j.aquaeng.2021.102215","journal-title":"Aquacult. Eng."},{"key":"3715_CR10","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2022.118403","volume":"210","author":"Yu Xiaoning","year":"2022","unstructured":"Xiaoning, Yu., Wang, Y., Liu, J., Wang, J., An, D., Wei, Y.: Non-contact weight estimation system for fish based on instance segmentation. Expert Syst. Appl. 210, 118403 (2022). https:\/\/doi.org\/10.1016\/j.eswa.2022.118403","journal-title":"Expert Syst. Appl."},{"key":"3715_CR11","doi-asserted-by":"publisher","DOI":"10.1007\/s10499-024-01424-4","author":"Y Yang","year":"2024","unstructured":"Yang, Y., Li, D., Zhao, S.: A novel approach for underwater fish segmentation in complex scenes based on multi-levels triangular atrous convolution. Aquacult. Int. (2024). https:\/\/doi.org\/10.1007\/s10499-024-01424-4","journal-title":"Aquacult. 
Int."},{"key":"3715_CR12","doi-asserted-by":"publisher","DOI":"10.1016\/j.engappai.2023.107093","volume":"126","author":"Y Duan","year":"2023","unstructured":"Duan, Y., Zhang, S., Liu, Y., Liu, J., An, D., Wei, Y.: Boosting fish counting in sonar images with global attention and point supervision. Eng. Appl. Artif. Intell. 126, 107093 (2023). https:\/\/doi.org\/10.1016\/j.engappai.2023.107093","journal-title":"Eng. Appl. Artif. Intell."},{"key":"3715_CR13","doi-asserted-by":"publisher","DOI":"10.1016\/j.marenvres.2023.106085","volume":"190","author":"D Li","year":"2023","unstructured":"Li, D., Yang, Y., Zhao, S., Yang, H.: A fish image segmentation methodology in aquaculture environment based on multi-feature fusion model. Mar. Environ. Res. 190, 106085 (2023). https:\/\/doi.org\/10.1016\/j.marenvres.2023.106085","journal-title":"Mar. Environ. Res."},{"issue":"8","key":"3715_CR14","doi-asserted-by":"publisher","first-page":"4499","DOI":"10.1109\/TNNLS.2021.3116209","volume":"34","author":"Z Xie","year":"2023","unstructured":"Xie, Z., Zhang, W., Sheng, B., Li, P., Chen, C.L.P.: Bagfn: broad attentive graph fusion network for high-order feature interactions. IEEE Trans. Neural Netw. Learn. Syst. 34(8), 4499\u20134513 (2023). https:\/\/doi.org\/10.1109\/TNNLS.2021.3116209","journal-title":"IEEE Trans. Neural Netw. Learn. Syst."},{"issue":"1","key":"3715_CR15","doi-asserted-by":"publisher","first-page":"163","DOI":"10.1109\/TII.2021.3085669","volume":"18","author":"J Li","year":"2022","unstructured":"Li, J., Chen, J., Sheng, B., Li, P., Yang, P., Feng, D.D., Qi, J.: Automatic detection and classification system of domestic waste via multimodel cascaded convolutional neural network. IEEE Trans. Indust. Inf. 18(1), 163\u2013173 (2022). https:\/\/doi.org\/10.1109\/TII.2021.3085669","journal-title":"IEEE Trans. Indust. 
Inf."},{"key":"3715_CR16","doi-asserted-by":"publisher","first-page":"2226","DOI":"10.1109\/TMM.2022.3144890","volume":"25","author":"N Jiang","year":"2023","unstructured":"Jiang, N., Sheng, B., Li, P., Lee, T.-Y.: Photohelper: portrait photographing guidance via deep feature retrieval and fusion. IEEE Trans. Multimedia 25, 2226\u20132238 (2023). https:\/\/doi.org\/10.1109\/TMM.2022.3144890","journal-title":"IEEE Trans. Multimedia"},{"key":"3715_CR17","doi-asserted-by":"publisher","DOI":"10.1038\/s41467-021-23458-5","author":"L Dai","year":"2021","unstructured":"Dai, L., Liang, W., Li, H., Cai, C., Sheng, B.: A deep learning system for detecting diabetic retinopathy across the disease spectrum. Nat. Commun. (2021). https:\/\/doi.org\/10.1038\/s41467-021-23458-5","journal-title":"Nat. Commun."},{"key":"3715_CR18","doi-asserted-by":"publisher","DOI":"10.1038\/s41591-023-02702-z","author":"L Dai","year":"2024","unstructured":"Dai, L., Sheng, B., Chen, T.: A deep learning system for predicting time to progression of diabetic retinopathy. Nat. Med. (2024). https:\/\/doi.org\/10.1038\/s41591-023-02702-z","journal-title":"Nat. Med."},{"issue":"3","key":"3715_CR19","doi-asserted-by":"publisher","DOI":"10.1016\/j.patter.2024.100929","volume":"5","author":"B Qian","year":"2024","unstructured":"Qian, B., Chen, H., Wang, X., Guan, Z., Li, T., Jin, Y., Wu, Y., Wen, Y., Che, H., Kwon, G., Kim, J., Choi, S., Shin, S., Krause, F., Unterdechler, M., Hou, J., Feng, R., Li, Y., Daho, M.E., Habib, Y., Dawei, W., Qiang, Z., Ping, Y., Xiaokang, C., Yiyu, T., Wei, G.S., Cheung, C.Y., Jia, W., Li, H., Tham, Y.C., Wong, T.Y., Sheng, B.: Drac 2022: a public benchmark for diabetic retinopathy analysis on ultra-wide optical coherence tomography angiography images. Patterns 5(3), 100929 (2024). 
https:\/\/doi.org\/10.1016\/j.patter.2024.100929","journal-title":"Patterns"},{"key":"3715_CR20","doi-asserted-by":"publisher","unstructured":"Azad, R., Arimond, R., Aghdam, E.K., Kazerouni, A., Merhof, D.: Dae-former: dual attention-guided efficient transformer for medical image segmentation. In: Predictive Intelligence in Medicine\u20146th International Workshop, PRIME 2023, Held in Conjunction with MICCAI 2023, Vancouver, BC, Canada, October 8, 2023, Proceedings, vol. 14277, pp. 83\u201395. Springer (2023). https:\/\/doi.org\/10.1007\/978-3-031-46005-0_8","DOI":"10.1007\/978-3-031-46005-0_8"},{"key":"3715_CR21","doi-asserted-by":"publisher","DOI":"10.1007\/s00371-024-03543-8","author":"Y Pan","year":"2024","unstructured":"Pan, Y., Chen, Q., Fang, X.: Damaf: dual attention network with multi-level adaptive complementary fusion for medical image segmentation. Vis. Comput. (2024). https:\/\/doi.org\/10.1007\/s00371-024-03543-8","journal-title":"Vis. Comput."},{"key":"3715_CR22","doi-asserted-by":"publisher","unstructured":"Zhu, L., Wang, X., Ke, Z., Zhang, W., Lau, R.W.H.: Biformer: vision transformer with bi-level routing attention. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2023, Vancouver, BC, Canada, June 17\u201324, 2023, pp. 10323\u201310333. IEEE (2023). https:\/\/doi.org\/10.1109\/CVPR52729.2023.00995","DOI":"10.1109\/CVPR52729.2023.00995"},{"key":"3715_CR23","doi-asserted-by":"publisher","unstructured":"Xu, Z., Wu, D., Yu, C., Chu, X., Sang, N., Gao, C.: Sctnet: single-branch CNN with transformer semantic information for real-time segmentation. In: 38th AAAI Conference on Artificial Intelligence, AAAI2024, 36th Conference on Innovative Applications of Artificial Intelligence, IAAI 2024, 14th Symposium on Educational Advances in Artificial Intelligence, EAAI 2014, February 20-27, 2024, Vancouver,Canada, pp. 6378\u20136386. AAAI Press (2024). 
https:\/\/doi.org\/10.1609\/AAAI.V38I6.28457","DOI":"10.1609\/AAAI.V38I6.28457"},{"key":"3715_CR24","doi-asserted-by":"publisher","first-page":"50","DOI":"10.1109\/TMM.2021.3120873","volume":"25","author":"X Lin","year":"2023","unstructured":"Lin, X., Sun, S., Huang, W., Sheng, B., Li, P., Feng, D.D.: EAPT: efficient attention pyramid transformer for image processing. IEEE Trans. Multimedia 25, 50\u201361 (2023). https:\/\/doi.org\/10.1109\/TMM.2021.3120873","journal-title":"IEEE Trans. Multimedia"},{"key":"3715_CR25","doi-asserted-by":"publisher","unstructured":"Azad, R., Heidari, M., Shariatnia, M., Aghdam, E.K., Karimijafarbigloo, S., Adeli, E., Merhof, D.: Transdeeplab: convolution-free transformer-based deeplab v3+ for medical image segmentation. In: Predictive Intelligence in Medicine\u20145th International Workshop, PRIME 2022, Held in Conjunction with MICCAI 2022, Singapore, September 22, 2022, Proceedings, Lecture Notes in Computer Science, vol. 13564, pp. 91\u2013102. Springer (2022). https:\/\/doi.org\/10.1007\/978-3-031-16919-9_9","DOI":"10.1007\/978-3-031-16919-9_9"},{"issue":"5","key":"3715_CR26","doi-asserted-by":"publisher","first-page":"1484","DOI":"10.1109\/TMI.2022.3230943","volume":"42","author":"X Huang","year":"2023","unstructured":"Huang, X., Deng, Z., Li, D., Yuan, X., Ying, F.: Missformer: an effective transformer for 2D medical image segmentation. IEEE Trans. Medical Imaging 42(5), 1484\u20131494 (2023). https:\/\/doi.org\/10.1109\/TMI.2022.3230943","journal-title":"IEEE Trans. Medical Imaging"},{"key":"3715_CR27","unstructured":"Chen, J., Lu, Y., Yu, Q., Luo, X., Adeli, E., Wang, Y., Lu, L., Yuille, A.L., Zhou, Y.: Transunet: transformers make strong encoders for medical image segmentation. CoRR (2021). arxiv:2102.04306"},{"key":"3715_CR28","doi-asserted-by":"publisher","unstructured":"Cao, H., Wang, Y., Chen, J., Jiang, D., Zhang, X., Tian, Q., Wang, M.: Swin-unet: unet-like pure transformer for medical image segmentation. 
In: Computer Vision\u2014ECCV 2022 Workshops\u2014Tel Aviv, Israel, October 23\u201327, 2022, Proceedings, Part III. Lecture Notes in Computer Science, vol. 13803, pp. 205\u2013218. Springer (2022). https:\/\/doi.org\/10.1007\/978-3-031-25066-8_9","DOI":"10.1007\/978-3-031-25066-8_9"},{"key":"3715_CR29","doi-asserted-by":"publisher","unstructured":"Azad, R., Jia, Y., Aghdam, E.K., Cohen-Adad, J., Merhof, D.: Enhancing medical image segmentation with transception: a multi-scale feature fusion approach. CoRR (2023). https:\/\/doi.org\/10.48550\/ARXIV.2301.10847","DOI":"10.48550\/ARXIV.2301.10847"},{"key":"3715_CR30","doi-asserted-by":"publisher","unstructured":"Lan, L., Cai, P., Jiang, L., Liu, X., Li, Y., Zhang, Y.: Brau-net++: U-shaped hybrid CNN-transformer network for medical image segmentation. CoRR (2024). https:\/\/doi.org\/10.48550\/ARXIV.2401.00722","DOI":"10.48550\/ARXIV.2401.00722"},{"key":"3715_CR31","doi-asserted-by":"publisher","unstructured":"Manzari, O.N., Kaleybar, J.M., Saadat, H., Maleki, S.: Befunet: a hybrid CNN-transformer architecture for precise medical image segmentation. CoRR (2024). https:\/\/doi.org\/10.48550\/ARXIV.2402.08793","DOI":"10.48550\/ARXIV.2402.08793"},{"key":"3715_CR32","doi-asserted-by":"publisher","unstructured":"Chen, M., Lin, M., Li, K., Shen, Y., Wu, Y., Chao, F., Ji, R.: Cf-vit: a general coarse-to-fine method for vision transformer. In: 37th AAAI Conference on Artificial Intelligence, AAAI 2023, 35th Conference on Innovative Applications of Artificial Intelligence, IAAI 2023, 13th Symposium on Educational Advances in Artificial Intelligence, EAAI 2023, Washington, DC, USA, February 7\u201314, 2023, pp. 7042\u20137052. AAAI Press (2023). 
https:\/\/doi.org\/10.1609\/AAAI.V37I6.25860","DOI":"10.1609\/AAAI.V37I6.25860"},{"key":"3715_CR33","doi-asserted-by":"publisher","unstructured":"Heidari, M., Kazerouni, A., Kadarvish, M.S., Azad, R., Aghdam, E.K., Cohen-Adad, J., Merhof, D.: Hiformer: hierarchical multi-scale representations using transformers for medical image segmentation. In: IEEE\/CVF Winter Conference on Applications of Computer Vision, WACV 2023, Waikoloa, HI, USA, January 2\u20137, 2023, pp. 6191\u20136201. IEEE https:\/\/doi.org\/10.1109\/WACV56688.2023.00614 (2023)","DOI":"10.1109\/WACV56688.2023.00614"},{"key":"3715_CR34","doi-asserted-by":"publisher","unstructured":"Karimi, D., Vasylechko, S., Gholipour, A.: Convolution-free medical image segmentation using transformers. In: Medical Image Computing and Computer Assisted Intervention\u2014MICCAI 2021\u201424th International Conference, Strasbourg, France, September 27\u2013October 1, 2021, Proceedings, Part I, Lecture Notes in Computer Science, vol. 12901, pp. 78\u201388. Springer (2021). https:\/\/doi.org\/10.1007\/978-3-030-87193-2_8","DOI":"10.1007\/978-3-030-87193-2_8"},{"key":"3715_CR35","doi-asserted-by":"publisher","unstructured":"Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., Guo, B.: Swin transformer: hierarchical vision transformer using shifted windows. In: 2021 IEEE\/CVF International Conference on Computer Vision, ICCV 2021, Montreal, QC, Canada, October 10\u201317, 2021, pp. 9992\u201310002. IEEE (2021). https:\/\/doi.org\/10.1109\/ICCV48922.2021.00986","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"3715_CR36","doi-asserted-by":"publisher","unstructured":"Woo, S., Park, J., Lee, J.-Y., Kweon, I.S.: CBAM: convolutional block attention module. In: Computer Vision\u2014ECCV 2018\u201415th European Conference, Munich, Germany, September 8\u201314, 2018, Proceedings, Part VII. Lecture Notes in Computer Science, vol. 11211, pp. 3\u201319. Springer (2018). 
https:\/\/doi.org\/10.1007\/978-3-030-01234-2_1","DOI":"10.1007\/978-3-030-01234-2_1"},{"key":"3715_CR37","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2021.102327","volume":"76","author":"H Wu","year":"2022","unstructured":"Wu, H., Chen, S., Chen, G., Wang, W., Lei, B., Wen, Z.: Fat-net: feature adaptive transformers for automated skin lesion segmentation. Med. Image Anal. 76, 102327 (2022). https:\/\/doi.org\/10.1016\/j.media.2021.102327","journal-title":"Med. Image Anal."}],"container-title":["The Visual Computer"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-024-03715-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00371-024-03715-6\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-024-03715-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,4,24]],"date-time":"2025-04-24T10:01:26Z","timestamp":1745488886000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00371-024-03715-6"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,26]]},"references-count":37,"journal-issue":{"issue":"7","published-print":{"date-parts":[[2025,5]]}},"alternative-id":["3715"],"URL":"https:\/\/doi.org\/10.1007\/s00371-024-03715-6","relation":{},"ISSN":["0178-2789","1432-2315"],"issn-type":[{"type":"print","value":"0178-2789"},{"type":"electronic","value":"1432-2315"}],"subject":[],"published":{"date-parts":[[2024,11,26]]},"assertion":[{"value":"4 November 2024","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"26 November 2024","order":2,"name":"first_online","label":"First 
Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could influence the work reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}