{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,21]],"date-time":"2025-11-21T06:31:44Z","timestamp":1763706704768,"version":"3.37.3"},"reference-count":47,"publisher":"Springer Science and Business Media LLC","issue":"17","license":[{"start":{"date-parts":[[2023,11,14]],"date-time":"2023-11-14T00:00:00Z","timestamp":1699920000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,11,14]],"date-time":"2023-11-14T00:00:00Z","timestamp":1699920000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimed Tools Appl"],"DOI":"10.1007\/s11042-023-17344-z","type":"journal-article","created":{"date-parts":[[2023,11,14]],"date-time":"2023-11-14T06:02:00Z","timestamp":1699941720000},"page":"52275-52303","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["Analytics of deep model-based spatiotemporal and spatial feature learning methods for surgical action classification"],"prefix":"10.1007","volume":"83","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-8159-4055","authenticated-orcid":false,"given":"Rachana S.","family":"Oza","sequence":"first","affiliation":[]},{"given":"Mayuri A.","family":"Mehta","sequence":"additional","affiliation":[]},{"given":"Ketan","family":"Kotecha","sequence":"additional","affiliation":[]},{"given":"Jerry Chun-Wei","family":"Lin","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,11,14]]},"reference":[{"key":"17344_CR1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-96866-7","author":"OYK Tsuda","year":"2019","unstructured":"Tsuda OYK (2019) Shawn, Robotic-Assisted Minimally Invasive Surgery. Springer. https:\/\/doi.org\/10.1007\/978-3-319-96866-7","journal-title":"Springer"},{"issue":"4","key":"17344_CR2","doi-asserted-by":"publisher","first-page":"684","DOI":"10.1097\/SLA.0000000000004425","volume":"273","author":"CR Garrow","year":"2021","unstructured":"Garrow CR et al (2021) Machine Learning for Surgical Phase Recognition: A Systematic Review. Ann Surg 273(4):684\u2013693. https:\/\/doi.org\/10.1097\/SLA.0000000000004425","journal-title":"Ann Surg"},{"issue":"12","key":"17344_CR3","doi-asserted-by":"publisher","first-page":"5377","DOI":"10.1007\/s00464-019-07330-8","volume":"34","author":"S Madad Zadeh","year":"2020","unstructured":"Madad Zadeh S et al (2020) SurgAI: deep learning for computerized laparoscopic image understanding in gynaecology. Surg Endosc 34(12):5377\u20135383. https:\/\/doi.org\/10.1007\/s00464-019-07330-8","journal-title":"Surg Endosc"},{"key":"17344_CR4","doi-asserted-by":"publisher","first-page":"757","DOI":"10.1007\/s00068-020-01444-8","volume":"47","author":"L Rimmer","year":"2020","unstructured":"Rimmer L, Howard C, Picca L, Bashir M (2020) The automaton as a surgeon: the future of artificial intelligence in emergency and general surgery. Eur J Trauma Emerg Surg 47:757\u2013762. 
https:\/\/doi.org\/10.1007\/s00068-020-01444-8","journal-title":"Eur J Trauma Emerg Surg"},{"issue":"9","key":"17344_CR5","doi-asserted-by":"publisher","first-page":"e29179","DOI":"10.7759\/cureus.29179","volume":"14","author":"S Bramhe","year":"2022","unstructured":"Bramhe S, Pathak SS (2022) Robotic Surgery: A Narrative Review. Cureus 14(9):e29179. https:\/\/doi.org\/10.7759\/cureus.29179. (pp. 1\u20137)","journal-title":"Cureus"},{"issue":"13","key":"17344_CR6","doi-asserted-by":"publisher","first-page":"16813","DOI":"10.1007\/s11042-017-5252-2","volume":"77","author":"K Schoeffmann","year":"2018","unstructured":"Schoeffmann K, Husslein H, Kletz S, Petscharnig S, Muenzer B, Beecks C (2018) Video retrieval in laparoscopic video recordings with dynamic content descriptors. Multimedia Tools Appl 77(13):16813\u201316832. https:\/\/doi.org\/10.1007\/s11042-017-5252-2","journal-title":"Multimedia Tools Appl"},{"key":"17344_CR7","doi-asserted-by":"publisher","unstructured":"Chittajallu DR, et al. (2019) \"XAI-CBIR\u202f: explainable ai system for content based retrieval of video frames from minimally invasive surgery videos.\" 2019 IEEE 16th International Symposium on Biomedical Imaging (ISBI 2019), no. Isbi, pp. 66\u201369. https:\/\/doi.org\/10.1109\/ISBI.2019.8759428","DOI":"10.1109\/ISBI.2019.8759428"},{"issue":"11","key":"17344_CR8","doi-asserted-by":"publisher","first-page":"2029","DOI":"10.1007\/s11548-021-02473-3","volume":"16","author":"B Zhang","year":"2021","unstructured":"Zhang B, Ghanem A, Simes A, Choi H, Yoo A (2021) Surgical workflow recognition with 3DCNN for Sleeve Gastrectomy. Int J Comput Assist Radiol Surg 16(11):2029\u20132036. https:\/\/doi.org\/10.1007\/s11548-021-02473-3","journal-title":"Int J Comput Assist Radiol Surg"},{"issue":"41\u201342","key":"17344_CR9","doi-asserted-by":"publisher","first-page":"30111","DOI":"10.1007\/s11042-020-09540-y","volume":"79","author":"T Khatibi","year":"2020","unstructured":"Khatibi T, Dezyani P (2020) Proposing novel methods for gynecologic surgical action recognition on laparoscopic videos. Multimed Tools Appl 79(41\u201342):30111\u201330133. https:\/\/doi.org\/10.1007\/s11042-020-09540-y","journal-title":"Multimed Tools Appl"},{"issue":"5","key":"17344_CR10","doi-asserted-by":"publisher","first-page":"1114","DOI":"10.1109\/TMI.2017.2787657","volume":"37","author":"Y Jin","year":"2018","unstructured":"Jin Y et al (2018) SV-RCNet: Workflow recognition from surgical videos using recurrent convolutional network. IEEE Trans Med Imaging 37(5):1114\u20131126. https:\/\/doi.org\/10.1109\/TMI.2017.2787657","journal-title":"IEEE Trans Med Imaging"},{"issue":"5","key":"17344_CR11","doi-asserted-by":"publisher","first-page":"3160","DOI":"10.1007\/s00464-021-08619-3","volume":"36","author":"K Cheng","year":"2022","unstructured":"Cheng K et al (2022) Artificial intelligence-based automated laparoscopic cholecystectomy surgical phase recognition and analysis. Surg Endosc 36(5):3160\u20133168. https:\/\/doi.org\/10.1007\/s00464-021-08619-3","journal-title":"Surg Endosc"},{"issue":"9","key":"17344_CR12","doi-asserted-by":"publisher","first-page":"1573","DOI":"10.1007\/s11548-020-02198-9","volume":"15","author":"X Shi","year":"2020","unstructured":"Shi X, Jin Y, Dou Q, Heng PA (2020) LRTD: long-range temporal dependency based active learning for surgical workflow recognition. Int J Comput Assist Radiol Surg 15(9):1573\u20131584. 
https:\/\/doi.org\/10.1007\/s11548-020-02198-9","journal-title":"Int J Comput Assist Radiol Surg"},{"key":"17344_CR13","volume-title":"\"Workflow Recognition in Cholesystectomy Videos\", CS230: Deep Learning","author":"S Menon","year":"2017","unstructured":"Menon S (2017) \u201cWorkflow Recognition in Cholesystectomy Videos\u201d, CS230: Deep Learning. Standford University, CA, NIPS"},{"key":"17344_CR14","doi-asserted-by":"publisher","unstructured":"Zia A, Hung A, Essa I, Jarc A (2018) Surgical Activity Recognition in Robot-Assisted Radical Prostatectomy Using Deep Learning. Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), vol. 11073 LNCS:273\u2013280. https:\/\/doi.org\/10.1007\/978-3-030-00937-3_32","DOI":"10.1007\/978-3-030-00937-3_32"},{"issue":"3","key":"17344_CR15","doi-asserted-by":"publisher","DOI":"10.1001\/jamanetworkopen.2020.1664","volume":"3","author":"S Khalid","year":"2020","unstructured":"Khalid S, Goldenberg M, Grantcharov T, Taati B, Rudzicz F (2020) Evaluation of Deep Learning Models for Identifying Surgical Actions and Measuring Performance. JAMA Netw Open 3(3):e201664. https:\/\/doi.org\/10.1001\/jamanetworkopen.2020.1664","journal-title":"JAMA Netw Open"},{"issue":"1","key":"17344_CR16","doi-asserted-by":"publisher","first-page":"8","DOI":"10.1186\/s13640-018-0316-4","volume":"2018","author":"Y Chen","year":"2018","unstructured":"Chen Y, Sun QL, Zhong K (2018) Semi-supervised spatio-temporal CNN for recognition of surgical workflow. Eurasip J Image Video Process 2018(1):8. https:\/\/doi.org\/10.1186\/s13640-018-0316-4","journal-title":"Eurasip J Image Video Process"},{"issue":"2","key":"17344_CR17","doi-asserted-by":"publisher","first-page":"2365","DOI":"10.1109\/LRA.2021.3060410","volume":"6","author":"J Park","year":"2021","unstructured":"Park J, Park CH (2021) Recognition and Prediction of Surgical Actions Based on Online Robotic Tool Detection. IEEE Robot Autom Lett 6(2):2365\u20132372. https:\/\/doi.org\/10.1109\/LRA.2021.3060410","journal-title":"IEEE Robot Autom Lett"},{"issue":"1","key":"17344_CR18","doi-asserted-by":"publisher","first-page":"15","DOI":"10.1080\/24699322.2020.1801842","volume":"25","author":"C Yang","year":"2020","unstructured":"Yang C, Zhao Z, Hu S (2020) Image-based laparoscopic tool detection and tracking using convolutional neural networks: a review of the literature. Comput Assist Surg 25(1):15\u201328. https:\/\/doi.org\/10.1080\/24699322.2020.1801842","journal-title":"Comput Assist Surg"},{"key":"17344_CR19","doi-asserted-by":"publisher","first-page":"679","DOI":"10.1007\/s00464-021-08336-x","volume":"36","author":"B Namazi","year":"2022","unstructured":"Namazi B, Sankaranarayanan G, Devarajan V (2022) A contextual detector of surgical tools in laparoscopic videos using deep learning. Surg Endosc 36:679\u2013688. https:\/\/doi.org\/10.1007\/s00464-021-08336-x","journal-title":"Surg Endosc"},{"issue":"2","key":"17344_CR20","doi-asserted-by":"publisher","first-page":"3870","DOI":"10.1109\/LRA.2021.3066956","volume":"6","author":"Y Sun","year":"2021","unstructured":"Sun Y, Pan B, Fu Y (2021) Lightweight deep neural network for real-time instrument semantic segmentation in robot-assisted minimally invasive surgery. IEEE Robot Autom Lett 6(2):3870\u20133877. 
https:\/\/doi.org\/10.1109\/LRA.2021.3066956","journal-title":"IEEE Robot Autom Lett"},{"issue":"2","key":"17344_CR21","doi-asserted-by":"publisher","first-page":"1577","DOI":"10.1007\/s00521-021-06368-x","volume":"34","author":"Y Hou","year":"2022","unstructured":"Hou Y et al (2022) Adaptive kernel selection network with attention constraint for surgical instrument classification. Neural Comput Appl 34(2):1577\u20131591. https:\/\/doi.org\/10.1007\/s00521-021-06368-x","journal-title":"Neural Comput Appl"},{"key":"17344_CR22","doi-asserted-by":"publisher","unstructured":"Chao Z, Xu W, Liu R, Cho H, Jia F (2023) Surgical action detection based on path aggregation adaptive spatial network. Multimed. Tools Appl. https:\/\/doi.org\/10.1007\/s11042-023-14990-1","DOI":"10.1007\/s11042-023-14990-1"},{"key":"17344_CR23","doi-asserted-by":"publisher","unstructured":"Bichlmeier C, Heining SM, Ahmadi A, Navab N (2008) Stepping into the Operating Theater\u202f: ARAV - Augmented Reality Aided Vertebroplasty, pp. 165\u2013166. https:\/\/doi.org\/10.1109\/ISMAR.2008.4637348","DOI":"10.1109\/ISMAR.2008.4637348"},{"key":"17344_CR24","unstructured":"Gao Y, et al. (2014) JHU-ISI gesture and skill assessment working set (JIGSAWS): a surgical activity dataset for human motion modeling.\u00a0In: Proceedings of the Modeling and Monitoring of Computer Assisted Interventions (M2CAI) \u2013 MICCAI Workshop,\u00a0Vol 3.\u00a0CIRL, Johns Hopkins University"},{"issue":"9","key":"17344_CR25","doi-asserted-by":"publisher","first-page":"2025","DOI":"10.1109\/TBME.2016.2647680","volume":"64","author":"N Ahmidi","year":"2017","unstructured":"Ahmidi N et al (2017) A Dataset and Benchmarks for Segmentation and Recognition of Gestures in Robotic Surgery. IEEE Trans Biomed Eng 64(9):2025\u20132041. https:\/\/doi.org\/10.1109\/TBME.2016.2647680","journal-title":"IEEE Trans Biomed Eng"},{"issue":"4","key":"17344_CR26","doi-asserted-by":"publisher","first-page":"871","DOI":"10.1007\/s11517-020-02143-7","volume":"58","author":"Y Zhao","year":"2020","unstructured":"Zhao Y et al (2020) A novel noncontact detection method of surgeon\u2019s operation for a master-slave endovascular surgery robot. Med Biol Eng Compu 58(4):871\u2013885. https:\/\/doi.org\/10.1007\/s11517-020-02143-7","journal-title":"Med Biol Eng Compu"},{"issue":"4","key":"17344_CR27","doi-asserted-by":"publisher","first-page":"592","DOI":"10.1016\/j.jmig.2013.12.122","volume":"21","author":"E Manoucheri","year":"2014","unstructured":"Manoucheri E, Fuchs-Weizman N, Cohen SL, Wang KC, Einarsson J (2014) MAUDE: Analysis of Robotic-Assisted Gynecologic Surgery. J Minim Invasive Gynecol 21(4):592\u2013595. https:\/\/doi.org\/10.1016\/j.jmig.2013.12.122","journal-title":"J Minim Invasive Gynecol"},{"issue":"11","key":"17344_CR28","doi-asserted-by":"publisher","first-page":"4924","DOI":"10.1007\/s00464-019-07281-0","volume":"34","author":"D Kitaguchi","year":"2020","unstructured":"Kitaguchi D et al (2020) Real-time automatic surgical phase recognition in laparoscopic sigmoidectomy using the convolutional neural network-based deep learning approach. Surg Endosc 34(11):4924\u20134931. https:\/\/doi.org\/10.1007\/s00464-019-07281-0","journal-title":"Surg Endosc"},{"issue":"1","key":"17344_CR29","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1038\/s41598-021-84295-6","volume":"11","author":"JL Lavanchy","year":"2021","unstructured":"Lavanchy JL et al (2021) Automation of surgical skill assessment using a three-stage machine learning algorithm. Sci Rep 11(1):1\u20139. 
https:\/\/doi.org\/10.1038\/s41598-021-84295-6","journal-title":"Sci Rep"},{"issue":"1","key":"17344_CR30","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1038\/s41598-019-53091-8","volume":"9","author":"S Morita","year":"2019","unstructured":"Morita S, Tabuchi H, Masumoto H, Yamauchi T, Kamiura N (2019) Real-Time Extraction of Important Surgical Phases in Cataract Surgery Videos. Sci Rep 9(1):1\u20138. https:\/\/doi.org\/10.1038\/s41598-019-53091-8","journal-title":"Sci Rep"},{"issue":"7","key":"17344_CR31","doi-asserted-by":"publisher","first-page":"1217","DOI":"10.1007\/s11548-019-01995-1","volume":"14","author":"I Funke","year":"2019","unstructured":"Funke I, Mees ST, Weitz J, Speidel S (2019) Video-based surgical skill assessment using 3D convolutional neural networks. Int J Comput Assist Radiol Surg 14(7):1217\u20131225. https:\/\/doi.org\/10.1007\/s11548-019-01995-1","journal-title":"Int J Comput Assist Radiol Surg"},{"key":"17344_CR32","unstructured":"Nwoye CI, et al. (2022) CholecTriplet2021: A benchmark challenge for surgical action triplet recognition. [Online]. Available: http:\/\/arxiv.org\/abs\/2204.04746"},{"key":"17344_CR33","unstructured":"He Z, Mottaghi A, Sharghi A, Jamal MA, Mohareri O (2022) An empirical study on activity recognition in long surgical videos. In: Proceedings of the 2nd machine learning for health symposium. Proceedings of Machine Learning Research 193:356-372 Available from\u00a0https:\/\/proceedings.mlr.press\/v193\/he22a.html"},{"key":"17344_CR34","doi-asserted-by":"publisher","unstructured":"Bao H, Dong L, Piao S, Wei F (2021) BEiT: BERT Pre-Training of Image Transformers, no. Mim, pp. 1\u201318. https:\/\/doi.org\/10.48550\/arXiv.2106.08254","DOI":"10.48550\/arXiv.2106.08254"},{"issue":"3","key":"17344_CR35","doi-asserted-by":"publisher","first-page":"240","DOI":"10.1007\/s00595-019-01874-x","volume":"50","author":"H Katsuno","year":"2020","unstructured":"Katsuno H et al (2020) Short- and long-term outcomes of robotic surgery for rectal cancer: a single-center retrospective cohort study. Surg Today 50(3):240\u2013247. https:\/\/doi.org\/10.1007\/s00595-019-01874-x","journal-title":"Surg Today"},{"key":"17344_CR36","doi-asserted-by":"publisher","unstructured":"Nwoye CI, et al. (2022) Rendezvous: Attention mechanisms for the recognition of surgical action triplets in endoscopic videos. Medical Image Analysis 78. https:\/\/doi.org\/10.1016\/j.media.2022.102433","DOI":"10.1016\/j.media.2022.102433"},{"issue":"6","key":"17344_CR37","doi-asserted-by":"publisher","first-page":"1225","DOI":"10.1007\/s11517-020-02127-7","volume":"58","author":"I Patrini","year":"2020","unstructured":"Patrini I, Ruperti M, Moccia S, Mattos LS, Frontoni E, De Momi E (2020) Transfer learning for informative-frame selection in laryngoscopic videos through learned features. Med Biol Eng Compu 58(6):1225\u20131238. https:\/\/doi.org\/10.1007\/s11517-020-02127-7","journal-title":"Med Biol Eng Compu"},{"issue":"6","key":"17344_CR38","doi-asserted-by":"publisher","first-page":"84","DOI":"10.1145\/3065386","volume":"60","author":"BA Krizhevsky","year":"2012","unstructured":"Krizhevsky BA, Sutskever I, Hinton GE (2012) ImageNet Classification with Deep Convolutional Neural Networks. Commun ACM 60(6):84\u201390. 
https:\/\/doi.org\/10.1145\/3065386","journal-title":"Commun ACM"},{"key":"17344_CR39","unstructured":"Simonyan K, Zisserman A (2015) Very deep convolutional networks for large-scale image recognition.\u00a0The 3rd International Conference on Learning Representations (ICLR2015).\u00a0https:\/\/arxiv.org\/abs\/1409.1556"},{"key":"17344_CR40","doi-asserted-by":"publisher","unstructured":"He K, Zhang X, Ren S, Sun J (2016) \"Deep residual learning for image recognition,\" Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, vol. 2016-Decem, pp. 770\u2013778. https:\/\/doi.org\/10.1109\/CVPR.2016.90","DOI":"10.1109\/CVPR.2016.90"},{"key":"17344_CR41","doi-asserted-by":"publisher","unstructured":"Howard AG, et al. (2017) MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications. https:\/\/doi.org\/10.48550\/arXiv.1704.04861","DOI":"10.48550\/arXiv.1704.04861"},{"key":"17344_CR42","doi-asserted-by":"publisher","unstructured":"Huang G, Liu Z, Van Der Maaten L, Weinberger KQ (2017) Densely connected convolutional networks,\" Proceedings - 30th IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2017, vol. 2017-Janua, pp. 2261\u20132269. https:\/\/doi.org\/10.1109\/CVPR.2017.243","DOI":"10.1109\/CVPR.2017.243"},{"key":"17344_CR43","doi-asserted-by":"publisher","unstructured":"Szegedy C, Ioffe S, Vanhoucke V, Alemi AA (2017) Inception-v4, inception-ResNet and the impact of residual connections on learning. 31st AAAI Conference on Artificial Intelligence, AAAI 2017, pp. 4278\u20134284. https:\/\/doi.org\/10.48550\/arXiv.1704.04861","DOI":"10.48550\/arXiv.1704.04861"},{"key":"17344_CR44","doi-asserted-by":"publisher","unstructured":"Kolesnikov A, et al. (2020) Big Transfer (BiT): General Visual Representation Learning,\" Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), vol. 12350 LNCS, pp. 491\u2013507. https:\/\/doi.org\/10.1007\/978-3-030-58558-7_29","DOI":"10.1007\/978-3-030-58558-7_29"},{"key":"17344_CR45","unstructured":"Tan M, Le QV (2019)\u00a0EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks. In: Proceedings of the 36th international conference on machine learning. Proceedings of Machine Learning\u00a0Research 97:6105-6114 Available from https:\/\/proceedings.mlr.press\/v97\/tan19a.html"},{"key":"17344_CR46","doi-asserted-by":"publisher","unstructured":"Pham H, Dai Z, Xie Q, Le QV (2021) Meta Pseudo Labels. Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 11557\u201311568. https:\/\/doi.org\/10.1109\/cvpr46437.2021.01139","DOI":"10.1109\/cvpr46437.2021.01139"},{"key":"17344_CR47","doi-asserted-by":"publisher","unstructured":"Janocha K, Czarnecki WM (2017) \"On loss functions for deep neural networks in classification,\" [Online]. 
Available: https:\/\/doi.org\/10.48550\/arXiv.1702.05659","DOI":"10.48550\/arXiv.1702.05659"}],"container-title":["Multimedia Tools and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-023-17344-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11042-023-17344-z\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-023-17344-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,5,15]],"date-time":"2024-05-15T07:40:37Z","timestamp":1715758837000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11042-023-17344-z"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,11,14]]},"references-count":47,"journal-issue":{"issue":"17","published-online":{"date-parts":[[2024,5]]}},"alternative-id":["17344"],"URL":"https:\/\/doi.org\/10.1007\/s11042-023-17344-z","relation":{},"ISSN":["1573-7721"],"issn-type":[{"type":"electronic","value":"1573-7721"}],"subject":[],"published":{"date-parts":[[2023,11,14]]},"assertion":[{"value":"28 September 2022","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"10 September 2023","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"27 September 2023","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"14 November 2023","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors have no conflicts of interest to declare relevant to this article's content.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflicts of interest"}}]}}
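
The record above is the standard Crossref REST API "work" envelope (status / message-type / message). A minimal sketch of how such a record can be retrieved and a few of its fields read, using only Python's standard library; the DOI and field names come from the JSON above, while the variable names and printed fields are illustrative choices:

import json
from urllib.request import urlopen

# Crossref REST API endpoint for a single work record
# (returns the same envelope as shown above).
DOI = "10.1007/s11042-023-17344-z"
with urlopen(f"https://api.crossref.org/works/{DOI}") as resp:
    work = json.load(resp)["message"]

# Read a few of the fields present in the record.
print(work["title"][0])                       # article title
print(", ".join(a.get("given", "") + " " + a["family"] for a in work["author"]))
print(work["container-title"][0], work["volume"], "pp.", work["page"])
print("reference-count:", work["reference-count"])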