{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,20]],"date-time":"2026-02-20T18:12:12Z","timestamp":1771611132371,"version":"3.50.1"},"reference-count":56,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2021,10,21]],"date-time":"2021-10-21T00:00:00Z","timestamp":1634774400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2021,10,21]],"date-time":"2021-10-21T00:00:00Z","timestamp":1634774400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61703284"],"award-info":[{"award-number":["61703284"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["U1613212"],"award-info":[{"award-number":["U1613212"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Auton Robot"],"published-print":{"date-parts":[[2022,1]]},"DOI":"10.1007\/s10514-021-10014-9","type":"journal-article","created":{"date-parts":[[2021,10,21]],"date-time":"2021-10-21T14:02:52Z","timestamp":1634824972000},"page":"21-43","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":11,"title":["Embodied scene description"],"prefix":"10.1007","volume":"46","author":[{"given":"Sinan","family":"Tan","sequence":"first","affiliation":[]},{"given":"Di","family":"Guo","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4042-6044","authenticated-orcid":false,"given":"Huaping","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Xinyu","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Fuchun","family":"Sun","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2021,10,21]]},"reference":[{"key":"10014_CR1","unstructured":"https:\/\/news.microsoft.com\/features\/bonjour-bienvenidos-seeing-ai-expands-to-5-new-languages\/"},{"key":"10014_CR2","doi-asserted-by":"crossref","unstructured":"Anderson, P., He, X., Buehler, C., Teney, D., Johnson, M., Gould, S., & Zhang, L. (2018). Bottom-up and top-down attention for image captioning and visual question answering. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 6077\u20136086).","DOI":"10.1109\/CVPR.2018.00636"},{"key":"10014_CR3","doi-asserted-by":"crossref","unstructured":"Anderson, P., Wu, Q., Teney, D., Bruce, J., Johnson, M., S\u00fcnderhauf, N., Reid, I., Gould, S., & van den Hengel, A. (2018). Vision-and-language navigation: Interpreting visually-grounded navigation instructions in real environments. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 3674\u20133683).","DOI":"10.1109\/CVPR.2018.00387"},{"issue":"2","key":"10014_CR4","doi-asserted-by":"publisher","first-page":"449","DOI":"10.1007\/s10514-018-9792-8","volume":"43","author":"D Arumugam","year":"2019","unstructured":"Arumugam, D., Karamcheti, S., Gopalan, N., Williams, E. C., Rhee, M., Wong, L. L., & Tellex, S. (2019). 
Grounding natural language instructions to semantic goal representations for abstraction and generalization. Autonomous Robots, 43(2), 449\u2013468.","journal-title":"Autonomous Robots"},{"key":"10014_CR5","doi-asserted-by":"crossref","unstructured":"Bashiri, F. S., LaRose, E., Badger, J. C., D\u2019Souza, R. M., Yu, Z., & Peissig, P. (2018) Object detection to assist visually impaired people: A deep neural network adventure. In International symposium on visual computing (pp. 500\u2013510). Springer.","DOI":"10.1007\/978-3-030-03801-4_44"},{"key":"10014_CR6","doi-asserted-by":"crossref","unstructured":"Chebotar, Y., Handa, A., Makoviychuk, V., Macklin, M., Issac, J., Ratliff, N., & Fox, D. (2019). Closing the sim-to-real loop: Adapting simulation randomization with real world experience. In 2019 International conference on robotics and automation (ICRA) (pp. 8973\u20138979). IEEE.","DOI":"10.1109\/ICRA.2019.8793789"},{"key":"10014_CR7","doi-asserted-by":"crossref","unstructured":"Chen, K., de Vicente, J. P., Sepulveda, G., Xia, F., Soto, A., Vazquez, M., & Savarese, S. (2019). A behavioral approach to visual navigation with graph localization networks. Robotics: Science and Systems 1\u201310.","DOI":"10.15607\/RSS.2019.XV.010"},{"issue":"16","key":"10014_CR8","doi-asserted-by":"publisher","first-page":"20651","DOI":"10.1007\/s11042-017-5472-5","volume":"77","author":"R Cheng","year":"2018","unstructured":"Cheng, R., Wang, K., Yang, K., Long, N., Bai, J., & Liu, D. (2018). Real-time pedestrian crossing lights detection algorithm for the visually impaired. Multimedia Tools and Applications, 77(16), 20651\u201320671.","journal-title":"Multimedia Tools and Applications"},{"key":"10014_CR9","doi-asserted-by":"crossref","unstructured":"Das, A., Datta, S., Gkioxari, G., Lee, S., Parikh, D., & Batra, D. (2018a). Embodied question answering. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops (pp. 2054\u20132063).","DOI":"10.1109\/CVPRW.2018.00279"},{"key":"10014_CR10","doi-asserted-by":"crossref","unstructured":"Das, A., Gkioxari, G., Lee, S., Parikh, D., & Batra, D. (2018b). Neural modular control for embodied question answering. arXiv preprint arXiv:1810.11181.","DOI":"10.1109\/CVPR.2018.00008"},{"key":"10014_CR11","doi-asserted-by":"crossref","unstructured":"Denkowski, M., & Lavie, A. (2014). Meteor universal: Language specific translation evaluation for any target language. In Proceedings of the ninth workshop on statistical machine translation (pp. 376\u2013380).","DOI":"10.3115\/v1\/W14-3348"},{"key":"10014_CR12","doi-asserted-by":"crossref","unstructured":"Donahue, J., Anne Hendricks, L., Guadarrama, S., Rohrbach, M., Venugopalan, S., Saenko, K., & Darrell, T. (2015). Long-term recurrent convolutional networks for visual recognition and description. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 2625\u20132634).","DOI":"10.1109\/CVPR.2015.7298878"},{"key":"10014_CR13","doi-asserted-by":"crossref","unstructured":"Gordon, D., Kembhavi, A., Rastegari, M., Redmon, J., Fox, D., & Farhadi, A. (2018). Iqa: Visual question answering in interactive environments. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 4089\u20134098).","DOI":"10.1109\/CVPR.2018.00430"},{"key":"10014_CR14","doi-asserted-by":"crossref","unstructured":"Haarnoja, T., Ha, S., Zhou, A., Tan, J., Tucker, G., & Levine, S. (2018). Learning to walk via deep reinforcement learning. 
Robotics: Science and Systems 1\u201310.","DOI":"10.15607\/RSS.2019.XV.011"},{"key":"10014_CR15","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., & Sun, J. (2016). Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 770\u2013778).","DOI":"10.1109\/CVPR.2016.90"},{"issue":"8\u20139","key":"10014_CR16","doi-asserted-by":"publisher","first-page":"279","DOI":"10.1016\/j.ipl.2010.02.001","volume":"110","author":"S Hougardy","year":"2010","unstructured":"Hougardy, S. (2010). The Floyd\u2013Warshall algorithm on graphs with negative cycles. Information Processing Letters, 110(8\u20139), 279\u2013281.","journal-title":"Information Processing Letters"},{"key":"10014_CR17","doi-asserted-by":"crossref","unstructured":"Jayaraman, D., & Grauman, K. (2018a). End-to-end policy learning for active visual categorization. IEEE Transactions on Pattern Analysis and Machine Intelligence, 41(7), 1601\u20131614.","DOI":"10.1109\/TPAMI.2018.2840991"},{"key":"10014_CR18","doi-asserted-by":"crossref","unstructured":"Jayaraman, D., & Grauman, K. (2018b). Learning to look around: Intelligently exploring unseen environments for unknown tasks. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 1238\u20131247).","DOI":"10.1109\/CVPR.2018.00135"},{"key":"10014_CR19","doi-asserted-by":"crossref","unstructured":"Jeong, R., Aytar, Y., Khosid, D., Zhou, Y., Kay, J., Lampe, T., Bousmalis, K., & Nori, F. (2020). Self-supervised sim-to-real adaptation for visual robotic manipulation. In 2020 IEEE international conference on robotics and automation (ICRA) (pp. 2718\u20132724). IEEE.","DOI":"10.1109\/ICRA40945.2020.9197326"},{"key":"10014_CR20","doi-asserted-by":"crossref","unstructured":"Johnson, J., Karpathy, A., & Fei-Fei, L. (2016). Densecap: Fully convolutional localization networks for dense captioning. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 4565\u20134574).","DOI":"10.1109\/CVPR.2016.494"},{"issue":"1","key":"10014_CR21","doi-asserted-by":"publisher","first-page":"12","DOI":"10.1049\/ccs.2019.0025","volume":"2","author":"A Kattepur","year":"2020","unstructured":"Kattepur, A., & Purushotaman, B. (2020). Roboplanner: A pragmatic task planning framework for autonomous robots. Cognitive Computation and Systems, 2(1), 12\u201322.","journal-title":"Cognitive Computation and Systems"},{"key":"10014_CR22","unstructured":"Kolve, E., Mottaghi, R., Gordon, D., Zhu, Y., Gupta, A., & Farhadi, A. (2017). Ai2-thor: An interactive 3d environment for visual ai. arXiv preprint arXiv:1712.05474."},{"key":"10014_CR23","doi-asserted-by":"crossref","unstructured":"Kong, C., Lin, D., Bansal, M., Urtasun, R., & Fidler, S. (2014). What are you talking about? Text-to-image coreference. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 3558\u20133565).","DOI":"10.1109\/CVPR.2014.455"},{"key":"10014_CR24","doi-asserted-by":"crossref","unstructured":"Krause, J., Johnson, J., Krishna, R., & Fei-Fei, L. (2017). A hierarchical approach for generating descriptive image paragraphs. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 317\u2013325).","DOI":"10.1109\/CVPR.2017.356"},{"key":"10014_CR25","doi-asserted-by":"crossref","unstructured":"Li, G., Mueller, M., Casser, V., Smith, N., Michels, D. L., & Ghanem, B. (2019a). Oil: Observational imitation learning. 
Robotics: Science and Systems 1\u201310.","DOI":"10.15607\/RSS.2019.XV.005"},{"key":"10014_CR26","doi-asserted-by":"crossref","unstructured":"Li, H., Zhang, Q., & Zhao, D. (2019b). Deep reinforcement learning-based automatic exploration for navigation in unknown environment. IEEE Transactions on Neural Networks and Learning Systems 31(6), 2064\u20132076.","DOI":"10.1109\/TNNLS.2019.2927869"},{"key":"10014_CR27","doi-asserted-by":"crossref","unstructured":"Liang, X., Hu, Z., Zhang, H., Gan, C., & Xing, E. P. (2017). Recurrent topic-transition gan for visual paragraph generation. In Proceedings of the IEEE international conference on computer vision (pp. 3362\u20133371).","DOI":"10.1109\/ICCV.2017.364"},{"issue":"6","key":"10014_CR28","doi-asserted-by":"publisher","first-page":"2253","DOI":"10.1109\/TNNLS.2017.2785233","volume":"29","author":"H Liu","year":"2018","unstructured":"Liu, H., Wu, Y., & Sun, F. (2018). Extreme trust region policy optimization for active object recognition. IEEE Transactions on Neural Networks and Learning Systems, 29(6), 2253\u20132258.","journal-title":"IEEE Transactions on Neural Networks and Learning Systems"},{"key":"10014_CR29","unstructured":"Mikolov, T., Sutskever, I., Chen, K., Corrado, G. S., & Dean, J. (2013). Distributed representations of words and phrases and their compositionality. In C.J.C. Burges, L. Bottou, M. Welling, Z. Ghahramani & K.Q. Weinberger (Eds.), Proceedings of a Advances in neural information processing systems (pp. 3111\u20133119)."},{"key":"10014_CR30","unstructured":"Papineni, K., Roukos, S., Ward, T., & Zhu, W. J. (2002). A method for automatic evaluation of machine translation. In Proceedings of the 40th annual meeting on association for computational linguistics (pp. 311\u2013318). Association for Computational Linguistics."},{"key":"10014_CR31","doi-asserted-by":"crossref","unstructured":"Park, D. H., Darrell, T., & Rohrbach, A. (2019). Robust change captioning. In Proceedings of the IEEE international conference on computer vision (pp. 4624\u20134633).","DOI":"10.1109\/ICCV.2019.00472"},{"key":"10014_CR32","doi-asserted-by":"crossref","unstructured":"Peng, X. B., Andrychowicz, M., Zaremba, W., & Abbeel, P. (2018). Sim-to-real transfer of robotic control with dynamics randomization. In 2018 IEEE international conference on robotics and automation (ICRA) (pp. 3803\u20133810). IEEE.","DOI":"10.1109\/ICRA.2018.8460528"},{"key":"10014_CR33","doi-asserted-by":"crossref","unstructured":"Pintado, D., Sanchez, V., Adarve, E., Mata, M., Gogebakan, Z., Cabuk, B., Chiu, C., Zhan, J., Gewali, L., & Oh, P. (2019). Deep learning based shopping assistant for the visually impaired. In 2019 IEEE international conference on consumer electronics (ICCE) (pp. 1\u20136). IEEE.","DOI":"10.1109\/ICCE.2019.8662011"},{"key":"10014_CR34","doi-asserted-by":"crossref","unstructured":"Ramakrishnan, S. K., & Grauman, K. (2018). Sidekick policy learning for active visual exploration. In Proceedings of the European conference on computer vision (ECCV) (pp. 413\u2013430).","DOI":"10.1007\/978-3-030-01258-8_26"},{"issue":"30","key":"10014_CR35","doi-asserted-by":"publisher","first-page":"eaaw6326","DOI":"10.1126\/scirobotics.aaw6326","volume":"4","author":"SK Ramakrishnan","year":"2019","unstructured":"Ramakrishnan, S. K., Jayaraman, D., & Grauman, K. (2019). Emergence of exploratory look-around behaviors through active observation completion. 
Science Robotics, 4(30), eaaw6326.","journal-title":"Science Robotics"},{"key":"10014_CR36","unstructured":"Ren, S., He, K., Girshick, R., & Sun, J. (2015). Faster r-cnn: Towards real-time object detection with region proposal networks. In C. Cortes, N. Lawrence, D. Lee, M. Sugiyama & R. Garnett (Eds.), Proceedings of Advances in neural information processing systems (pp. 91\u201399)."},{"key":"10014_CR37","doi-asserted-by":"crossref","unstructured":"Sadeghi, F. (2019) Divis: Domain invariant visual servoing for collision-free goal reaching. Robotics: Science and Systems 1\u201310.","DOI":"10.15607\/RSS.2019.XV.055"},{"issue":"10","key":"10014_CR38","doi-asserted-by":"publisher","first-page":"3047","DOI":"10.1109\/TNNLS.2018.2851077","volume":"30","author":"J Song","year":"2018","unstructured":"Song, J., Guo, Y., Gao, L., Li, X., Hanjalic, A., & Shen, H. T. (2018). From deterministic to generative: Multimodal stochastic rnns for video captioning. IEEE Transactions on Neural Networks and Learning Systems, 30(10), 3047\u20133058.","journal-title":"IEEE Transactions on Neural Networks and Learning Systems"},{"issue":"2","key":"10014_CR39","doi-asserted-by":"publisher","first-page":"367","DOI":"10.1007\/s10514-016-9587-8","volume":"41","author":"F Stramandinoli","year":"2017","unstructured":"Stramandinoli, F., Marocco, D., & Cangelosi, A. (2017). Making sense of words: A robotic model for language abstraction. Autonomous Robots, 41(2), 367\u2013383.","journal-title":"Autonomous Robots"},{"issue":"4","key":"10014_CR40","doi-asserted-by":"publisher","first-page":"913","DOI":"10.1007\/s10514-018-9762-1","volume":"43","author":"W Takano","year":"2019","unstructured":"Takano, W., Yamada, Y., & Nakamura, Y. (2019). Linking human motions and objects to language for synthesizing action sentences. Autonomous Robots, 43(4), 913\u2013925.","journal-title":"Autonomous Robots"},{"key":"10014_CR41","doi-asserted-by":"crossref","unstructured":"Tobin, J., Fong, R., Ray, A., Schneider, J., Zaremba, W., & Abbeel, P. (2017). Domain randomization for transferring deep neural networks from simulation to the real world. In 2017 IEEE\/RSJ international conference on intelligent robots and systems (IROS) (pp. 23\u201330). IEEE.","DOI":"10.1109\/IROS.2017.8202133"},{"key":"10014_CR42","doi-asserted-by":"crossref","unstructured":"Vedantam, R., Lawrence Zitnick, C., & Parikh, D. (2015). Cider: Consensus-based image description evaluation. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 4566\u20134575).","DOI":"10.1109\/CVPR.2015.7299087"},{"key":"10014_CR43","doi-asserted-by":"crossref","unstructured":"Vinyals, O., Toshev, A., Bengio, S., & Erhan, D. (2015). Show and tell: A neural image caption generator. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 3156\u20133164).","DOI":"10.1109\/CVPR.2015.7298935"},{"issue":"4","key":"10014_CR44","doi-asserted-by":"publisher","first-page":"652","DOI":"10.1109\/TPAMI.2016.2587640","volume":"39","author":"O Vinyals","year":"2016","unstructured":"Vinyals, O., Toshev, A., Bengio, S., & Erhan, D. (2016). Show and tell: Lessons learned from the 2015 mscoco image captioning challenge. 
IEEE Transactions on Pattern Analysis and Machine Intelligence, 39(4), 652\u2013663.","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"issue":"3","key":"10014_CR45","doi-asserted-by":"publisher","first-page":"555","DOI":"10.1007\/s10514-016-9595-8","volume":"41","author":"A Wachaja","year":"2017","unstructured":"Wachaja, A., Agarwal, P., Zink, M., Adame, M. R., M\u00f6ller, K., & Burgard, W. (2017). Navigating blind people with walking impairments using a smart walker. Autonomous Robots, 41(3), 555\u2013573.","journal-title":"Autonomous Robots"},{"key":"10014_CR46","doi-asserted-by":"crossref","unstructured":"Wang, X., Huang, Q., Celikyilmaz, A., Gao, J., Shen, D., Wang, Y. F., Wang, W. Y., & Zhang, L. (2019). Reinforced cross-modal matching and self-supervised imitation learning for vision-language navigation. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 6629\u20136638).","DOI":"10.1109\/CVPR.2019.00679"},{"key":"10014_CR47","doi-asserted-by":"crossref","unstructured":"Wijmans, E., Datta, S., Maksymets, O., Das, A., Gkioxari, G., Lee, S., Essa, I., Parikh, D., & Batra, D. (2019). Embodied question answering in photorealistic environments with point cloud perception. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 6659\u20136668).","DOI":"10.1109\/CVPR.2019.00682"},{"key":"10014_CR48","doi-asserted-by":"crossref","unstructured":"Wu, Y., Jiang, L., & Yang, Y. (2019). Revisiting embodiedqa: A simple baseline and beyond. arXiv preprint arXiv:1904.04166.","DOI":"10.1109\/TIP.2020.2967584"},{"key":"10014_CR49","unstructured":"Xu, K., Ba, J., Kiros, R., Cho, K., Courville, A., Salakhudinov, R., Zemel, R., & Bengio, Y. (2015). Show, attend and tell: Neural image caption generation with visual attention. In International conference on machine learning (pp. 2048\u20132057)."},{"key":"10014_CR50","unstructured":"Yang, J., Ren, Z., Xu, M., Chen, X., Crandall, D., Parikh, D., & Batra, D. (2019). Embodied visual recognition. arXiv preprint arXiv:1904.04404."},{"key":"10014_CR51","doi-asserted-by":"crossref","unstructured":"Ye, X., Lin, Z., Lee, J. Y., Zhang, J., Zheng, S., & Yang, Y. (2019). Gaple: Generalizable approaching policy learning for robotic object searching in indoor environment. IEEE Robotics and Automation Letters 4(4), 4003\u20134010.","DOI":"10.1109\/LRA.2019.2930426"},{"key":"10014_CR52","doi-asserted-by":"crossref","unstructured":"You, Q., Jin, H., Wang, Z., Fang, C., & Luo, J. (2016). Image captioning with semantic attention. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 4651\u20134659).","DOI":"10.1109\/CVPR.2016.503"},{"key":"10014_CR53","doi-asserted-by":"crossref","unstructured":"Yu, L., Chen, X., Gkioxari, G., Bansal, M., Berg, T. L., & Batra, D. (2019). Multi-target embodied question answering. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 6309\u20136318).","DOI":"10.1109\/CVPR.2019.00647"},{"key":"10014_CR54","doi-asserted-by":"crossref","unstructured":"Zhao, H., Shi, J., Qi, X., Wang, X., & Jia, J. (2017). Pyramid scene parsing network. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 
2881\u20132890).","DOI":"10.1109\/CVPR.2017.660"},{"issue":"5","key":"10014_CR55","doi-asserted-by":"publisher","first-page":"1271","DOI":"10.1007\/s10514-018-9793-7","volume":"43","author":"J Zhong","year":"2019","unstructured":"Zhong, J., Peniak, M., Tani, J., Ogata, T., & Cangelosi, A. (2019). Sensorimotor input as a language generalisation tool: A neurorobotics model for generation and generalisation of noun-verb combinations with sensorimotor inputs. Autonomous Robots, 43(5), 1271\u20131290.","journal-title":"Autonomous Robots"},{"key":"10014_CR56","doi-asserted-by":"crossref","unstructured":"Zhu, Y., Mottaghi, R., Kolve, E., Lim, J. J., Gupta, A., Fei-Fei, L., & Farhadi, A. (2017). Target-driven visual navigation in indoor scenes using deep reinforcement learning. In 2017 IEEE international conference on robotics and automation (ICRA) (pp. 3357\u20133364). IEEE.","DOI":"10.1109\/ICRA.2017.7989381"}],"container-title":["Autonomous Robots"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10514-021-10014-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10514-021-10014-9\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10514-021-10014-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,2,5]],"date-time":"2022-02-05T09:07:57Z","timestamp":1644052077000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10514-021-10014-9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,10,21]]},"references-count":56,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2022,1]]}},"alternative-id":["10014"],"URL":"https:\/\/doi.org\/10.1007\/s10514-021-10014-9","relation":{},"ISSN":["0929-5593","1573-7527"],"issn-type":[{"value":"0929-5593","type":"print"},{"value":"1573-7527","type":"electronic"}],"subject":[],"published":{"date-parts":[[2021,10,21]]},"assertion":[{"value":"3 January 2021","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"24 July 2021","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"21 October 2021","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The author decalres that they have no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"Informed consent was obtained from all individual participants included in this study.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Informed consent"}}]}}