{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,30]],"date-time":"2026-03-30T17:46:28Z","timestamp":1774892788292,"version":"3.50.1"},"reference-count":68,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2022,10,28]],"date-time":"2022-10-28T00:00:00Z","timestamp":1666915200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2022,10,28]],"date-time":"2022-10-28T00:00:00Z","timestamp":1666915200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/100000001","name":"National Science Foundation","doi-asserted-by":"publisher","award":["OIA-1946391"],"award-info":[{"award-number":["OIA-1946391"]}],"id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J Comput Vis"],"published-print":{"date-parts":[[2023,1]]},"DOI":"10.1007\/s11263-022-01702-9","type":"journal-article","created":{"date-parts":[[2022,10,28]],"date-time":"2022-10-28T21:02:40Z","timestamp":1666990960000},"page":"302-323","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":31,"title":["AOE-Net: Entities Interactions Modeling with Adaptive Attention Mechanism for Temporal Action Proposals Generation"],"prefix":"10.1007","volume":"131","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-0277-7094","authenticated-orcid":false,"given":"Khoa","family":"Vo","sequence":"first","affiliation":[]},{"given":"Sang","family":"Truong","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6569-6860","authenticated-orcid":false,"given":"Kashu","family":"Yamazaki","sequence":"additional","affiliation":[]},{"given":"Bhiksha","family":"Raj","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3046-3041","authenticated-orcid":false,"given":"Minh-Triet","family":"Tran","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2571-0511","authenticated-orcid":false,"given":"Ngan","family":"Le","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,10,28]]},"reference":[{"key":"1702_CR1","doi-asserted-by":"crossref","unstructured":"Lin, T., Zhao, X., Su, H., Wang, C., & Yang, M. (2018). Bsn: Boundary sensitive network for temporal action proposal generation. In ECCV.","DOI":"10.1007\/978-3-030-01225-0_1"},{"key":"1702_CR2","doi-asserted-by":"crossref","unstructured":"Su, H., Gan, W., Wu, W., Yan, J., & Qiao, Y. (2020). BSN++: complementary boundary regressor with scale-balanced relation modeling for temporal action proposal generation. In ACCV.","DOI":"10.1609\/aaai.v35i3.16363"},{"key":"1702_CR3","doi-asserted-by":"crossref","unstructured":"Lin, T., Liu, X., Li, X., Ding, E., & Wen, S. (2019). Bmn: Boundary-matching network for temporal action proposal generation. In ICCV.","DOI":"10.1109\/ICCV.2019.00399"},{"key":"1702_CR4","doi-asserted-by":"crossref","unstructured":"Lin, C., Li, J., Wang, Y., Tai, Y., Luo, D., Cui, Z., Wang, C., Li, J., Huang, F., & Ji, R. (2020). Fast learning of temporal action proposal via dense boundary generator. 
AAAI, 11499\u201311506.","DOI":"10.1609\/aaai.v34i07.6815"},{"key":"1702_CR5","doi-asserted-by":"crossref","unstructured":"Xu, M., Zhao, C., Rojas, D. S., Thabet, A., & Ghanem, B. (2020). G-tad: Sub-graph localization for temporal action detection. In CVPR.","DOI":"10.1109\/CVPR42600.2020.01017"},{"key":"1702_CR6","doi-asserted-by":"publisher","first-page":"126431","DOI":"10.1109\/ACCESS.2021.3110973","volume":"9","author":"K Vo","year":"2021","unstructured":"Vo, K., Yamazaki, K., Truong, S., Tran, M.-T., Sugimoto, A., & Le, N. (2021). Abn: Agent-aware boundary networks for temporal action proposal generation. IEEE Access, 9, 126431\u2013126445.","journal-title":"IEEE Access"},{"key":"1702_CR7","doi-asserted-by":"crossref","unstructured":"Vo-Ho, V.-K., Le, N., Kamazaki, K., Sugimoto, A., & Tran, M.-T. (2021). Agent-environment network for temporal action proposal generation. In ICASSP, pp. 2160\u20132164.","DOI":"10.1109\/ICASSP39728.2021.9415101"},{"key":"1702_CR8","doi-asserted-by":"crossref","unstructured":"Shou, Z., Wang, D., & Chang, S.-F. (2016). Temporal action localization in untrimmed videos via multi-stage cnns. In CVPR.","DOI":"10.1109\/CVPR.2016.119"},{"key":"1702_CR9","doi-asserted-by":"crossref","unstructured":"Gao, J., Yang, Z., & Nevatia, R. (2017). Cascaded boundary regression for temporal action detection. arXiv e-prints, 1705\u201301180. arXiv:1705.01180 [cs.CV].","DOI":"10.5244\/C.31.52"},{"key":"1702_CR10","doi-asserted-by":"crossref","unstructured":"Gao, J., Chen, K., & Nevatia, R. (2018). Ctap: Complementary temporal action proposal generation. In ECCV.","DOI":"10.1007\/978-3-030-01216-8_5"},{"key":"1702_CR11","doi-asserted-by":"crossref","unstructured":"Gao, J., Ge, R., Chen, K., & Nevatia, R. (2018). Motion-appearance co-memory networks for video question answering. In CVPR.","DOI":"10.1109\/CVPR.2018.00688"},{"key":"1702_CR12","doi-asserted-by":"crossref","unstructured":"Fabian Caba Heilbron, B. G. Victor Escorcia, & Niebles, J. C. (2015). Activitynet: A large-scale video benchmark for human activity understanding. In CVPR, pp. 961\u2013970.","DOI":"10.1109\/CVPR.2015.7298698"},{"key":"1702_CR13","unstructured":"Jiang, Y.-G., Liu, J., Roshan Zamir, A., Toderici, G., Laptev, I., Shah, M., & Sukthankar, R. (2014). THUMOS Challenge: Action recognition with a large number of classes. http:\/\/crcv.ucf.edu\/THUMOS14\/."},{"key":"1702_CR14","doi-asserted-by":"crossref","unstructured":"Krishna, R., Hata, K., Ren, F., Fei-Fei, L., & Carlos Niebles, J. (2017). Dense-captioning events in videos. In ICCV, pp. 706\u2013715.","DOI":"10.1109\/ICCV.2017.83"},{"key":"1702_CR15","unstructured":"Kay, W., Carreira, J., , et al. (2017). The kinetics human action video dataset. arXiv preprint arXiv:1705.06950."},{"key":"1702_CR16","doi-asserted-by":"crossref","unstructured":"Richard, A., & Gall, J. (2016). Temporal action detection using a statistical language model. In CVPR.","DOI":"10.1109\/CVPR.2016.341"},{"key":"1702_CR17","doi-asserted-by":"crossref","unstructured":"Chao, Y., Vijayanarasimhan, S., Seybold, B., Ross, D. A., Deng, J., & Sukthankar, R. (2018). Rethinking the faster r-cnn architecture for temporal action localization. In CVPR, pp. 1130\u20131139.","DOI":"10.1109\/CVPR.2018.00124"},{"key":"1702_CR18","doi-asserted-by":"crossref","unstructured":"Heilbron, F. C., Niebles, J. C., & Ghanem, B. (2016). Fast temporal activity proposals for efficient detection of human actions in untrimmed videos. 
In CVPR.","DOI":"10.1109\/CVPR.2016.211"},{"key":"1702_CR19","doi-asserted-by":"crossref","unstructured":"Gao, J., Yang, Z., Chen, K., Sun, C., & Nevatia, R. (2017). Turn tap: Temporal unit regression network for temporal action proposals. In ICCV.","DOI":"10.1109\/ICCV.2017.392"},{"issue":"1","key":"1702_CR20","doi-asserted-by":"publisher","first-page":"221","DOI":"10.1109\/TPAMI.2012.59","volume":"35","author":"S Ji","year":"2013","unstructured":"Ji, S., Xu, W., Yang, M., & Yu, K. (2013). 3d convolutional neural networks for human action recognition. IEEE TPAMI, 35(1), 221\u2013231.","journal-title":"IEEE TPAMI"},{"key":"1702_CR21","doi-asserted-by":"crossref","unstructured":"Carreira, J., & Zisserman, A. (2017). Quo vadis, action recognition? a new model and the kinetics dataset. In CVPR, pp. 6299\u20136308.","DOI":"10.1109\/CVPR.2017.502"},{"key":"1702_CR22","unstructured":"Simonyan, K., & Zisserman, A. (2014). Two-stream convolutional networks for action recognition in videos. In NIPS. NIPS\u201914, pp. 568\u2013576. MIT Press, Cambridge, MA, USA."},{"key":"1702_CR23","doi-asserted-by":"crossref","unstructured":"Feichtenhofer, C., Fan, H., Malik, J., & He, K. (2019). Slowfast networks for video recognition. In ICCV.","DOI":"10.1109\/ICCV.2019.00630"},{"key":"1702_CR24","doi-asserted-by":"crossref","unstructured":"Mei, T., Zhang, W., & Yao, T. (2020). Vision and language: from visual perception to content creation. APSIPA TSIP 9.","DOI":"10.1017\/ATSIP.2020.10"},{"key":"1702_CR25","doi-asserted-by":"crossref","unstructured":"Anderson, P., He, X., Buehler, C., Teney, D., Johnson, M., Gould, S., & Zhang, L. (2018). Bottom-up and top-down attention for image captioning and visual question answering. In CVPR, pp. 6077\u20136086","DOI":"10.1109\/CVPR.2018.00636"},{"key":"1702_CR26","unstructured":"Radford, A., Kim, J. W., et al. (2021). Learning transferable visual models from natural language supervision. arXiv preprint arXiv:2103.00020."},{"key":"1702_CR27","doi-asserted-by":"crossref","unstructured":"Heilbron, F. C., Niebles, J. C., & Ghanem, B. (2016). Fast temporal activity proposals for efficient detection of human actions in untrimmed videos. In CVPR, pp. 1914\u20131923.","DOI":"10.1109\/CVPR.2016.211"},{"key":"1702_CR28","unstructured":"Ren, S., He, K., Girshick, R., & Sun, J. (2015). Faster r-cnn: Towards real-time object detection with region proposal networks. In NeurIPS, pp. 91\u201399."},{"key":"1702_CR29","doi-asserted-by":"crossref","unstructured":"Lin, T., Goyal, P., Girshick, R., He, K., & Doll\u00f6r, P. (2017). Focal loss for dense object detection. In ICCV, pp. 2999\u20133007","DOI":"10.1109\/ICCV.2017.324"},{"key":"1702_CR30","unstructured":"Redmon, J., & Farhadi, A. (2018). Yolov3: An incremental improvement. arXiv."},{"key":"1702_CR31","doi-asserted-by":"crossref","unstructured":"Tran, D., Bourdev, L., Fergus, R., Torresani, L., & Paluri, M. (2015). Learning spatiotemporal features with 3d convolutional networks. In ICCV, pp. 4489\u20134497.","DOI":"10.1109\/ICCV.2015.510"},{"key":"1702_CR32","doi-asserted-by":"crossref","unstructured":"Zhao, Y., Xiong, Y., Wang, L., Wu, Z., Tang, X., & Lin, D. (2017). Temporal action detection with structured segment networks. In ICCV.","DOI":"10.1109\/ICCV.2017.317"},{"key":"1702_CR33","doi-asserted-by":"crossref","unstructured":"Liu, Y., Ma, L., Zhang, Y., Liu, W., & Chang, S.-F. (2019). Multi-granularity generator for temporal action proposal. 
In CVPR.","DOI":"10.1109\/CVPR.2019.00372"},{"issue":"11","key":"1702_CR34","doi-asserted-by":"publisher","first-page":"1254","DOI":"10.1109\/34.730558","volume":"20","author":"L Itti","year":"1998","unstructured":"Itti, L., Koch, C., & Niebur, E. (1998). A model of saliency-based visual attention for rapid scene analysis. IEEE TPAMI, 20(11), 1254\u20131259.","journal-title":"IEEE TPAMI"},{"issue":"5","key":"1702_CR35","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3465055","volume":"12","author":"S Chaudhari","year":"2021","unstructured":"Chaudhari, S., Mithal, V., Polatkan, G., & Ramanath, R. (2021). An attentive survey of attention models. TIST, 12(5), 1\u201332.","journal-title":"TIST"},{"key":"1702_CR36","unstructured":"Bahdanau, D., Cho, K., & Bengio, Y. (2014). Neural machine translation by jointly learning to align and translate. arXiv preprint arXiv:1409.0473."},{"issue":"11","key":"1702_CR37","doi-asserted-by":"publisher","first-page":"1875","DOI":"10.1109\/TMM.2015.2477044","volume":"17","author":"K Cho","year":"2015","unstructured":"Cho, K., Courville, A., & Bengio, Y. (2015). Describing multimedia content using attention-based encoder-decoder networks. IEEE Transactions on Multimedia, 17(11), 1875\u20131886.","journal-title":"IEEE Transactions on Multimedia"},{"key":"1702_CR38","doi-asserted-by":"crossref","unstructured":"Galassi, A., Lippi, M., & Torroni, P. (2020). Attention in natural language processing. IEEE Transactions on Neural Networks and Learning Systems.","DOI":"10.1109\/TNNLS.2020.3019893"},{"key":"1702_CR39","unstructured":"Chaudhari, S., Polatkan, G., Ramanath, R., & Mithal, V. (2019). An attentive survey of attention models. arXiv preprint arXiv:1904.02874."},{"key":"1702_CR40","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, L. U., & Polosukhin, I. (2017). Attention is all you need. In NeurIPS. Curran Associates, Inc."},{"key":"1702_CR41","unstructured":"Xu, K., Ba, J., Kiros, R., Cho, K., Courville, A., Salakhudinov, R., Zemel, R., & Bengio, Y. (2015). Show, attend and tell: Neural image caption generation with visual attention. In ICML, pp. 2048\u20132057. PMLR."},{"key":"1702_CR42","unstructured":"Elsayed, G., Kornblith, S., & Le, Q. V. (2019). Saccader: Improving accuracy of hard attention models for vision. In NeurIPS."},{"key":"1702_CR43","doi-asserted-by":"crossref","unstructured":"Patro, B., & Namboodiri, V. P. (2018). Differential attention for visual question answering. In CVPR, pp. 7680\u20137688.","DOI":"10.1109\/CVPR.2018.00801"},{"key":"1702_CR44","doi-asserted-by":"crossref","unstructured":"Long, F., Yao, T., Qiu, Z., Tian, X., Luo, J., & Mei, T. (2019). Gaussian temporal awareness networks for action localization. In CVPR, pp. 344\u2013353.","DOI":"10.1109\/CVPR.2019.00043"},{"key":"1702_CR45","unstructured":"Liu, S., Zhao, X., Su, H., & Hu, Z. (2020). Tsi: Temporal scale invariant network for action proposal generation. In ACCV."},{"key":"1702_CR46","doi-asserted-by":"crossref","unstructured":"Bai, Y., Wang, Y., Tong, Y., Yang, Y., Liu, Q., & Liu, J. (2020). Boundary content graph neural network for temporal action proposal generation. In ECCV, pp. 121\u2013137. Springer.","DOI":"10.1007\/978-3-030-58604-1_8"},{"key":"1702_CR47","doi-asserted-by":"crossref","unstructured":"Tan, J., Tang, J., Wang, L., & Wu, G. (2021). Relaxed transformer decoders for direct action proposal generation. 
ICCV.","DOI":"10.1109\/ICCV48922.2021.01327"},{"key":"1702_CR48","doi-asserted-by":"crossref","unstructured":"He, K., Gkioxari, G., Dollar, P., & Girshick, R. (2017). Mask r-cnn. In ICCV.","DOI":"10.1109\/ICCV.2017.322"},{"key":"1702_CR49","doi-asserted-by":"publisher","unstructured":"Sennrich, R., Haddow, B., & Birch, A. (2016). Neural machine translation of rare words with subword units. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 1715\u20131725. Association for Computational Linguistics, Berlin, Germany. https:\/\/doi.org\/10.18653\/v1\/P16-1162. https:\/\/aclanthology.org\/P16-1162.","DOI":"10.18653\/v1\/P16-1162"},{"key":"1702_CR50","unstructured":"Dosovitskiy, A., Beyer, L., et al. (2021). An image is worth 16x16 words: Transformers for image recognition at scale. CVPR."},{"key":"1702_CR51","doi-asserted-by":"crossref","unstructured":"Malinowski, M., Doersch, C., Santoro, A., & Battaglia, P. (2018). Learning visual question answering by bootstrapping hard attention. In ECCV, pp. 3\u201320.","DOI":"10.1007\/978-3-030-01231-1_1"},{"key":"1702_CR52","unstructured":"Vo, K., Joo, H., Yamazaki, K., Truong, S., Kitani, K., Tran, M.-T., & Le, N. (2021). Aei: Actors-environment interaction with adaptive attention for temporal action proposals generation. In 32nd British Machine Vision Conference 2021, BMVC 2021, Virtual Event, UK, November 22-25, 2021. https:\/\/www.bmvc2021-virtualconference.com\/assets\/papers\/1095.pdf."},{"key":"1702_CR53","doi-asserted-by":"crossref","unstructured":"Bodla, N., Singh, B., Chellappa, R., & Davis, L. S. (2017). Soft-nms \u2013 improving object detection with one line of code. In ICCV.","DOI":"10.1109\/ICCV.2017.593"},{"key":"1702_CR54","doi-asserted-by":"crossref","unstructured":"Neubeck, A., & Van Gool, L. (2006). Efficient non-maximum suppression. In ICPR, vol. 3, pp. 850\u2013855.","DOI":"10.1109\/ICPR.2006.479"},{"key":"1702_CR55","doi-asserted-by":"crossref","unstructured":"Damen, D., Doughty, H., et al. (2021). Rescaling egocentric vision: Collection, pipeline and challenges for epic-kitchens-100. IJVC, 1\u201323.","DOI":"10.1007\/s11263-021-01531-2"},{"key":"1702_CR56","doi-asserted-by":"crossref","unstructured":"Zhao, P., Xie, L., Ju, C., Zhang, Y., Wang, Y., & Tian, Q. (2020). Bottom-up temporal action localization with mutual regularization. In ECCV, pp. 539\u2013555. Springer.","DOI":"10.1007\/978-3-030-58598-3_32"},{"key":"1702_CR57","doi-asserted-by":"crossref","unstructured":"Dai, X., Singh, B., Zhang, G., Davis, L. S., & Qiu Chen, Y. (2017) Temporal context network for activity localization in videos. In ICCV.","DOI":"10.1109\/ICCV.2017.610"},{"key":"1702_CR58","unstructured":"Yao, T., Li, Y., Qiu, Z., Long, F., Pan, Y., Li, D., & Mei, T. (2017). Msr asia msm at activitynet challenge 2017: Trimmed action recognition, temporal action proposals and densecaptioning events in videos. In CVPR Workshops."},{"key":"1702_CR59","doi-asserted-by":"crossref","unstructured":"Buch, S., Escorcia, V., Ghanem, B., Fei-Fei, L., & Niebles, J. C. (2017). End-to-end, single-stream temporal action detection in untrimmed videos. In BMVC","DOI":"10.5244\/C.31.93"},{"key":"1702_CR60","doi-asserted-by":"crossref","unstructured":"Eun, H., Lee, S., Moon, J., Park, J., Jung, C., & Kim, C. (2019). Srg: Snippet relatedness-based temporal action proposal generator. IEEE Transactions on Circuits and Systems for Video Technology, p. 
1.","DOI":"10.1109\/TCSVT.2019.2953187"},{"key":"1702_CR61","doi-asserted-by":"crossref","unstructured":"Wang, X., Zhang, S., Qing, Z., Shao, Y., Gao, C., & Sang, N. (2021). Self-supervised learning for semi-supervised temporal action proposal. In CVPR, pp. 1905\u20131914.","DOI":"10.1109\/CVPR46437.2021.00194"},{"key":"1702_CR62","doi-asserted-by":"crossref","unstructured":"Qing, Z., Su, H., Gan, W., Wang, D., Wu, W., Wang, X., Qiao, Y., Yan, J., Gao, C., & Sang, N. (2021). Temporal context aggregation network for temporal action proposal refinement. In CVPR, pp. 485\u2013494.","DOI":"10.1109\/CVPR46437.2021.00055"},{"key":"1702_CR63","doi-asserted-by":"crossref","unstructured":"Zheng, J., Chen, D., & Hu, H. (2021). Boundary adjusted network based on cosine similarity for temporal action proposal generation. Neural Processing Letters, 1\u201316.","DOI":"10.1007\/s11063-021-10500-2"},{"key":"1702_CR64","doi-asserted-by":"crossref","unstructured":"Lin, T.-Y., Maire, M., Belongie, S., Hays, J., Perona, P., Ramanan, D., Dollar, P., & Zitnick, L. (2014). Microsoft coco: Common objects in context. In ECCV.","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"1702_CR65","doi-asserted-by":"crossref","unstructured":"Gao, J., Shi, Z., Wang, G., Li, J., Yuan, Y., Ge, S., & Zhou, X. (2020). Accurate temporal action proposal generation with relation-aware pyramid network. In AAAI, vol. 34, pp. 10810\u201310817.","DOI":"10.1609\/aaai.v34i07.6711"},{"key":"1702_CR66","doi-asserted-by":"crossref","unstructured":"Zeng, R., Huang, W., Tan, M., Rong, Y., Zhao, P., Huang, J., & Gan, C. (2019). Graph convolutional networks for temporal action localization. In ICCV, pp. 7094\u20137103.","DOI":"10.1109\/ICCV.2019.00719"},{"key":"1702_CR67","unstructured":"Xiong, Y., Wang, L., Wang, Z., Zhang, B., Song, H., Li, W., Lin, D., Qiao, Y., Gool, L. V., & Tang, X. (2016). CUHK & ETHZ & SIAT submission to activitynet challenge 2016. CoRR arXiv:1608.00797."},{"key":"1702_CR68","doi-asserted-by":"crossref","unstructured":"Wang, L., Xiong, Y., Lin, D., & Van Gool, L. (2017). Untrimmednets for weakly supervised action recognition and detection. 
In CVPR.","DOI":"10.1109\/CVPR.2017.678"}],"container-title":["International Journal of Computer Vision"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-022-01702-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11263-022-01702-9\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-022-01702-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,1,6]],"date-time":"2023-01-06T03:30:10Z","timestamp":1672975810000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11263-022-01702-9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,10,28]]},"references-count":68,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2023,1]]}},"alternative-id":["1702"],"URL":"https:\/\/doi.org\/10.1007\/s11263-022-01702-9","relation":{},"ISSN":["0920-5691","1573-1405"],"issn-type":[{"value":"0920-5691","type":"print"},{"value":"1573-1405","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,10,28]]},"assertion":[{"value":"21 January 2022","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"30 September 2022","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"28 October 2022","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}]}}