{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,2]],"date-time":"2025-09-02T10:57:30Z","timestamp":1756810650056,"version":"3.37.3"},"reference-count":65,"publisher":"Springer Science and Business Media LLC","issue":"4","license":[{"start":{"date-parts":[[2021,9,15]],"date-time":"2021-09-15T00:00:00Z","timestamp":1631664000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2021,9,15]],"date-time":"2021-09-15T00:00:00Z","timestamp":1631664000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"funder":[{"name":"National Key Research and Development Program of China","award":["2018YFC0831500"],"award-info":[{"award-number":["2018YFC0831500"]}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"crossref","award":["grant no.61972047"],"award-info":[{"award-number":["grant no.61972047"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]},{"name":"NSFC-General Technology Basic Research Joint Funds","award":["grant no. U1936220"],"award-info":[{"award-number":["grant no. U1936220"]}]}],
"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimed Tools Appl"],"published-print":{"date-parts":[[2022,2]]},"DOI":"10.1007\/s11042-021-11466-y","type":"journal-article","created":{"date-parts":[[2021,9,15]],"date-time":"2021-09-15T03:26:35Z","timestamp":1631676395000},"page":"4909-4934","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":7,"title":["A Multimodal Approach for Multiple-Relation Extraction in Videos"],"prefix":"10.1007","volume":"81","author":[{"given":"Zihe","family":"Liu","sequence":"first","affiliation":[]},{"given":"Weiying","family":"Hou","sequence":"additional","affiliation":[]},{"given":"Jiayi","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Chenyu","family":"Cao","sequence":"additional","affiliation":[]},{"given":"Bin","family":"Wu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2021,9,15]]},"reference":[
{"key":"11466_CR1","doi-asserted-by":"crossref","unstructured":"Aimar ES, Radeva P, Dimiccoli M\u00a0(2019) Social relation recognition in egocentric photostreams. In: 2019 IEEE International Conference on Image Processing (ICIP), pp. 3227\u20133231. IEEE.","DOI":"10.1109\/ICIP.2019.8803634"},
{"key":"11466_CR2","doi-asserted-by":"crossref","unstructured":"Arandjelovic R, Gronat P, Torii A, Pajdla T, Sivic J\u00a0(2016) Netvlad: Cnn architecture for weakly supervised place recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 5297\u20135307","DOI":"10.1109\/CVPR.2016.572"},
{"key":"11466_CR3","doi-asserted-by":"crossref","unstructured":"Barr JR, Cament LA, Bowyer KW, Flynn PJ\u00a0(2014) Active clustering with ensembles for social structure extraction. In: IEEE Winter Conference on Applications of Computer Vision, pp. 969\u2013976. IEEE","DOI":"10.1109\/WACV.2014.6835999"},
{"key":"11466_CR4","doi-asserted-by":"crossref","unstructured":"Carreira J, Zisserman A\u00a0(2017) Quo vadis, action recognition? A new model and the kinetics dataset. CoRR abs\/1705.07750.\u00a0http:\/\/arxiv.org\/abs\/1705.07750","DOI":"10.1109\/CVPR.2017.502"},
{"key":"11466_CR5","doi-asserted-by":"crossref","unstructured":"Carreira J, Zisserman A\u00a0(2017) Quo vadis, action recognition? a new model and the kinetics dataset. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6299\u20136308","DOI":"10.1109\/CVPR.2017.502"},
{"key":"11466_CR6","doi-asserted-by":"crossref","unstructured":"Chen YY, Hsu WH, Liao HYM (2012) Discovering informative social subgraphs and predicting pairwise relationships from group photos. In: Proceedings of the 20th ACM international conference on Multimedia, pp. 669\u2013678","DOI":"10.1145\/2393347.2393439"},
{"key":"11466_CR7","unstructured":"Chiu YI, Huang CR, Chung PC\u00a0(2013) Character relationship analysis in movies using face tracks. In: MVA, pp. 431\u2013434"},
{"key":"11466_CR8","doi-asserted-by":"crossref","unstructured":"Dai P, Lv J, Wu B\u00a0(2019) Two-stage model for social relationship understanding from videos. In: 2019 IEEE International Conference on Multimedia and Expo (ICME), pp. 1132\u20131137. IEEE","DOI":"10.1109\/ICME.2019.00198"},
{"key":"11466_CR9","doi-asserted-by":"crossref","unstructured":"Dai Q, Carr P, Sigal L, Hoiem D(2015) Family member identification from photo collections. In: 2015 IEEE Winter Conference on Applications of Computer Vision, pp. 982\u2013989. IEEE","DOI":"10.1109\/WACV.2015.136"},
{"key":"11466_CR10","doi-asserted-by":"crossref","unstructured":"Deng J, Guo J, Xue N, Zafeiriou S\u00a0(2019) Arcface: Additive angular margin loss for deep face recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4690\u20134699","DOI":"10.1109\/CVPR.2019.00482"},
{"key":"11466_CR11","doi-asserted-by":"crossref","unstructured":"Deng J, Guo J, Zhou Y, Yu J, Kotsia I, Zafeiriou S\u00a0(2019) Retinaface: Single-stage dense face localisation in the wild. arXiv preprint arXiv:1905.00641","DOI":"10.1109\/CVPR42600.2020.00525"},
{"key":"11466_CR12","doi-asserted-by":"crossref","unstructured":"Dibeklioglu H\u00a0(2017) Visual transformation aided contrastive learning for video-based kinship verification. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2459\u20132468","DOI":"10.1109\/ICCV.2017.269"},
{"key":"11466_CR13","doi-asserted-by":"crossref","unstructured":"Ding L, Yilmaz A\u00a0(2010) Learning relations among movie characters: A social network perspective. In: European conference on computer vision, pp. 410\u2013423. Springer","DOI":"10.1007\/978-3-642-15561-1_30"},
{"key":"11466_CR14","doi-asserted-by":"crossref","unstructured":"Ding L, Yilmaz A\u00a0(2011) Inferring social relations from visual concepts. In: 2011 International Conference on Computer Vision, pp. 699\u2013706. IEEE","DOI":"10.1109\/ICCV.2011.6126306"},
{"key":"11466_CR15","unstructured":"Feichtenhofer C, Fan H, Malik J, He K\u00a0(2018) Slowfast networks for video recognition. CoRR abs\/1812.03982.\u00a0http:\/\/arxiv.org\/abs\/1812.03982"},
{"key":"11466_CR16","doi-asserted-by":"crossref","unstructured":"Feichtenhofer C, Fan H, Malik J, He K\u00a0(2019) Slowfast networks for video recognition. In: Proceedings of the IEEE international conference on computer vision, pp. 6202\u20136211","DOI":"10.1109\/ICCV.2019.00630"},
{"key":"11466_CR17","unstructured":"Feng F, Yang Y, Cer D, Arivazhagan N, Wang W\u00a0(2020) Language-agnostic bert sentence embedding. arXiv preprint arXiv:2007.01852"},
{"key":"11466_CR18","doi-asserted-by":"crossref","unstructured":"Goel A, Ma KT, Tan C (2019) An end-to-end network for generating social relationship graphs. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 11186\u201311195","DOI":"10.1109\/CVPR.2019.01144"},
{"key":"11466_CR19","doi-asserted-by":"crossref","unstructured":"Golder S (2008) Measuring social networks with digital photograph collections. In: Proceedings of the nineteenth ACM conference on Hypertext and hypermedia, pp. 43\u201348","DOI":"10.1145\/1379092.1379104"},
{"key":"11466_CR20","doi-asserted-by":"crossref","unstructured":"He K, Gkioxari G, Doll\u00e1r P, Girshick R\u00a0(2017) Mask r-cnn. In: Proceedings of the IEEE international conference on computer vision, pp. 2961\u20132969","DOI":"10.1109\/ICCV.2017.322"},
{"key":"11466_CR21","doi-asserted-by":"crossref","unstructured":"He K, Zhang X, Ren S, Sun J\u00a0(2016) Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770\u2013778","DOI":"10.1109\/CVPR.2016.90"},
{"key":"11466_CR22","unstructured":"Huang G, Mattar MA, Berg TL, Learned-Miller E\u00a0(2008) Labeled faces in the wild: A database forstudying face recognition in unconstrained environments"},
{"issue":"11","key":"11466_CR23","doi-asserted-by":"publisher","first-page":"3137","DOI":"10.1109\/TMM.2018.2823900","volume":"20","author":"YG Jiang","year":"2018","unstructured":"Jiang YG, Wu Z, Tang J, Li Z, Xue X, Chang SF (2018)\u00a0Modeling multimodal clues in a hybrid deep learning framework for video classification. IEEE Trans Multimed 20(11):3137\u20133147","journal-title":"IEEE Transactions on Multimedia"},
{"key":"11466_CR24","doi-asserted-by":"crossref","unstructured":"Kampman O, Barezi EJ, Bertero D, Fung P\u00a0(2018) Investigating audio, video, and text fusion methods for end-to-end automatic personality prediction. In: Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pp. 606\u2013611","DOI":"10.18653\/v1\/P18-2096"},
{"issue":"4","key":"11466_CR25","doi-asserted-by":"publisher","first-page":"779","DOI":"10.1007\/s11760-020-01796-z","volume":"15","author":"K Kanagaraj","year":"2021","unstructured":"Kanagaraj K, Priya GGL (2021)\u00a0A new 3d convolutional neural network (3d-cnn) framework for multimedia event detection. Signal Image Video Process 15(4):779\u2013787","journal-title":"Signal, Image and Video Processing"},
{"key":"11466_CR26","doi-asserted-by":"crossref","unstructured":"Kemelmacher-Shlizerman I, Seitz S, Miller D, Brossard E\u00a0(2016) The megaface benchmark: 1 million faces for recognition at scale. 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR) pp. 4873\u20134882","DOI":"10.1109\/CVPR.2016.527"},
{"key":"11466_CR27","doi-asserted-by":"crossref","unstructured":"Khademi M\u00a0(2020) Multimodal neural graph memory networks for visual question answering. In: Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 7177\u20137188","DOI":"10.18653\/v1\/2020.acl-main.643"},
{"issue":"3","key":"11466_CR28","doi-asserted-by":"publisher","first-page":"1329","DOI":"10.1109\/TIP.2018.2840880","volume":"28","author":"N Kohli","year":"2018","unstructured":"Kohli N, Yadav D, Vatsa M, Singh R, Noore A (2018)\u00a0Supervised mixed norm autoencoder for kinship verification in unconstrained videos. IEEE Trans Image Process 28(3):1329\u20131341","journal-title":"IEEE Transactions on Image Processing"},
{"key":"11466_CR29","doi-asserted-by":"crossref","unstructured":"Kukleva A, Tapaswi M, Laptev I\u00a0(2020) Learning interactions and relationships between movie characters. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 9849\u20139858","DOI":"10.1109\/CVPR42600.2020.00987"},
{"key":"11466_CR30","doi-asserted-by":"crossref","unstructured":"Li J, Wong Y, Zhao Q, Kankanhalli MS\u00a0(2017) Dual-glance model for deciphering social relationships. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2650\u20132659","DOI":"10.1109\/ICCV.2017.289"},
{"key":"11466_CR31","doi-asserted-by":"crossref","unstructured":"Li M, Zareian A, Lin Y, Pan X, Whitehead S, Chen B, Wu B, Ji H, Chang SF, Voss C et\u00a0al\u00a0(2020) Gaia: A fine-grained multimedia knowledge extraction system. In: Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: System Demonstrations, pp. 77\u201386","DOI":"10.18653\/v1\/2020.acl-demos.11"},
{"key":"11466_CR32","unstructured":"Liu J, Deng Y, Bai T, Huang C\u00a0(2015) Targeting ultimate accuracy: Face recognition via deep embedding. ArXiv abs\/1506.07310"},
{"key":"11466_CR33","doi-asserted-by":"crossref","unstructured":"Liu W, Wen Y, Yu Z, Li M, Raj B, Song L\u00a0(2017) Sphereface: Deep hypersphere embedding for face recognition. 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR) pp. 6738\u20136746","DOI":"10.1109\/CVPR.2017.713"},
{"key":"11466_CR34","unstructured":"Liu W, Wen Y, Yu Z, Yang M\u00a0(2016) Large-margin softmax loss for convolutional neural networks. ArXiv abs\/1612.02295"},
{"key":"11466_CR35","doi-asserted-by":"crossref","unstructured":"Liu X, Liu W, Zhang M, Chen J, Gao L, Yan C, Mei T (2019) Social relation recognition from videos via multi-scale spatial-temporal reasoning. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3566\u20133574","DOI":"10.1109\/CVPR.2019.00368"},
{"key":"11466_CR36","unstructured":"Liu Y, Peng B, Shi P, Yan H, Zhou Y, Han B, Zheng Y, Lin C, Jiang J, Fan Y et\u00a0al\u00a0(2018) iqiyi-vid: A large dataset for multi-modal person identification. arXiv preprint arXiv:1811.07548"},
{"key":"11466_CR37","doi-asserted-by":"crossref","unstructured":"Long X, Gan C, De\u00a0Melo G, Wu J, Liu X, Wen S\u00a0(2018) Attention clusters: Purely attention based local feature integration for video classification. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 7834\u20137843","DOI":"10.1109\/CVPR.2018.00817"},
{"key":"11466_CR38","doi-asserted-by":"crossref","unstructured":"Lv J, Liu W, Zhou L, Wu B, Ma H\u00a0(2018) Multi-stream fusion model for social relation recognition from videos. In: International Conference on Multimedia Modeling, pp. 355\u2013368. Springer","DOI":"10.1007\/978-3-319-73603-7_29"},
{"key":"11466_CR39","doi-asserted-by":"crossref","unstructured":"Lv J, Wu B (2019) Spatio-temporal attention model based on multi-view for social relation understanding. In: International Conference on Multimedia Modeling, pp. 390\u2013401. Springer","DOI":"10.1007\/978-3-030-05716-9_32"},
{"key":"11466_CR40","doi-asserted-by":"publisher","first-page":"25958","DOI":"10.1109\/ACCESS.2018.2832087","volume":"6","author":"J Lv","year":"2018","unstructured":"Lv J, Wu B, Zhou L, Wang H (2018)\u00a0Storyrolenet: Social network construction of role relationship in video. IEEE Access 6:25958\u201325969\u00a0","journal-title":"IEEE Access"},
{"key":"11466_CR41","doi-asserted-by":"crossref","unstructured":"Nan CJ, Kim KM, Zhang BT\u00a0(2015) Social network analysis of tv drama characters via deep concept hierarchies. In: 2015 IEEE\/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM), pp. 831\u2013836. IEEE","DOI":"10.1145\/2808797.2809306"},
{"key":"11466_CR42","doi-asserted-by":"crossref","unstructured":"Parkhi O, Vedaldi A, Zisserman A\u00a0(2015) Deep face recognition. In: BMVC","DOI":"10.5244\/C.29.41"},
{"key":"11466_CR43","doi-asserted-by":"crossref","unstructured":"Ramanathan V, Yao B, Fei-Fei L\u00a0(2013) Social role discovery in human events. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2475\u20132482","DOI":"10.1109\/CVPR.2013.320"},
{"key":"11466_CR44","doi-asserted-by":"crossref","unstructured":"Schroff F, Kalenichenko D, Philbin J\u00a0(2015) Facenet: A unified embedding for face recognition and clustering. 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR) pp. 815\u2013823","DOI":"10.1109\/CVPR.2015.7298682"},
{"key":"11466_CR45","doi-asserted-by":"crossref","unstructured":"Sun Q, Schiele B, Fritz M\u00a0(2017) A domain based approach to social relation recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3481\u20133490","DOI":"10.1109\/CVPR.2017.54"},
{"key":"11466_CR46","unstructured":"Tran D, Bourdev LD, Fergus R, Torresani L, Paluri M\u00a0(2014) C3D: generic features for video analysis. CoRR abs\/1412.0767. http:\/\/arxiv.org\/abs\/1412.0767"},
{"key":"11466_CR47","doi-asserted-by":"crossref","unstructured":"Vicol P, Tapaswi M, Castrejon L, Fidler S\u00a0(2018) Moviegraphs: Towards understanding human-centric situations from videos. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 8581\u20138590","DOI":"10.1109\/CVPR.2018.00895"},
{"key":"11466_CR48","doi-asserted-by":"crossref","unstructured":"Wang H, Wang Y, Zhou Z, Ji X, Li Z, Gong D, Zhou J, Liu W\u00a0(2018) Cosface: Large margin cosine loss for deep face recognition. 2018 IEEE\/CVF Conference on Computer Vision and Pattern Recognition pp. 5265\u20135274","DOI":"10.1109\/CVPR.2018.00552"},
{"key":"11466_CR49","doi-asserted-by":"crossref","unstructured":"Wang L, Xiong Y, Wang Z, Qiao Y, Lin D, Tang X, Gool LV\u00a0(2016) Temporal segment networks: Towards good practices for deep action recognition. CoRR abs\/1608.00859. http:\/\/arxiv.org\/abs\/1608.00859","DOI":"10.1007\/978-3-319-46484-8_2"},
{"key":"11466_CR50","doi-asserted-by":"crossref","unstructured":"Wang Z, Chen T, Ren J, Yu W, Cheng H, Lin L\u00a0(2018) Deep reasoning with knowledge graph for social relationship understanding. arXiv preprint arXiv:1807.00504","DOI":"10.24963\/ijcai.2018\/142"},
{"key":"11466_CR51","doi-asserted-by":"crossref","unstructured":"Wen Y, Zhang K, Li Z, Qiao Y (2016)\u00a0A discriminative feature learning approach for deep face recognition. In: ECCV","DOI":"10.1007\/978-3-319-46478-7_31"},
{"issue":"2","key":"11466_CR52","doi-asserted-by":"publisher","first-page":"256","DOI":"10.1109\/TMM.2008.2009684","volume":"11","author":"CY Weng","year":"2009","unstructured":"Weng CY, Chu WT, Wu JL (2009)\u00a0Rolenet: Movie analysis from the perspective of social networks. IEEE Trans Multimed 11(2):256\u2013271","journal-title":"IEEE Transactions on Multimedia"},
{"key":"11466_CR53","doi-asserted-by":"crossref","unstructured":"Wu P, Ding W, Mao Z, Tretter D\u00a0(2009) Close & closer: Discover social relationship from photo collections. In: 2009 IEEE International Conference on Multimedia and Expo, pp. 1652\u20131655. IEEE","DOI":"10.1109\/ICME.2009.5202837"},
{"key":"11466_CR54","doi-asserted-by":"crossref","unstructured":"Wu X, Granger E, Kinnunen TH, Feng X, Hadid A (2019) Audio-visual kinship verification in the wild. In: 2019 International Conference on Biometrics (ICB), pp. 1\u20138. IEEE","DOI":"10.1109\/ICB45273.2019.8987241"},
{"key":"11466_CR55","doi-asserted-by":"publisher","first-page":"15","DOI":"10.1016\/j.patcog.2017.03.001","volume":"75","author":"H Yan","year":"2018","unstructured":"Yan H, Hu J (2018)\u00a0Video-based kinship verification using distance metric learning. Pattern Recognit 75:15\u201324","journal-title":"Pattern Recognition"},
{"key":"11466_CR56","doi-asserted-by":"crossref","unstructured":"Yeh MC, Tseng MC, Wu WP\u00a0(2012) Automatic social network construction from movies using film-editing cues. In: 2012 IEEE International Conference on Multimedia and Expo Workshops, pp. 242\u2013247. IEEE","DOI":"10.1109\/ICMEW.2012.48"},
{"key":"11466_CR57","doi-asserted-by":"crossref","unstructured":"Yuan K, Yao H, Ji R, Sun X\u00a0(2010) Mining actor correlations with hierarchical concurrence parsing. In: 2010 IEEE International Conference on Acoustics, Speech and Signal Processing, pp. 798\u2013801. IEEE","DOI":"10.1109\/ICASSP.2010.5494953"},
{"issue":"1","key":"11466_CR58","first-page":"183","volume":"56","author":"P Yuxin","year":"2019","unstructured":"Yuxin P, Jinwei Q, Xin H (2019)\u00a0Current research status and prospects on multimedia content understanding. J Comput Res Dev 56(1):183\u2013208","journal-title":"Journal of Computer Research and Development"},
{"key":"11466_CR59","doi-asserted-by":"crossref","unstructured":"Zadeh A, Chen M, Poria S, Cambria E, Morency L\u00a0(2017) Tensor fusion network for multimodal sentiment analysis. In: Conference on Empirical Methods in Natural Language Processing, pp. 1103\u20131114","DOI":"10.18653\/v1\/D17-1115"},
{"key":"11466_CR60","doi-asserted-by":"crossref","unstructured":"Zhang M, Liu X, Liu W, Zhou A, Ma H, Mei T\u00a0(2019) Multi-granularity reasoning for social relation recognition from images. In: 2019 IEEE International Conference on Multimedia and Expo (ICME), pp. 1618\u20131623. IEEE","DOI":"10.1109\/ICME.2019.00279"},
{"key":"11466_CR61","doi-asserted-by":"crossref","unstructured":"Zhang Z, Luo P, Loy CC, Tang X\u00a0(2015) Learning social relation traits from face images. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 3631\u20133639","DOI":"10.1109\/ICCV.2015.414"},
{"key":"11466_CR62","doi-asserted-by":"crossref","unstructured":"Zhong Y, Arandjelovi\u0107 R, Zisserman A\u00a0(2018) Ghostvlad for set-based face recognition. In: Asian Conference on Computer Vision, pp. 35\u201350. Springer","DOI":"10.1007\/978-3-030-20890-5_3"},
{"key":"11466_CR63","doi-asserted-by":"crossref","unstructured":"Zhou L, Lv J, Wu B\u00a0(2017) Social network construction of the role relation in unstructured data based on multi-view. In: 2017 IEEE Second International Conference on Data Science in Cyberspace (DSC), pp. 382\u2013388. IEEE","DOI":"10.1109\/DSC.2017.78"},
{"key":"11466_CR64","doi-asserted-by":"crossref","unstructured":"Zhou L, Wu B, Lv J\u00a0(2018) Sre-net model for automatic social relation extraction from video. In: CCF Conference on Big Data, pp. 442\u2013460. Springer","DOI":"10.1007\/978-981-13-2922-7_30"},
{"key":"11466_CR65","doi-asserted-by":"crossref","unstructured":"Zhu Z, Yu J, Wang Y, Sun Y, Hu Y, Wu Q\u00a0(2020) Mucko: Multi-layer cross-modal knowledge reasoning for fact-based visualquestion answering. arXiv preprint arXiv:2006.09073","DOI":"10.24963\/ijcai.2020\/153"}],
"container-title":["Multimedia Tools and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-021-11466-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11042-021-11466-y\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-021-11466-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,2,22]],"date-time":"2022-02-22T06:17:50Z","timestamp":1645510670000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11042-021-11466-y"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,9,15]]},"references-count":65,"journal-issue":{"issue":"4","published-print":{"date-parts":[[2022,2]]}},"alternative-id":["11466"],"URL":"https:\/\/doi.org\/10.1007\/s11042-021-11466-y","relation":{},"ISSN":["1380-7501","1573-7721"],"issn-type":[{"type":"print","value":"1380-7501"},{"type":"electronic","value":"1573-7721"}],"subject":[],"published":{"date-parts":[[2021,9,15]]},"assertion":[{"value":"26 December 2020","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"6 June 2021","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"19 August 2021","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"15 September 2021","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}