{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,26]],"date-time":"2024-10-26T21:40:07Z","timestamp":1729978807319,"version":"3.28.0"},"reference-count":24,"publisher":"Springer Science and Business Media LLC","issue":"4","license":[{"start":{"date-parts":[[2024,8,27]],"date-time":"2024-08-27T00:00:00Z","timestamp":1724716800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,8,27]],"date-time":"2024-08-27T00:00:00Z","timestamp":1724716800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Artif Life Robotics"],"published-print":{"date-parts":[[2024,11]]},"DOI":"10.1007\/s10015-024-00960-9","type":"journal-article","created":{"date-parts":[[2024,8,27]],"date-time":"2024-08-27T17:02:36Z","timestamp":1724778156000},"page":"536-545","update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["A cognitive strategy for service robots in recognizing emotional attribute of objects"],"prefix":"10.1007","volume":"29","author":[{"given":"Hao","family":"Wu","sequence":"first","affiliation":[]},{"given":"Jiaxuan","family":"Du","sequence":"additional","affiliation":[]},{"given":"Qin","family":"Cheng","sequence":"additional","affiliation":[]},{"given":"Qing","family":"Ma","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,8,27]]},"reference":[{"key":"960_CR1","doi-asserted-by":"publisher","first-page":"286","DOI":"10.1177\/0963721411422522","volume":"20","author":"LF Barrett","year":"2011","unstructured":"Barrett LF, Mesquita B, Gendron M (2011) Context in emotion perception. Curr Dir Psychol Sci 20:286\u2013290","journal-title":"Curr Dir Psychol Sci"},{"key":"960_CR2","first-page":"2755","volume":"42","author":"R Kosti","year":"2020","unstructured":"Kosti R, Alvarez JM, Recasens A, Lapedriza A (2020) Context based emotion recognition using EMOTIC dataset. IEEE Trans Pattern Anal Mach Intell 42:2755\u20132766","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"960_CR3","doi-asserted-by":"crossref","unstructured":"Lowe DG (1999) Object recognition from local scale-invariant features. In: Proceedings of the Seventh IEEE International Conference on Computer Vision. IEEE, pp 1150\u20131157 vol.2","DOI":"10.1109\/ICCV.1999.790410"},{"key":"960_CR4","doi-asserted-by":"crossref","unstructured":"Dalal N, Triggs B (2005) Histograms of oriented gradients for human detection. In: 2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR\u201905). IEEE, pp 886\u2013893 vol. 1","DOI":"10.1109\/CVPR.2005.177"},{"key":"960_CR5","doi-asserted-by":"publisher","first-page":"84","DOI":"10.1145\/3065386","volume":"60","author":"A Krizhevsky","year":"2017","unstructured":"Krizhevsky A, Sutskever I, Hinton GE (2017) ImageNet classification with deep convolutional neural networks. Commun ACM 60:84\u201390","journal-title":"Commun ACM"},{"key":"960_CR6","doi-asserted-by":"crossref","unstructured":"He K, Zhang X, Ren S, Sun J (2015) Deep residual learning for image recognition. Proc IEEE Comput Soc Conf Comput Vis Pattern Recognit 770\u2013778","DOI":"10.1109\/CVPR.2016.90"},{"key":"960_CR7","doi-asserted-by":"crossref","unstructured":"Yuan Y, Tian G, Zhang M (2017) Autonomous planning of service robot based on natural language tasks in intelligent space. In: 2017 Chinese Automation Congress (CAC). IEEE, pp 5437\u20135442","DOI":"10.1109\/CAC.2017.8243748"},{"key":"960_CR8","first-page":"580","volume":"2014","author":"RB Girshick","year":"2013","unstructured":"Girshick RB, Donahue J, Darrell T, Malik J (2013) Rich feature hierarchies for accurate object detection and semantic segmentation. IEEE Conf Comput Vision Pattern Recognit 2014:580\u2013587","journal-title":"IEEE Conf Comput Vision Pattern Recognit"},{"key":"960_CR9","doi-asserted-by":"publisher","first-page":"1137","DOI":"10.1109\/TPAMI.2016.2577031","volume":"39","author":"S Ren","year":"2015","unstructured":"Ren S, He K, Girshick RB, Sun J (2015) Faster R-CNN: towards real-time object detection with region proposal networks. IEEE Trans Pattern Anal Mach Intell 39:1137\u20131149","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"960_CR10","doi-asserted-by":"publisher","first-page":"21","DOI":"10.1007\/978-3-319-46448-0_2","volume-title":"Computer Vision \u2013 ECCV 2016","author":"W Liu","year":"2016","unstructured":"Liu W, Anguelov D, Erhan D et al (2016) SSD: Single Shot MultiBox Detector. Computer Vision \u2013 ECCV 2016. Springer International Publishing, Springer, Cham, pp 21\u201337"},{"key":"960_CR11","first-page":"779","volume-title":"You only look once: Unified, real-time object detection","author":"J Redmon","year":"2015","unstructured":"Redmon J, Divvala S, Girshick RB, Farhadi A (2015) You only look once: Unified, real-time object detection. Proc IEEE Comput Soc Conf Comput Vis Pattern Recognit, Springer, Cham, pp 779\u2013788"},{"key":"960_CR12","unstructured":"Redmon J, Farhadi A (2018) YOLOv3: An Incremental Improvement. arXiv [cs.CV]"},{"key":"960_CR13","doi-asserted-by":"publisher","first-page":"335","DOI":"10.1007\/s10579-008-9076-6","volume":"42","author":"C Busso","year":"2008","unstructured":"Busso C, Bulut M, Lee C-C et al (2008) IEMOCAP: interactive emotional dyadic motion capture database. Lang Resour Eval 42:335\u2013359","journal-title":"Lang Resour Eval"},{"key":"960_CR14","doi-asserted-by":"publisher","first-page":"18","DOI":"10.1109\/TAFFC.2017.2740923","volume":"10","author":"A Mollahosseini","year":"2019","unstructured":"Mollahosseini A, Hasani B, Mahoor MH (2019) AffectNet: a database for facial expression, valence, and arousal computing in the wild. IEEE Trans Affective Comput 10:18\u201331","journal-title":"IEEE Trans Affective Comput"},{"key":"960_CR15","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1606.06259","author":"A Zadeh","year":"2016","unstructured":"Zadeh A, Zellers R, Pincus E, Morency L-P (2016) MOSI: multimodal corpus of sentiment intensity and subjectivity analysis in online opinion videos. arXiv [csCL]. https:\/\/doi.org\/10.48550\/arXiv.1606.06259","journal-title":"arXiv [cs.CL]"},{"key":"960_CR16","volume-title":"Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics","author":"A Bagher Zadeh","year":"2018","unstructured":"Bagher Zadeh A, Liang PP, Poria S et al (2018) Multimodal language analysis in the wild: CMU-MOSEI dataset and interpretable dynamic fusion graph. In: Gurevych I, Miyao Y (eds) Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics. Association for Computational Linguistics, Melbourne"},{"key":"960_CR17","doi-asserted-by":"crossref","unstructured":"Lee J, Kim S, Kim S, et al (2019) Context-aware emotion recognition networks. ICCV 10142\u201310151","DOI":"10.1109\/ICCV.2019.01024"},{"key":"960_CR18","doi-asserted-by":"publisher","first-page":"34","DOI":"10.1109\/MMUL.2012.26","volume":"19","author":"A Dhall","year":"2012","unstructured":"Dhall A, Goecke R, Lucey S, Gedeon T (2012) Collecting large, richly annotated facial-expression databases from movies. IEEE Multimedia 19:34\u201341","journal-title":"IEEE Multimedia"},{"key":"960_CR19","doi-asserted-by":"crossref","unstructured":"Dhall A, Goecke R, Lucey S, Gedeon T (2011) Static facial expression analysis in tough conditions: Data, evaluation protocol and benchmark. In: 2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops). IEEE, pp 2106\u20132112","DOI":"10.1109\/ICCVW.2011.6130508"},{"key":"960_CR20","doi-asserted-by":"publisher","first-page":"2177","DOI":"10.1007\/s00371-022-02472-8","volume":"39","author":"H Yang","year":"2023","unstructured":"Yang H, Fan Y, Lv G et al (2023) Exploiting emotional concepts for image emotion recognition. Vis Comput 39:2177\u20132190","journal-title":"Vis Comput"},{"key":"960_CR21","doi-asserted-by":"publisher","first-page":"7432","DOI":"10.1109\/TIP.2021.3106813","volume":"30","author":"J Yang","year":"2021","unstructured":"Yang J, Li J, Wang X et al (2021) Stimuli-aware visual emotion analysis. IEEE Trans Image Process 30:7432\u20137445","journal-title":"IEEE Trans Image Process"},{"key":"960_CR22","doi-asserted-by":"publisher","first-page":"7139","DOI":"10.1109\/TMM.2022.3217414","volume":"25","author":"S Lee","year":"2023","unstructured":"Lee S, Ryu C, Park E (2023) OSANet: object semantic attention network for visual sentiment analysis. IEEE Trans Multimedia 25:7139\u20137148","journal-title":"IEEE Trans Multimedia"},{"key":"960_CR23","doi-asserted-by":"publisher","first-page":"105245","DOI":"10.1016\/j.knosys.2019.105245","volume":"191","author":"J Zhang","year":"2020","unstructured":"Zhang J, Chen M, Sun H et al (2020) Object semantics sentiment correlation analysis enhanced image sentiment classification. Knowl-Based Syst 191:105245","journal-title":"Knowl-Based Syst"},{"key":"960_CR24","doi-asserted-by":"crossref","unstructured":"Hu J, Shen L, Sun G (2018) Squeeze-and-Excitation Networks. pp 7132\u20137141","DOI":"10.1109\/CVPR.2018.00745"}],"container-title":["Artificial Life and Robotics"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10015-024-00960-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10015-024-00960-9\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10015-024-00960-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,26]],"date-time":"2024-10-26T21:02:40Z","timestamp":1729976560000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10015-024-00960-9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,8,27]]},"references-count":24,"journal-issue":{"issue":"4","published-print":{"date-parts":[[2024,11]]}},"alternative-id":["960"],"URL":"https:\/\/doi.org\/10.1007\/s10015-024-00960-9","relation":{},"ISSN":["1433-5298","1614-7456"],"issn-type":[{"type":"print","value":"1433-5298"},{"type":"electronic","value":"1614-7456"}],"subject":[],"published":{"date-parts":[[2024,8,27]]},"assertion":[{"value":"13 May 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"23 July 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"27 August 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}]}}