{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,7,22]],"date-time":"2025-07-22T10:31:19Z","timestamp":1753180279147,"version":"3.37.3"},"reference-count":34,"publisher":"Springer Science and Business Media LLC","issue":"4","license":[{"start":{"date-parts":[[2022,6,6]],"date-time":"2022-06-06T00:00:00Z","timestamp":1654473600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2022,6,6]],"date-time":"2022-06-06T00:00:00Z","timestamp":1654473600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Appl Intell"],"published-print":{"date-parts":[[2023,2]]},"DOI":"10.1007\/s10489-022-03653-7","type":"journal-article","created":{"date-parts":[[2022,6,6]],"date-time":"2022-06-06T23:02:46Z","timestamp":1654556566000},"page":"4257-4267","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":5,"title":["Dual discriminant adversarial cross-modal retrieval"],"prefix":"10.1007","volume":"53","author":[{"given":"Pei","family":"He","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8388-0055","authenticated-orcid":false,"given":"Meng","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Ding","family":"Tu","sequence":"additional","affiliation":[]},{"given":"Zhuo","family":"Wang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,6,6]]},"reference":[{"key":"3653_CR1","unstructured":"Wang K, Yin Q, Wang W, Wu S, Wang L (2016) A Comprehensive Survey on Cross-modal Retrieval. arXiv:abs\/1607.06215"},{"key":"3653_CR2","doi-asserted-by":"publisher","first-page":"2639","DOI":"10.1162\/0899766042321814","volume":"16","author":"D Hardoon","year":"2004","unstructured":"Hardoon D, Szedm\u00e1k S, Shawe-Taylor J (2004) Canonical correlation analysis: An overview with application to learning methods. Neural Comput 16:2639\u20132664","journal-title":"Neural Comput"},{"key":"3653_CR3","doi-asserted-by":"crossref","unstructured":"Chua T, Tang J, Hong R, Li H, Luo Z, Zheng Y (2009) NUS-WIDE: A real-world web image database from National University of Singapore. CIVR \u201909","DOI":"10.1145\/1646396.1646452"},{"key":"3653_CR4","doi-asserted-by":"crossref","unstructured":"Chen W, Liu Y, Bakker EM, Lew MS (2021) Integrating Information Theory and Adversarial Learning for Cross-modal Retrieval. arXiv:abs\/2104.04991","DOI":"10.1016\/j.patcog.2021.107983"},{"key":"3653_CR5","doi-asserted-by":"crossref","unstructured":"Zhang X, Lai H, Feng J (2018) Attention-Aware Deep Adversarial Hashing for Cross-Modal Retrieval. ECCV","DOI":"10.1007\/978-3-030-01267-0_36"},{"key":"3653_CR6","doi-asserted-by":"publisher","first-page":"2208","DOI":"10.1007\/s10489-019-01625-y","volume":"50","author":"Y Zhang","year":"2020","unstructured":"Zhang Y, Feng Y, Liu D, Shang J, Qiang B (2020) FRWCAE: Joint faster-RCNN and Wasserstein convolutional auto-encoder for instance retrieval. Appl Intell 50:2208\u20132221","journal-title":"Appl Intell"},{"key":"3653_CR7","unstructured":"Andrew G, Arora R, Bilmes J, Livescu K (2013) Deep Canonical Correlation Analysis. ICML"},{"key":"3653_CR8","doi-asserted-by":"crossref","unstructured":"He X, Peng Y, Xi-e L (2019) A new benchmark and approach for fine-grained cross-media retrieval. In: Proceedings of the 27th ACM international conference on multimedia","DOI":"10.1145\/3343031.3350974"},{"key":"3653_CR9","first-page":"449","volume":"47","author":"Y Wei","year":"2017","unstructured":"Wei Y, Zhao Y, Lu C, Wei S, Liu L, Zhu Z, Yan S (2017) Cross-Modal Retrieval with CNN visual features: A new baseline. IEEE Trans Cybern 47:449\u2013460","journal-title":"IEEE Trans Cybern"},{"key":"3653_CR10","doi-asserted-by":"crossref","unstructured":"Wang C, Yang H, Meinel C (2015) Deep semantic mapping for cross-modal retrieval. In: 2015 IEEE 27th international conference on tools with artificial intelligence (ICTAI), pp 234\u2013241","DOI":"10.1109\/ICTAI.2015.45"},{"key":"3653_CR11","doi-asserted-by":"publisher","first-page":"298","DOI":"10.1016\/j.ins.2020.08.009","volume":"546","author":"X Wang","year":"2021","unstructured":"Wang X, Hu P, Zhen L, Peng D (2021) DRSL: Deep relational similarity learning for cross-modal retrieval. Inf Sci 546:298\u2013 311","journal-title":"Inf Sci"},{"key":"3653_CR12","doi-asserted-by":"publisher","first-page":"179","DOI":"10.1007\/s10489-015-0693-7","volume":"44","author":"G Castellano","year":"2015","unstructured":"Castellano G, Fanelli A, Sforza G, Torsello MA (2015) Shape annotation for intelligent image retrieval. Appl Intell 44:179\u2013195","journal-title":"Appl Intell"},{"key":"3653_CR13","doi-asserted-by":"crossref","unstructured":"Wang B, Yang Y, Xu X, Hanjalic A, Shen HT (2017) Adversarial cross-modal retrieval. In: Proceedings of the 25th ACM international conference on multimedia","DOI":"10.1145\/3123266.3123326"},{"key":"3653_CR14","doi-asserted-by":"crossref","unstructured":"Zhen L, Hu P, Wang X, Peng D (2019) Deep supervised cross-modal retrieval. In: 2019 IEEE\/CVF conference on computer vision and pattern recognition (CVPR), pp 10386\u201310395","DOI":"10.1109\/CVPR.2019.01064"},{"key":"3653_CR15","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3284750","volume":"15","author":"Y Peng","year":"2019","unstructured":"Peng Y, Qi J, Yuan Y (2019) CM-GANS. ACM Transactions on Multimedia Computing, Communications, and Applications (TOMM) 15:1\u201324","journal-title":"ACM Transactions on Multimedia Computing, Communications, and Applications (TOMM)"},{"key":"3653_CR16","doi-asserted-by":"crossref","unstructured":"Li C, Deng C, Li N, Liu W, Gao X, Tao D (2018) Self-supervised adversarial hashing networks for cross-modal retrieval. In: 2018 IEEE\/CVF conference on computer vision and pattern recognition, pp 4242\u20134251","DOI":"10.1109\/CVPR.2018.00446"},{"key":"3653_CR17","doi-asserted-by":"crossref","unstructured":"Bai C, Zeng C, Ma Q, Zhang J, Chen S (2020) Deep Adversarial discrete hashing for Cross-Modal retrieval. In: Proceedings of the 2020 international conference on multimedia retrieval","DOI":"10.1145\/3372278.3390711"},{"key":"3653_CR18","unstructured":"Simonyan K, Zisserman A (2015) Very deep convolutional networks for Large-Scale image recognition. CoRR, arXiv:abs\/1409.1556"},{"key":"3653_CR19","unstructured":"Mikolov T, Sutskever I, Chen K, Corrado G, Dean J (2013) Distributed Representations of Words and Phrases and their Compositionality. NIPS"},{"key":"3653_CR20","doi-asserted-by":"crossref","unstructured":"Kang P, Lin Z, Yang Z, Fang X, Bronstein A, Li Q, Liu W (2021) Intra-class low-rank regularization for supervised and semi-supervised cross-modal retrieval. Appl Intell, pp 1\u201322","DOI":"10.1007\/s10489-021-02308-3"},{"key":"3653_CR21","unstructured":"Kingma DP, Ba J (2015) Adam: A method for stochastic optimization. CoRR, arXiv:abs\/1412.6980"},{"key":"3653_CR22","unstructured":"Rashtchian C, Young P, Hodosh M, Hockenmaier J (2010) Collecting Image Annotations Using Amazon\u2019s Mechanical Turk. Mturk@HLT-NAACL"},{"key":"3653_CR23","doi-asserted-by":"publisher","first-page":"965","DOI":"10.1109\/TCSVT.2013.2276704","volume":"24","author":"X Zhai","year":"2014","unstructured":"Zhai X, Peng Y, Xiao J (2014) Learning Cross-Media joint representation with sparse and semisupervised regularization. IEEE Trans Circuits Syst Video Technol 24:965\u2013978","journal-title":"IEEE Trans Circuits Syst Video Technol"},{"key":"3653_CR24","first-page":"1","volume":"17","author":"C Zhang","year":"2021","unstructured":"Zhang C, Song J, Zhu X, Zhu L, Zhang S (2021) HCMSL: Hybrid cross-modal similarity learning for cross-modal retrieval. ACM Trans Multimedia Comput Commun Appl (TOMM) 17:1\u201322","journal-title":"ACM Trans Multimedia Comput Commun Appl (TOMM)"},{"key":"3653_CR25","doi-asserted-by":"publisher","first-page":"405","DOI":"10.1109\/TMM.2017.2742704","volume":"20","author":"Y Peng","year":"2018","unstructured":"Peng Y, Qi J, Huang X, Yuan Y (2018) CCL: Cross-modal correlation learning with multigrained fusion by hierarchical network. IEEE Trans Multimedia 20:405\u2013420","journal-title":"IEEE Trans Multimedia"},{"key":"3653_CR26","first-page":"449","volume":"47","author":"Y Wei","year":"2017","unstructured":"Wei Y, Zhao Y, Lu C, Wei S, Liu L, Zhu Z, Yan S (2017) Cross-Modal Retrieval with CNN visual features: A new baseline. IEEE Trans Cybern 47:449\u2013460","journal-title":"IEEE Trans Cybern"},{"key":"3653_CR27","doi-asserted-by":"publisher","first-page":"79","DOI":"10.1007\/s00778-015-0391-4","volume":"25","author":"W Wang","year":"2015","unstructured":"Wang W, Yang X, Ooi B, Zhang D, Zhuang Y (2015) Effective deep learning-based multi-modal retrieval. The VLDB J 25:79\u2013101","journal-title":"The VLDB J"},{"key":"3653_CR28","doi-asserted-by":"crossref","unstructured":"Li Z, Lu W, Bao E, Xing W (2015) Learning a Semantic Space by Deep Network for Cross-media Retrieval. DMS","DOI":"10.18293\/DMS2015-005"},{"key":"3653_CR29","doi-asserted-by":"publisher","first-page":"521","DOI":"10.1109\/TPAMI.2013.142","volume":"36","author":"JC Pereira","year":"2014","unstructured":"Pereira JC, Coviello E, Doyle G, Rasiwasia N, Lanckriet G, Levy R, Vasconcelos N (2014) On the role of correlation and abstraction in Cross-Modal multimedia retrieval. IEEE Trans Pattern Anal Mach Intell 36:521\u2013535","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"3653_CR30","doi-asserted-by":"publisher","first-page":"1047","DOI":"10.1109\/TCYB.2018.2879846","volume":"50","author":"X Huang","year":"2020","unstructured":"Huang X, Peng Y, Yuan M (2020) MHTN: Modal-Adversarial Hybrid transfer network for Cross-Modal retrieval. IEEE Trans Cybern 50:1047\u20131059","journal-title":"IEEE Trans Cybern"},{"key":"3653_CR31","doi-asserted-by":"crossref","unstructured":"Zhou Y, Feng Y, Zhou M, Qiang B, UL, Zhu J (2021) Deep adversarial quantization network for Cross-Modal retrieval. In: ICASSP 2021 - 2021 IEEE international conference on acoustics, speech and signal processing (ICASSP), pp 4325\u20134329","DOI":"10.1109\/ICASSP39728.2021.9414247"},{"key":"3653_CR32","doi-asserted-by":"publisher","first-page":"489","DOI":"10.1109\/TCYB.2018.2868826","volume":"50","author":"JG Zhang","year":"2020","unstructured":"Zhang JG, Peng Y, Yuan M (2020) SCH-GAN: Semi-Supervised Cross-Modal Hashing by generative adversarial network. IEEE Trans Cybern 50:489\u2013502","journal-title":"IEEE Trans Cybern"},{"key":"3653_CR33","doi-asserted-by":"publisher","first-page":"1261","DOI":"10.1109\/TMM.2018.2877122","volume":"21","author":"G Song","year":"2019","unstructured":"Song G, Wang D, Tan X (2019) Deep memory network for Cross-Modal retrieval. IEEE Transactions on Multimedia 21:1261\u20131275","journal-title":"IEEE Transactions on Multimedia"},{"key":"3653_CR34","first-page":"449","volume":"47","author":"Y Wei","year":"2017","unstructured":"Wei Y, Zhao Y, Lu C, Wei S, Liu L, Zhu Z, Yan S (2017) Cross-Modal Retrieval with CNN visual features: a new baseline. IEEE Trans Cybern 47:449\u2013460","journal-title":"IEEE Trans Cybern"}],"container-title":["Applied Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-022-03653-7.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10489-022-03653-7\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-022-03653-7.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,2,1]],"date-time":"2023-02-01T06:51:02Z","timestamp":1675234262000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10489-022-03653-7"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,6,6]]},"references-count":34,"journal-issue":{"issue":"4","published-print":{"date-parts":[[2023,2]]}},"alternative-id":["3653"],"URL":"https:\/\/doi.org\/10.1007\/s10489-022-03653-7","relation":{},"ISSN":["0924-669X","1573-7497"],"issn-type":[{"type":"print","value":"0924-669X"},{"type":"electronic","value":"1573-7497"}],"subject":[],"published":{"date-parts":[[2022,6,6]]},"assertion":[{"value":"19 April 2022","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"6 June 2022","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}]}}