{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,28]],"date-time":"2025-03-28T07:18:56Z","timestamp":1743146336955,"version":"3.40.3"},"publisher-location":"Cham","reference-count":19,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031533013"},{"type":"electronic","value":"9783031533020"}],"license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-3-031-53302-0_11","type":"book-chapter","created":{"date-parts":[[2024,1,28]],"date-time":"2024-01-28T09:02:09Z","timestamp":1706432529000},"page":"145-158","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["LUMOS-DM: Landscape-Based Multimodal Scene Retrieval Enhanced by\u00a0Diffusion Model"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-8537-1331","authenticated-orcid":false,"given":"Viet-Tham","family":"Huynh","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7729-2927","authenticated-orcid":false,"given":"Trong-Thuan","family":"Nguyen","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2523-8851","authenticated-orcid":false,"given":"Quang-Thuc","family":"Nguyen","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5460-0229","authenticated-orcid":false,"given":"Mai-Khiem","family":"Tran","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0236-7992","authenticated-orcid":false,"given":"Tam V.","family":"Nguyen","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3046-3041","authenticated-orcid":false,"given":"Minh-Triet","family":"Tran","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,1,29]]},"reference":[{"key":"11_CR1","unstructured":"Alqasrawi, Y.: Bridging the gap between local semantic concepts and bag of visual words for natural scene image retrieval (2022)"},{"issue":"10","key":"11_CR2","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pone.0274764","volume":"17","author":"R Bibi","year":"2022","unstructured":"Bibi, R., Mehmood, Z., Munshi, A., Yousaf, R.M., Ahmed, S.S.: Deep features optimization based on a transfer learning, genetic algorithm, and extreme learning machine for robust content-based image retrieval. PLoS ONE 17(10), e0274764 (2022)","journal-title":"PLoS ONE"},{"key":"11_CR3","doi-asserted-by":"crossref","unstructured":"Chang, S., Kopp, M., Ghamisi, P.: Dsfer-Net: a deep supervision and feature retrieval network for bitemporal change detection using modern hopfield networks (2023)","DOI":"10.1109\/TGRS.2024.3424532"},{"key":"11_CR4","unstructured":"Chen, J., Lai, H.: Ranking-aware uncertainty for text-guided image retrieval. ArXiv abs\/2308.08131 (2023). https:\/\/api.semanticscholar.org\/CorpusID:260926537"},{"key":"11_CR5","doi-asserted-by":"publisher","unstructured":"Chen, Y., Wang, J., Lin, L., Qi, Z., Ma, J., Shan, Y.: Tagging before alignment: integrating multi-modal tags for video-text retrieval. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 37, no. 1, pp. 396\u2013404 (2023). https:\/\/doi.org\/10.1609\/aaai.v37i1.25113, https:\/\/ojs.aaai.org\/index.php\/AAAI\/article\/view\/25113","DOI":"10.1609\/aaai.v37i1.25113"},{"key":"11_CR6","doi-asserted-by":"crossref","unstructured":"Gong, Y., Cosma, G.: Boon: a neural search engine for cross-modal information retrieval (2023)","DOI":"10.1145\/3606040.3617440"},{"key":"11_CR7","first-page":"6840","volume":"33","author":"J Ho","year":"2020","unstructured":"Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. Adv. Neural. Inf. Process. Syst. 33, 6840\u20136851 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"11_CR8","doi-asserted-by":"crossref","unstructured":"Ibrahimi, S., Sun, X., Wang, P., Garg, A., Sanan, A., Omar, M.: Audio-enhanced text-to-video retrieval using text-conditioned feature alignment (2023)","DOI":"10.1109\/ICCV51070.2023.01107"},{"key":"11_CR9","unstructured":"Li, H., Yang, S., Zhang, Y., Tao, D., Yu, Z.: Progressive feature mining and external knowledge-assisted text-pedestrian image retrieval (2023)"},{"key":"11_CR10","doi-asserted-by":"crossref","unstructured":"Lin, C., et al.: Text-adaptive multiple visual prototype matching for video-text retrieval. In: Oh, A.H., Agarwal, A., Belgrave, D., Cho, K. (eds.) Advances in Neural Information Processing Systems (2022). https:\/\/openreview.net\/forum?id=XevwsaZ-4z","DOI":"10.1186\/s13634-022-00887-y"},{"key":"11_CR11","doi-asserted-by":"crossref","unstructured":"Lin, X., et al.Towards fast adaptation of pretrained contrastive models for multi-channel video-language retrieval (2023)","DOI":"10.1109\/CVPR52729.2023.01426"},{"key":"11_CR12","doi-asserted-by":"publisher","first-page":"190","DOI":"10.1109\/TNB.2023.3303512","volume":"23","author":"J Pradhan","year":"2023","unstructured":"Pradhan, J., Pal, A.K., Hafizul Islam, S.K., Bhaya, C.: DNA encoding-based nucleotide pattern and deep features for instance and class-based image retrieval. IEEE Trans. Nanobiosc. 23, 190\u2013201 (2023)","journal-title":"IEEE Trans. Nanobiosc."},{"key":"11_CR13","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10684\u201310695 (2022)","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"11_CR14","unstructured":"Scotti, P.S., et al.: Reconstructing the mind\u2019s eye: fMRI-to-image with contrastive learning and diffusion priors (2023)"},{"key":"11_CR15","doi-asserted-by":"crossref","unstructured":"Siddharth, L., Li, G., Luo, J.: Enhancing patent retrieval using text and knowledge graph embeddings: a technical note (2022)","DOI":"10.1080\/09544828.2022.2144714"},{"issue":"12","key":"11_CR16","doi-asserted-by":"publisher","first-page":"7913","DOI":"10.1109\/TNNLS.2021.3084633","volume":"33","author":"P Staszewski","year":"2022","unstructured":"Staszewski, P., Jaworski, M., Cao, J., Rutkowski, L.: A new approach to descriptors generation for image retrieval by analyzing activations of deep neural network layers. IEEE Trans. Neural Netw. Learn. Syst. 33(12), 7913\u20137920 (2022)","journal-title":"IEEE Trans. Neural Netw. Learn. Syst."},{"key":"11_CR17","doi-asserted-by":"crossref","unstructured":"Sun, X., et al.: Pre-training with aspect-content text mutual prediction for multi-aspect dense retrieval (2023)","DOI":"10.1145\/3583780.3615157"},{"key":"11_CR18","unstructured":"Tan, M., Le, Q.: EfficientNet: rethinking model scaling for convolutional neural networks. In: International Conference on Machine Learning, pp. 6105\u20136114. PMLR (2019)"},{"key":"11_CR19","doi-asserted-by":"crossref","unstructured":"Zhang, L., Agrawala, M.: Adding conditional control to text-to-image diffusion models. arXiv preprint arXiv:2302.05543 (2023)","DOI":"10.1109\/ICCV51070.2023.00355"}],"container-title":["Lecture Notes in Computer Science","MultiMedia Modeling"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-53302-0_11","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,9]],"date-time":"2024-11-09T10:10:19Z","timestamp":1731147019000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-53302-0_11"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"ISBN":["9783031533013","9783031533020"],"references-count":19,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-53302-0_11","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024]]},"assertion":[{"value":"29 January 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"MMM","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Multimedia Modeling","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Amsterdam","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"The Netherlands","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 January 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2 February 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"30","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"mmm2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"ConfTool Pro","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"297","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"112","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"38% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.2","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.2","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}