{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,23]],"date-time":"2025-10-23T01:10:15Z","timestamp":1761181815691,"version":"build-2065373602"},"reference-count":52,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Multimedia"],"published-print":{"date-parts":[[2025]]},"DOI":"10.1109\/tmm.2025.3599077","type":"journal-article","created":{"date-parts":[[2025,8,19]],"date-time":"2025-08-19T18:16:45Z","timestamp":1755627405000},"page":"7255-7270","source":"Crossref","is-referenced-by-count":0,"title":["Semantic-Spatial Attention for Refined Object Placement in Text-to-Image Synthesis"],"prefix":"10.1109","volume":"27","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-6017-0552","authenticated-orcid":false,"given":"Jianwei","family":"Zheng","sequence":"first","affiliation":[{"name":"College of Computer Science and Technology, Zhejiang University of Technology, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-8521-2078","authenticated-orcid":false,"given":"Ni","family":"Xu","sequence":"additional","affiliation":[{"name":"College of Computer Science and Technology, Zhejiang University of Technology, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-8566-6073","authenticated-orcid":false,"given":"Wei","family":"Li","sequence":"additional","affiliation":[{"name":"College of Computer Science and Technology, Zhejiang University of Technology, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9200-9189","authenticated-orcid":false,"given":"Jiawei","family":"Jiang","sequence":"additional","affiliation":[{"name":"College of Computer Science and Technology, Zhejiang University of Technology, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0958-7285","authenticated-orcid":false,"given":"Xiaoqin","family":"Zhang","sequence":"additional","affiliation":[{"name":"College of Computer Science and Technology, Zhejiang University of Technology, Hangzhou, China"}]}],"member":"263","reference":[{"article-title":"Hierarchical text-conditional image generation with CLIP latents","year":"2022","author":"Ramesh","key":"ref1"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"ref3","first-page":"36479","article-title":"Photorealistic text-to-image diffusion models with deep language understanding","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Saharia","year":"2022"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01370"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02156"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00355"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/WACV57701.2024.00526"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00685"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3592116"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2017.2765202"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00089"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.629"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2018.2856256"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2023.3238554"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2022.3217384"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2021.3060291"},{"key":"ref17","first-page":"8821","article-title":"Zero-shot text-to-image generation","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Ramesh","year":"2021"},{"key":"ref18","first-page":"19822","article-title":"CogView: Mastering text-to-image generation via transformers","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Ding","year":"2021"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19784-0_6"},{"key":"ref20","first-page":"1","article-title":"Scaling autoregressive models for content-rich text-to-image generation","author":"Yu","year":"2022","journal-title":"Trans. Mach. Learn. Res."},{"key":"ref21","first-page":"16784","article-title":"GLIDE: Towards photorealistic image generation and editing with text-guided diffusion models","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Nichol","year":"2022"},{"key":"ref22","first-page":"36479","article-title":"Photorealistic text-to-image diffusion models with deep language understanding","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Saharia","year":"2022"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2024.3399075"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2024.3415357"},{"key":"ref25","first-page":"8780","article-title":"Diffusion models beat GANs on image synthesis","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Dhariwal","year":"2021"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1145\/3592450"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00246"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01410"},{"key":"ref29","first-page":"13781","article-title":"Sdedit: Guided image synthesis and editing with stochastic differential equations","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Meng","year":"2022"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1145\/3588432.3591513"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00191"},{"key":"ref32","first-page":"14369","article-title":"Prompt-to-prompt image editing with cross-attention control","volume-title":"Proc. 11th Int. Conf. Learn. Representations","author":"Hertz","year":"2023"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01762"},{"key":"ref34","first-page":"1737","article-title":"Multidiffusion: Fusing diffusion paths for controlled image generation","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Bar-Tal","year":"2023"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW59228.2023.00091"},{"article-title":"Mixture of diffusers for scene composition and high resolution image generation","year":"2023","author":"Jiménez","key":"ref36"},{"article-title":"eDiffi: Text-to-image diffusion models with an ensemble of expert denoisers","year":"2022","author":"Balaji","key":"ref37"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00714"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i5.28204"},{"key":"ref40","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Radford","year":"2021"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00708"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00758"},{"key":"ref43","first-page":"16222","article-title":"Diffusion self-guidance for controllable image generation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"36","author":"Epstein","year":"2023"},{"key":"ref44","first-page":"108974","article-title":"Understanding training-free diffusion guidance: Mechanisms and limitations","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Shen","year":"2024"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01834"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.303"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00166"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"ref50","first-page":"25278","article-title":"Laion-5b: An open large-scale dataset for training next generation image-text models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Schuhmann","year":"2022"},{"key":"ref51","first-page":"2209","article-title":"R&B: Region and boundary aware zero-shot grounded text-to-image generation","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Xiao","year":"2024"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00852"}],"container-title":["IEEE Transactions on Multimedia"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6046\/10844992\/11130401.pdf?arnumber=11130401","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,22]],"date-time":"2025-10-22T17:25:24Z","timestamp":1761153924000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11130401\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":52,"URL":"https:\/\/doi.org\/10.1109\/tmm.2025.3599077","relation":{},"ISSN":["1520-9210","1941-0077"],"issn-type":[{"type":"print","value":"1520-9210"},{"type":"electronic","value":"1941-0077"}],"subject":[],"published":{"date-parts":[[2025]]}}}