{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T12:13:11Z","timestamp":1775131991966,"version":"3.50.1"},"reference-count":37,"publisher":"Springer Science and Business Media LLC","issue":"2","license":[{"start":{"date-parts":[[2026,2,3]],"date-time":"2026-02-03T00:00:00Z","timestamp":1770076800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2026,2,3]],"date-time":"2026-02-03T00:00:00Z","timestamp":1770076800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimedia Systems"],"published-print":{"date-parts":[[2026,4]]},"DOI":"10.1007\/s00530-025-02200-x","type":"journal-article","created":{"date-parts":[[2026,2,3]],"date-time":"2026-02-03T07:38:16Z","timestamp":1770104296000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["A real-time vehicle trajectory prediction method based on the fusion of spatio-temporal awareness graph and multi-scale dilated convolution"],"prefix":"10.1007","volume":"32","author":[{"given":"Xiang","family":"Gu","sequence":"first","affiliation":[]},{"given":"Chenwen","family":"Gu","sequence":"additional","affiliation":[]},{"given":"Jing","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Chao","family":"Li","sequence":"additional","affiliation":[]},{"given":"Qiwei","family":"Huang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,2,3]]},"reference":[{"key":"2200_CR1","doi-asserted-by":"crossref","unstructured":"Sun, Q., Huang, X., Gu, J., Williams, B.C., Zhao, H.: \u201cM2I: From factored marginal trajectory prediction to 
interactive prediction,\u201d in Proc. IEEE\/CVF Conf. Comput. Vision Pattern Recognition, 6543\u20136552 (2022)","DOI":"10.1109\/CVPR52688.2022.00643"},{"key":"2200_CR2","doi-asserted-by":"crossref","unstructured":"Choi, D., Min, K.: Hierarchical latent structure for multi-modal vehicle trajectory forecasting. In: Conf, E. (ed.) Comput, Springer, Vision 129\u2013145 (2022)","DOI":"10.1007\/978-3-031-20047-2_8"},{"key":"2200_CR3","unstructured":"Ngiam, J., et al.: \u201cScene Transformer: A unified architecture for predicting multiple agent trajectories,\u201d arXiv preprint arXiv:2106.08417, (2021)"},{"key":"2200_CR4","doi-asserted-by":"crossref","unstructured":"Sadid, S.H., Antoniou, C.: \u201cDynamic spatio-temporal graph neural network for surrounding-aware trajectory prediction of autonomous vehicles,\u201d IEEE Trans. Intell. Veh., (2024)","DOI":"10.1109\/TIV.2024.3406507"},{"issue":"8","key":"2200_CR5","doi-asserted-by":"publisher","first-page":"10695","DOI":"10.1007\/s11227-023-05850-8","volume":"80","author":"W Chen","year":"2024","unstructured":"Chen, W., Sang, H., Wang, J., et al.: STIGCN: Spatial\u2013temporal interaction-aware graph convolution network for pedestrian trajectory prediction. J. Supercomputing 80(8), 10695\u201310719 (2024)","journal-title":"J. Supercomputing"},{"key":"2200_CR6","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2023.102196","volume":"104","author":"Y Ma","year":"2024","unstructured":"Ma, Y., Lou, H., Yan, M., et al.: Spatio-temporal fusion graph convolutional network for traffic flow forecasting. Inf. Fusion 104, 102196 (2024)","journal-title":"Inf. Fusion"},{"key":"2200_CR7","doi-asserted-by":"publisher","DOI":"10.1016\/j.physa.2024.129746","volume":"641","author":"J Xu","year":"2024","unstructured":"Xu, J., Li, Y., Lu, W., et al.: A heterogeneous traffic spatio-temporal graph convolution model for traffic prediction. Physica A: Stat. Mech. Appl. 641, 129746 (2024)","journal-title":"Physica A: Stat. Mech. 
Appl."},{"issue":"10","key":"2200_CR8","doi-asserted-by":"publisher","first-page":"17654","DOI":"10.1109\/TITS.2022.3155749","volume":"23","author":"Z Sheng","year":"2022","unstructured":"Sheng, Z., Xu, Y., Xue, S., et al.: Graph-based spatial-temporal convolutional network for vehicle trajectory prediction in autonomous driving. IEEE Trans. Intell. Transport. Syst. 23(10), 17654\u201317665 (2022)","journal-title":"IEEE Trans. Intell. Transport. Syst."},{"key":"2200_CR9","unstructured":"Kusner, M., Sun, Y., Kolkin, N., et al.: \u201cFrom word embeddings to document distances,\u201d in Int, Conf. Mach. Learn., PMLR, 957\u2013966 (2015)"},{"key":"2200_CR10","unstructured":"Van Den Oord, A., Kalchbrenner, N., Kavukcuoglu, K.: \u201cPixel recurrent neural networks,\u201d in Int, Conf. Mach. Learn., PMLR, 1747\u20131756 (2016)"},{"key":"2200_CR11","doi-asserted-by":"publisher","DOI":"10.1016\/j.phycom.2024.102420","volume":"66","author":"L Wang","year":"2024","unstructured":"Wang, L., Che, L., Lam, K.Y., et al.: Mobile traffic prediction with attention-based hybrid deep learning. Physical Communication 66, 102420 (2024)","journal-title":"Physical Communication"},{"key":"2200_CR12","doi-asserted-by":"crossref","unstructured":"Chen, Q., Xiao, Z., Zhang, Z., et al.: \u201cMulti-adversarial Adaptive Transformers for Joint Multi-agent Trajectory Prediction,\u201d in Chinese Conf. Pattern Recognit. Comput. 
Vision (PRCV), Singapore: Springer Nature Singapore, 229\u2013241 (2023)","DOI":"10.1007\/978-981-99-8543-2_19"},{"key":"2200_CR13","unstructured":"Ngiam, J., et al.: \u201cScene Transformer: A unified architecture for predicting multiple agent trajectories,\u201d arXiv preprint arXiv:2106.08417, (2021)"},{"key":"2200_CR14","unstructured":"Li, Y., Yu, R., Shahabi, C., et al.: \u201cDiffusion convolutional recurrent neural network: Data-driven traffic forecasting,\u201d arXiv preprint arXiv:1707.01926, (2017)"},{"key":"2200_CR15","doi-asserted-by":"crossref","unstructured":"Yu, B., Yin, H., Zhu, Z.: \u201cSpatio-temporal graph convolutional networks: A deep learning framework for traffic forecasting,\u201d arXiv preprint arXiv:1709.04875, (2017)","DOI":"10.24963\/ijcai.2018\/505"},{"key":"2200_CR16","doi-asserted-by":"publisher","first-page":"35973","DOI":"10.1109\/ACCESS.2021.3062114","volume":"9","author":"J Zhu","year":"2021","unstructured":"Zhu, J., Wang, Q., Tao, C., et al.: AST-GCN: Attribute-augmented spatiotemporal graph convolution network for traffic forecasting. IEEE Access 9, 35973\u201335983 (2021)","journal-title":"IEEE Access"},{"key":"2200_CR17","unstructured":"Lan, S., Ma, Y., Huang, W., et al.: \u201cDstagnn: Dynamic spatial-temporal aware graph neural network for traffic flow forecasting,\u201d in Int, Conf. Mach. Learn., PMLR, 11906\u201311917 (2022)"},{"key":"2200_CR18","doi-asserted-by":"crossref","unstructured":"Liang, M., Yang, B., Hu, R., et al.: \u201cLearning lane graph representations for motion forecasting,\u201d in Computer Vision-ECCV. Springer International Publishing 2020, 541\u2013556 (2020)","DOI":"10.1007\/978-3-030-58536-5_32"},{"key":"2200_CR19","doi-asserted-by":"crossref","unstructured":"Ye, M., Cao, T., Chen, Q.: \u201cTPCN: Temporal point cloud networks for motion forecasting,\u201d in Proc. IEEE\/CVF Conf. Comput. 
Vision Pattern Recognition, 11318\u201311327 (2021)","DOI":"10.1109\/CVPR46437.2021.01116"},{"key":"2200_CR20","doi-asserted-by":"crossref","unstructured":"Gu, J., Sun, C., Zhao, H.: \u201cDensetnt: End-to-end trajectory prediction from dense goal sets,\u201d in Proc. IEEE\/CVF Int. Conf. Comput. Vision, 15303\u201315312 (2021)","DOI":"10.1109\/ICCV48922.2021.01502"},{"key":"2200_CR21","unstructured":"Bahdanau, D., Cho, K., Bengio, Y.: \u201cNeural machine translation by jointly learning to align and translate,\u201d arXiv preprint arXiv:1409.0473, (2014)"},{"key":"2200_CR22","doi-asserted-by":"crossref","unstructured":"Gao, J., Sun, C., Zhao, H., et al.: \u201cVectornet: Encoding HD maps and agent dynamics from vectorized representation,\u201d in Proc. IEEE\/CVF Conf. Comput. Vision Pattern Recognition, 11525\u201311533 (2020)","DOI":"10.1109\/CVPR42600.2020.01154"},{"key":"2200_CR23","first-page":"683","volume":"2020","author":"T Salzmann","year":"2020","unstructured":"Salzmann, T., Ivanovic, B., Chakravarty, P., et al.: \u201cTrajectron++: Dynamically-feasible trajectory forecasting with heterogeneous data,\" in Computer Vision\u202f\u2013 ECCV. Springer International Publishing 2020, 683\u2013700 (2020)","journal-title":"Springer International Publishing"},{"key":"2200_CR24","unstructured":"Titouan, V., Courty, N., Tavenard, R., et al.: \u201cOptimal transport for structured data with application on graphs,\u201d in Int, Conf. Mach. Learn., PMLR, 6275\u20136284 (2019)"},{"key":"2200_CR25","doi-asserted-by":"crossref","unstructured":"Zeng, W., Luo, W., Suo, S., et al.: \u201cEnd-to-end interpretable neural motion planner,\u201d in Proc. IEEE\/CVF Conf. Comput. 
Vision Pattern Recognition, 8660\u20138669 (2019)","DOI":"10.1109\/CVPR.2019.00886"},{"key":"2200_CR26","first-page":"541","volume":"2020","author":"M Liang","year":"2020","unstructured":"Liang, M., Yang, B., Hu, R., et al.: \u201cLearning lane graph representations for motion forecasting,\" in Computer Vision\u202f\u2013 ECCV. Springer International Publishing 2020, 541\u2013556 (2020)","journal-title":"Springer International Publishing"},{"key":"2200_CR27","unstructured":"Defferrard, M., Bresson, X., Vandergheynst, P.: \u201cConvolutional neural networks on graphs with fast localized spectral filtering,\u201d Advances in Neural Information Processing Systems, 29, (2016)"},{"key":"2200_CR28","doi-asserted-by":"crossref","unstructured":"Chang, M.F., Lambert, J., Sangkloy, P., et al.: \u201cArgoverse: 3d tracking and forecasting with rich maps,\u201d in Proc. IEEE\/CVF Conf. Comput. Vision Pattern Recognition, 8748\u20138757 (2019)","DOI":"10.1109\/CVPR.2019.00895"},{"key":"2200_CR29","volume-title":"\u201cMultimodal motion prediction with stacked transformers,\" in Proc","author":"Y Liu","year":"2021","unstructured":"Liu, Y., Zhang, J., Fang, L., Jiang, Q., Zhou, B.: \u201cMultimodal motion prediction with stacked transformers,\" in Proc. IEEE\/CVF Conf. Comput, Vision Pattern Recognition (CVPR) (2021)"},{"key":"2200_CR30","volume-title":"\u201cScene Transformer: A unified architecture for predicting multiple agent trajectories,\" in Proc","author":"J Ngiam","year":"2022","unstructured":"Ngiam, J., et al.: \u201cScene Transformer: A unified architecture for predicting multiple agent trajectories,\" in Proc. Int. Conf, Learning Representations (ICLR) (2022)"},{"key":"2200_CR31","doi-asserted-by":"crossref","unstructured":"Gilles, T., Sabatini, S., Tsishkou, D., Stanciulescu, B., Moutarde, F.: \u201cHOME: Heatmap output for future motion estimation,\u201d in Proc. IEEE Int. Conf. Intell. Transport. Syst. 
(ITSC), (2021)","DOI":"10.1109\/ITSC48978.2021.9564944"},{"key":"2200_CR32","doi-asserted-by":"crossref","unstructured":"Gilles, T., Sabatini, S., Tsishkou, D., Stanciulescu, B., Moutarde, F.: \u201cGOHOME: Graph-oriented heatmap output for future motion estimation,\u201d in Proc. IEEE Int. Conf. Robot. Autom. (ICRA), (2022)","DOI":"10.1109\/ICRA46639.2022.9812253"},{"key":"2200_CR33","volume-title":"\u201cHIVT: Hierarchical vector transformer for multi-agent motion prediction,\" in Proc","author":"Z Zhou","year":"2022","unstructured":"Zhou, Z., Ye, L., Wang, J., Wu, K., Lu, K.: \u201cHIVT: Hierarchical vector transformer for multi-agent motion prediction,\" in Proc. IEEE\/CVF Conf. Comput, Vision Pattern Recognition (CVPR) (2022)"},{"issue":"12","key":"2200_CR34","first-page":"7832","volume":"8","author":"Y Ni","year":"2023","unstructured":"Ni, Y., Ni, S., Yan, J.: Dynamic Scenario Representation Learning for Motion Forecasting with Heterogeneous Graph Convolutional Recurrent Networks. IEEE Robot. Autom. Lett. 8(12), 7832\u20137839 (2023)","journal-title":"IEEE Robot. Autom. Lett."},{"issue":"12","key":"2200_CR35","first-page":"8125","volume":"8","author":"C Tan","year":"2023","unstructured":"Tan, C., Zhou, Z., Yan, J.: MacFormer: Map-Agent Coupled Transformer for Real-time and Robust Trajectory Prediction. IEEE Robot. Autom. Lett. 8(12), 8125\u20138132 (2023)","journal-title":"IEEE Robot. Autom. Lett."},{"issue":"8","key":"2200_CR36","first-page":"4456","volume":"8","author":"B Liu","year":"2023","unstructured":"Liu, B., Chen, L., Wang, Z., Jiang, F.: LAformer: Trajectory Prediction for Autonomous Driving with Lane-Aware Scene Constraints. IEEE Robot. Autom. Lett. 8(8), 4456\u20134463 (2023)","journal-title":"IEEE Robot. Autom. 
Lett."},{"issue":"3","key":"2200_CR37","first-page":"2097","volume":"9","author":"L Zhan","year":"2024","unstructured":"Zhan, L., Chang, H., Yu, C.: SIMPL: A Simple and Efficient Multi-agent Motion Prediction Baseline for Autonomous Driving. IEEE Robot. Autom. Lett. 9(3), 2097\u20132104 (2024)","journal-title":"IEEE Robot. Autom. Lett."}],"container-title":["Multimedia Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-025-02200-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00530-025-02200-x","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-025-02200-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T11:35:41Z","timestamp":1775129741000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00530-025-02200-x"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,2,3]]},"references-count":37,"journal-issue":{"issue":"2","published-print":{"date-parts":[[2026,4]]}},"alternative-id":["2200"],"URL":"https:\/\/doi.org\/10.1007\/s00530-025-02200-x","relation":{},"ISSN":["0942-4962","1432-1882"],"issn-type":[{"value":"0942-4962","type":"print"},{"value":"1432-1882","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,2,3]]},"assertion":[{"value":"27 September 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"28 December 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"3 February 2026","order":3,"name":"first_online","label":"First 
Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no conflict of interest. The authors declare no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"123"}}