{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,2]],"date-time":"2025-12-02T15:05:50Z","timestamp":1764687950369,"version":"3.28.0"},"reference-count":41,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,6,6]],"date-time":"2021-06-06T00:00:00Z","timestamp":1622937600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,6,6]],"date-time":"2021-06-06T00:00:00Z","timestamp":1622937600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,6,6]],"date-time":"2021-06-06T00:00:00Z","timestamp":1622937600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,6,6]]},"DOI":"10.1109\/icassp39728.2021.9414232","type":"proceedings-article","created":{"date-parts":[[2021,5,13]],"date-time":"2021-05-13T19:53:45Z","timestamp":1620935625000},"page":"2085-2089","source":"Crossref","is-referenced-by-count":11,"title":["Regression or classification? New methods to evaluate no-reference picture and video quality models"],"prefix":"10.1109","author":[{"given":"Zhengzhong","family":"Tu","sequence":"first","affiliation":[]},{"given":"Chia-Ju","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Li-Heng","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Yilin","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Neil","family":"Birkbeck","sequence":"additional","affiliation":[]},{"given":"Balu","family":"Adsumilli","sequence":"additional","affiliation":[]},{"given":"Alan C.","family":"Bovik","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2017.2685941"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2015.2457911"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2019.2922850"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2020.2967829"},{"key":"ref31","doi-asserted-by":"crossref","first-page":"372","DOI":"10.1109\/TIP.2015.2500021","article-title":"Massive online crowdsourced study of subjective and objective picture quality","volume":"25","author":"ghadiyaram","year":"2015","journal-title":"IEEE Trans Image Process"},{"article-title":"Proxiqa: A proxy approach to perceptual optimization of learned image compression","year":"2019","author":"chen","key":"ref30"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00373"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/MMSP.2019.8901772"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2018.2869673"},{"article-title":"Predicting the quality of compressed videos with pre-existing distortions","year":"2020","author":"yu","key":"ref34"},{"key":"ref10","doi-asserted-by":"crossref","DOI":"10.1109\/TIP.2021.3112055","article-title":"No-reference video quality prediction of high-motion videos via space-time chips","author":"ebenezer","year":"2021","journal-title":"IEEE Trans Image Process"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2016.2585880"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2014.2355716"},{"key":"ref12","first-page":"1098","article-title":"Unsupervised feature learning framework for no-reference image quality assessment","author":"ye","year":"2012","journal-title":"Proc IEEE Conf Comput Vis Pattern Recognit (CVPR)"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053634"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2011.5946613"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP40778.2020.9191169"},{"article-title":"Perceptual video quality prediction emphasizing chroma distortions","year":"2020","author":"chen","key":"ref16"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2017.2774045"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.224"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2017.2760518"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1145\/1961189.1961199"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2012.2214050"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/QoMEX.2017.7965673"},{"article-title":"UGC-VQA: Benchmarking blind video quality assessment for user generated content","year":"2020","author":"tu","key":"ref3"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2014.2299154"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2015.2440172"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1167\/17.1.32"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1145\/3343031.3351028"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2019.2923051"},{"key":"ref2","article-title":"VMAF: The journey continues","author":"li","year":"2018","journal-title":"The Netflix Tech Blog"},{"key":"ref9","doi-asserted-by":"crossref","DOI":"10.1109\/OJSP.2021.3090333","article-title":"Rapique: Rapid and accurate video quality prediction of user generated content","author":"tu","year":"2021"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2003.819861"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2017.2696576"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2019.2941778"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2012.6247954"},{"key":"ref24","doi-asserted-by":"crossref","first-page":"95990z","DOI":"10.1117\/12.2188389","article-title":"Experimental design and analysis of jnd test on coded image\/video","volume":"9599","author":"lin","year":"2015","journal-title":"Appl Digit Image Process XXXVIII"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2011.5995347"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00363"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1117\/12.2569332"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TBC.2010.2086750"}],"event":{"name":"ICASSP 2021 - 2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","start":{"date-parts":[[2021,6,6]]},"location":"Toronto, ON, Canada","end":{"date-parts":[[2021,6,11]]}},"container-title":["ICASSP 2021 - 2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9413349\/9413350\/09414232.pdf?arnumber=9414232","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,12,27]],"date-time":"2022-12-27T08:31:49Z","timestamp":1672129909000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9414232\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,6,6]]},"references-count":41,"URL":"https:\/\/doi.org\/10.1109\/icassp39728.2021.9414232","relation":{},"subject":[],"published":{"date-parts":[[2021,6,6]]}}}