{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,8]],"date-time":"2024-09-08T18:06:52Z","timestamp":1725818812516},"publisher-location":"Cham","reference-count":18,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783319163536"},{"type":"electronic","value":"9783319163543"}],"license":[{"start":{"date-parts":[[2015,1,1]],"date-time":"2015-01-01T00:00:00Z","timestamp":1420070400000},"content-version":"tdm","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2015]]},"DOI":"10.1007\/978-3-319-16354-3_17","type":"book-chapter","created":{"date-parts":[[2015,3,16]],"date-time":"2015-03-16T12:53:25Z","timestamp":1426510405000},"page":"159-171","source":"Crossref","is-referenced-by-count":1,"title":["A Discriminative Approach to Predicting Assessor Accuracy"],"prefix":"10.1007","author":[{"given":"Hyun Joon","family":"Jung","sequence":"first","affiliation":[]},{"given":"Matthew","family":"Lease","sequence":"additional","affiliation":[]}],"member":"297","reference":[{"issue":"2","key":"17_CR1","doi-asserted-by":"publisher","first-page":"9","DOI":"10.1145\/1480506.1480508","volume":"42","author":"O. Alonso","year":"2008","unstructured":"Alonso, O., Rose, D.E., Stewart, B.: Crowdsourcing for relevance evaluation. ACM SIGIR Forum\u00a042(2), 9\u201315 (2008)","journal-title":"ACM SIGIR Forum"},{"key":"17_CR2","doi-asserted-by":"publisher","first-page":"20","DOI":"10.1109\/MIC.2012.71","volume":"16","author":"J.B. Vuurens","year":"2012","unstructured":"Vuurens, J.B., de Vries, A.P.: Obtaining High-Quality Relevance Judgments Using Crowdsourcing. IEEE Internet Computing\u00a016, 20\u201327 (2012)","journal-title":"IEEE Internet Computing"},{"key":"17_CR3","unstructured":"Lease, M., Kazai, G.: Overview of the TREC 2011 Crowdsourcing Track (Conference Notebook). In: 20th Text Retrieval Conference (TREC) (2011)"},{"key":"17_CR4","doi-asserted-by":"crossref","unstructured":"Carterette, B., Soboroff, I.: The effect of assessor error on IR system evaluation. In: Proceedings of the 33rd International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR 2010, pp. 539\u2013546 (2010)","DOI":"10.1145\/1835449.1835540"},{"key":"17_CR5","doi-asserted-by":"crossref","unstructured":"Hosseini, M., Cox, I.J., Mili\u0107-frayling, N.: On aggregating labels from multiple crowd. In: Proceedings of the 34th European Conference on Advances in Information Retrieval, ECIR 2012, pp. 182\u2013194 (2012)","DOI":"10.1007\/978-3-642-28997-2_16"},{"key":"17_CR6","doi-asserted-by":"crossref","unstructured":"Kazai, G., Kamps, J., Milic-Frayling, N.: The Face of Quality in Crowdsourcing Relevance Labels: Demographics, Personality and Labeling Accuracy. In: Proceedings of the 21st ACM International Conference on Information and Knowledge Management, CIKM 2012, pp. 2583\u20132586 (2012)","DOI":"10.1145\/2396761.2398697"},{"key":"17_CR7","doi-asserted-by":"crossref","unstructured":"Law, E., Bennett, P., Horvitz, E.: The effects of choice in routing relevance judgments. In: Proceedings of the 34th ACM SIGIR Conference on Research and Development in Information, SIGIR 2011, pp. 1127\u20131128 (2011)","DOI":"10.1145\/2009916.2010082"},{"key":"17_CR8","doi-asserted-by":"crossref","unstructured":"Ipeirotis, P.G., Gabrilovich, E.: Quizz: targeted crowdsourcing with a billion (potential) users. In: Proceedings of the 23rd International Conference on World Wide Web, WWW 2014, pp. 143\u2013154 (2014)","DOI":"10.1145\/2566486.2567988"},{"key":"17_CR9","doi-asserted-by":"crossref","unstructured":"Yuen, M., King, I., Leung, K.S.: Task recommendation in crowdsourcing systems. In: Proceedings of the First International Workshop on Crowdsourcing and Data Mining, pp. 22\u201326 (2012)","DOI":"10.1145\/2442657.2442661"},{"key":"17_CR10","doi-asserted-by":"crossref","unstructured":"Donmez, P., Carbonell, J., Schneider, J.: A probabilistic framework to learn from multiple annotators with time-varying accuracy. In: Proceedings of the SIAM International Conference on Data Mining, pp. 826\u2013837 (2010)","DOI":"10.1137\/1.9781611972801.72"},{"key":"17_CR11","doi-asserted-by":"crossref","unstructured":"Jung, H.J., Park, Y., Lease, M.: Predicting Next Label Quality: A Time-Series Model of Crowdwork. In: Proceedings of the 2nd AAAI Conference on Human Computation, HCOMP 2014, pp. 87\u201395 (2014)","DOI":"10.1609\/hcomp.v2i1.13165"},{"key":"17_CR12","doi-asserted-by":"crossref","unstructured":"Kazai, G.: In search of quality in crowdsourcing for search engine evaluation. In: Proceedings of the 30th European Conference on Advances in Information Retrieval. ECIR 2011, pp. 165\u2013176 (2011)","DOI":"10.1007\/978-3-642-20161-5_17"},{"key":"17_CR13","doi-asserted-by":"crossref","unstructured":"Smucker, M.D., Jethani, C.P.: Measuring assessor accuracy: a comparison of NIST assessors and user study participants. In: Proceedings of the 34th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR 2011, pp. 1231\u20131232 (2011)","DOI":"10.1145\/2009916.2010134"},{"key":"17_CR14","first-page":"491","volume":"13","author":"V. Raykar","year":"2012","unstructured":"Raykar, V., Yu, S.: Eliminating spammers and ranking annotators for crowdsourced labeling tasks. Journal of Machine Learning Research\u00a013, 491\u2013518 (2012)","journal-title":"Journal of Machine Learning Research"},{"key":"17_CR15","doi-asserted-by":"crossref","unstructured":"Rzeszotarski, J.M., Kittur, A.: Instrumenting the crowd: Using implicit behavioral measures to predict task performance. In: Proceedings of the 24th Annual ACM Symposium on User Interface Software and Technology, UIST 2011, pp. 13\u201322 (2011)","DOI":"10.1145\/2047196.2047199"},{"key":"17_CR16","doi-asserted-by":"publisher","first-page":"697","DOI":"10.1016\/S0306-4573(00)00010-8","volume":"36","author":"E.M. Voorhees","year":"2000","unstructured":"Voorhees, E.M.: Variations in relevance judgments and the measurement of retrieval effectiveness. Information Processing and Management\u00a036, 697\u2013716 (2000)","journal-title":"Information Processing and Management"},{"key":"17_CR17","doi-asserted-by":"publisher","first-page":"2256","DOI":"10.1016\/j.patcog.2013.01.035","volume":"46","author":"I. Pillai","year":"2013","unstructured":"Pillai, I., Fumera, G., Roli, F.: Multi-label classification with a reject option. Pattern Recognition\u00a046, 2256\u20132266 (2013)","journal-title":"Pattern Recognition"},{"key":"17_CR18","unstructured":"Buckley, C., Lease, M., Smucker, M.D.: Overview of the TREC 2010 Relevance Feedback Track (Notebook). In: 19th Text Retrieval Conference, TREC (2010)"}],"container-title":["Lecture Notes in Computer Science","Advances in Information Retrieval"],"original-title":[],"language":"en","link":[{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-319-16354-3_17","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,8,8]],"date-time":"2023-08-08T20:39:20Z","timestamp":1691527160000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/978-3-319-16354-3_17"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2015]]},"ISBN":["9783319163536","9783319163543"],"references-count":18,"URL":"https:\/\/doi.org\/10.1007\/978-3-319-16354-3_17","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2015]]}}}