{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,5,3]],"date-time":"2025-05-03T01:17:34Z","timestamp":1746235054020},"publisher-location":"Cham","reference-count":16,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783319566078"},{"type":"electronic","value":"9783319566085"}],"license":[{"start":{"date-parts":[[2017,1,1]],"date-time":"2017-01-01T00:00:00Z","timestamp":1483228800000},"content-version":"unspecified","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2017]]},"DOI":"10.1007\/978-3-319-56608-5_19","type":"book-chapter","created":{"date-parts":[[2017,4,7]],"date-time":"2017-04-07T06:25:19Z","timestamp":1491546319000},"page":"239-251","source":"Crossref","is-referenced-by-count":4,"title":["Transitivity, Time Consumption, and Quality of Preference Judgments in Crowdsourcing"],"prefix":"10.1007","author":[{"given":"Kai","family":"Hui","sequence":"first","affiliation":[]},{"given":"Klaus","family":"Berberich","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2017,4,8]]},"reference":[{"key":"19_CR1","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"153","DOI":"10.1007\/978-3-642-20161-5_16","volume-title":"Advances in Information Retrieval","author":"O Alonso","year":"2011","unstructured":"Alonso, O., Baeza-Yates, R.: Design and implementation of relevance assessments using crowdsourcing. In: Clough, P., Foley, C., Gurrin, C., Jones, G.J.F., Kraaij, W., Lee, H., Mudoch, V. (eds.) ECIR 2011. LNCS, vol. 6611, pp. 153\u2013164. Springer, Heidelberg (2011). doi: 10.1007\/978-3-642-20161-5_16"},{"unstructured":"Alonso, O., Mizzaro, S.: Can we get rid of TREC assessors? Using mechanical turk for relevance assessment. In: SIGIR 2009 Workshop on the Future of IR Evaluation (2009)","key":"19_CR2"},{"issue":"6","key":"19_CR3","doi-asserted-by":"crossref","first-page":"1053","DOI":"10.1016\/j.ipm.2012.01.004","volume":"48","author":"O Alonso","year":"2012","unstructured":"Alonso, O., Mizzaro, S.: Using crowdsourcing for TREC relevance assessment. Inf. Process. Manag. 48(6), 1053\u20131066 (2012)","journal-title":"Inf. Process. Manag."},{"doi-asserted-by":"crossref","unstructured":"Bashir, M., Anderton, J., Wu, J., Golbus, P.B., Pavlu, V., Aslam, J.A.: A document rating system for preference judgements. In: SIGIR 2013 (2013)","key":"19_CR4","DOI":"10.1145\/2484028.2484170"},{"key":"19_CR5","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"16","DOI":"10.1007\/978-3-540-78646-7_5","volume-title":"Advances in Information Retrieval","author":"B Carterette","year":"2008","unstructured":"Carterette, B., Bennett, P.N., Chickering, D.M., Dumais, S.T.: Here or there: preference judgments for relevance. In: Macdonald, C., Ounis, I., Plachouras, V., Ruthven, I., White, R.W. (eds.) ECIR 2008. LNCS, vol. 4956, pp. 16\u201327. Springer, Heidelberg (2008). doi: 10.1007\/978-3-540-78646-7_5"},{"doi-asserted-by":"crossref","unstructured":"Cleverdon, C.: The cranfield tests on index language devices. In: Aslib Proceedings, vol. 19 (1967)","key":"19_CR6","DOI":"10.1108\/eb050097"},{"unstructured":"Grady, C., Lease, M.: Crowdsourcing document relevance assessment with mechanical turk. 
In: NAACL HLT 2010 Workshop on Creating Speech and Language Data with Amazon\u2019s Mechanical Turk (2010)","key":"19_CR7"},{"unstructured":"Hansson, S.O., Gr\u00fcne-Yanoff, T.: Preferences. In: Zalta, E.N. (ed.) The Stanford Encyclopedia of Philosophy (2012)","key":"19_CR8"},{"key":"19_CR9","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"165","DOI":"10.1007\/978-3-642-20161-5_17","volume-title":"Advances in Information Retrieval","author":"G Kazai","year":"2011","unstructured":"Kazai, G.: In search of quality in crowdsourcing for search engine evaluation. In: Clough, P., Foley, C., Gurrin, C., Jones, G.J.F., Kraaij, W., Lee, H., Mudoch, V. (eds.) ECIR 2011. LNCS, vol. 6611, pp. 165\u2013176. Springer, Heidelberg (2011). doi: 10.1007\/978-3-642-20161-5_17"},{"doi-asserted-by":"crossref","unstructured":"Kazai, G., Yilmaz, E., Craswell, N., Tahaghoghi, S.M.: User intent and assessor disagreement in web search evaluation. In: CIKM 2013 (2013)","key":"19_CR10","DOI":"10.1145\/2505515.2505716"},{"doi-asserted-by":"crossref","unstructured":"Moshfeghi, Y., Huertas-Rosero, A.F., Jose, J.M.: Identifying careless workers in crowdsourcing platforms: a game theory approach. In: SIGIR 2016 (2016)","key":"19_CR11","DOI":"10.1145\/2911451.2914756"},{"doi-asserted-by":"crossref","unstructured":"Moshfeghi, Y., Rosero, A.F.H., Jose, J.M.: A game-theory approach for effective crowdsource-based relevance assessment. ACM Trans. Intell. Syst. Technol. 7(4) (2016)","key":"19_CR12","DOI":"10.1145\/2873063"},{"doi-asserted-by":"crossref","unstructured":"Radinsky, K., Ailon, N.: Ranking from pairs and triplets: information quality, evaluation methods and query complexity. In: WSDM 2011 (2011)","key":"19_CR13","DOI":"10.1145\/1935826.1935850"},{"issue":"8","key":"19_CR14","doi-asserted-by":"crossref","first-page":"590","DOI":"10.1002\/(SICI)1097-4571(199012)41:8<590::AID-ASI5>3.0.CO;2-T","volume":"41","author":"ME Rorvig","year":"1990","unstructured":"Rorvig, M.E.: The simple scalability of documents. J. Am. Soc. Inf. Sci. 41(8), 590\u2013598 (1990)","journal-title":"J. Am. Soc. Inf. Sci."},{"issue":"1","key":"19_CR15","doi-asserted-by":"crossref","first-page":"37","DOI":"10.1016\/j.ipm.2010.02.005","volume":"47","author":"R Song","year":"2011","unstructured":"Song, R., Guo, Q., Zhang, R., Xin, G., Wen, J.R., Yu, Y., Hon, H.W.: Select-the-best-ones: a new way to judge relative relevance. Inf. Process. Manag. 47(1), 37\u201352 (2011)","journal-title":"Inf. Process. Manag."},{"unstructured":"Zhu, D., Carterette, B.: An analysis of assessor behavior in crowdsourced preference judgments. 
In: SIGIR 2010 Workshop on Crowdsourcing for Search Evaluation (2010)","key":"19_CR16"}],"container-title":["Lecture Notes in Computer Science","Advances in Information Retrieval"],"original-title":[],"link":[{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-319-56608-5_19","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2017,6,25]],"date-time":"2017-06-25T10:25:15Z","timestamp":1498386315000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/978-3-319-56608-5_19"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2017]]},"ISBN":["9783319566078","9783319566085"],"references-count":16,"URL":"https:\/\/doi.org\/10.1007\/978-3-319-56608-5_19","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2017]]}}}