Merge branch 'feature/rank-eval' into feature/rank-eval_index_type_to_id

This commit is contained in:
Isabel Drost-Fromm 2016-08-09 10:54:14 +02:00
commit 978d5366d2
9 changed files with 492 additions and 110 deletions

View File

@@ -0,0 +1,183 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.rankeval;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.SearchHit;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class DiscountedCumulativeGainAt extends RankedListQualityMetric {
/** rank position up to which to check results. */
private int position;
/** If set to true, the dcg will be normalized (ndcg) */
private boolean normalize;
/** If set, this rating is used for docs the user hasn't supplied an explicit rating for */
private Integer unknownDocRating;
public static final String NAME = "dcg_at_n";
private static final double LOG2 = Math.log(2.0);
public DiscountedCumulativeGainAt(StreamInput in) throws IOException {
position = in.readInt();
normalize = in.readBoolean();
unknownDocRating = in.readOptionalVInt();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeInt(position);
out.writeBoolean(normalize);
out.writeOptionalVInt(unknownDocRating);
}
@Override
public String getWriteableName() {
return NAME;
}
/**
* Initialises position with 10
* */
public DiscountedCumulativeGainAt() {
this.position = 10;
}
/**
* @param position number of top results to check against a given set of relevant results. Must be positive.
*/
public DiscountedCumulativeGainAt(int position) {
if (position <= 0) {
throw new IllegalArgumentException("number of results to check needs to be positive but was " + position);
}
this.position = position;
}
/**
* Return number of search results to check for quality metric.
*/
public int getPosition() {
return this.position;
}
/**
* set number of search results to check for quality metric.
*/
public void setPosition(int position) {
this.position = position;
}
/**
* If set to true, the dcg will be normalized (ndcg)
*/
public void setNormalize(boolean normalize) {
this.normalize = normalize;
}
/**
* check whether this metric computes only dcg or "normalized" ndcg
*/
public boolean getNormalize() {
return this.normalize;
}
/**
* the rating for docs the user hasn't supplied an explicit rating for
*/
public void setUnknownDocRating(int unknownDocRating) {
this.unknownDocRating = unknownDocRating;
}
/**
* Return the rating used for docs the user hasn't supplied an explicit rating for, or null if none was set
*/
public Integer getUnknownDocRating() {
return this.unknownDocRating;
}
@Override
public EvalQueryQuality evaluate(SearchHit[] hits, List<RatedDocument> ratedDocs) {
Map<RatedDocumentKey, RatedDocument> ratedDocsByKey = new HashMap<>();
for (RatedDocument doc : ratedDocs) {
ratedDocsByKey.put(doc.getKey(), doc);
}
Collection<RatedDocumentKey> unknownDocIds = new ArrayList<>();
List<Integer> ratings = new ArrayList<>();
for (int i = 0; (i < position && i < hits.length); i++) {
RatedDocumentKey id = new RatedDocumentKey(hits[i].getIndex(), hits[i].getType(), hits[i].getId());
RatedDocument ratedDoc = ratedDocsByKey.get(id);
if (ratedDoc != null) {
ratings.add(ratedDoc.getRating());
} else {
unknownDocIds.add(id);
if (unknownDocRating != null) {
ratings.add(unknownDocRating);
}
}
}
double dcg = computeDCG(ratings);
if (normalize) {
Collections.sort(ratings, Collections.reverseOrder());
double idcg = computeDCG(ratings);
dcg = dcg / idcg;
}
return new EvalQueryQuality(dcg, unknownDocIds);
}
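/**
 * Computes the discounted cumulative gain of the given ratings, assuming they are listed in
 * result-rank order: each rating contributes (2^rating - 1) / log2(rank + 1) to the sum.
 */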
private static double computeDCG(List<Integer> ratings) {
int rank = 1;
double dcg = 0;
for (int rating : ratings) {
dcg += (Math.pow(2, rating) - 1) / ((Math.log(rank + 1) / LOG2));
rank++;
}
return dcg;
}
private static final ParseField SIZE_FIELD = new ParseField("size");
private static final ParseField NORMALIZE_FIELD = new ParseField("normalize");
private static final ParseField UNKNOWN_DOC_RATING_FIELD = new ParseField("unknown_doc_rating");
private static final ObjectParser<DiscountedCumulativeGainAt, ParseFieldMatcherSupplier> PARSER =
new ObjectParser<>("dcg_at", () -> new DiscountedCumulativeGainAt());
static {
PARSER.declareInt(DiscountedCumulativeGainAt::setPosition, SIZE_FIELD);
PARSER.declareBoolean(DiscountedCumulativeGainAt::setNormalize, NORMALIZE_FIELD);
PARSER.declareInt(DiscountedCumulativeGainAt::setUnknownDocRating, UNKNOWN_DOC_RATING_FIELD);
}
public static DiscountedCumulativeGainAt fromXContent(XContentParser parser, ParseFieldMatcherSupplier matcher) {
return PARSER.apply(parser, matcher);
}
}
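For reference, a compact statement of the metric implemented above (a sketch matching computeDCG and the normalize flag; rel_i is the rating at rank i and k is the configured position):

\mathrm{DCG}@k = \sum_{i=1}^{k} \frac{2^{rel_i} - 1}{\log_2(i + 1)}, \qquad \mathrm{nDCG}@k = \frac{\mathrm{DCG}@k}{\mathrm{IDCG}@k}

where IDCG@k is the DCG of the same ratings sorted in descending order, which is what evaluate computes when normalize is set.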

View File

@@ -38,42 +38,64 @@ import java.util.Map;
 * Documents of unknown quality - i.e. those that haven't been supplied in the set of annotated documents but have been returned
 * by the search are not taken into consideration when computing precision at n - they are ignored.
 *
-* TODO get rid of either this or RankEvalResult
 **/
+//TODO instead of just returning averages over complete results, think of other statistics, micro avg, macro avg, partial results
public class RankEvalResponse extends ActionResponse implements ToXContent {
+    /**ID of QA specification this result was generated for.*/
-    private RankEvalResult qualityResult;
+    private String specId;
+    /**Average precision observed when issuing query intents with this specification.*/
+    private double qualityLevel;
+    /**Mapping from intent id to all documents seen for this intent that were not annotated.*/
+    private Map<String, Collection<RatedDocumentKey>> unknownDocs;

    public RankEvalResponse() {
    }

+    @SuppressWarnings("unchecked")
    public RankEvalResponse(StreamInput in) throws IOException {
        super.readFrom(in);
-        this.qualityResult = new RankEvalResult(in);
+        this.specId = in.readString();
+        this.qualityLevel = in.readDouble();
+        this.unknownDocs = (Map<String, Collection<RatedDocumentKey>>) in.readGenericValue();
    }

+    public RankEvalResponse(String specId, double qualityLevel, Map<String, Collection<RatedDocumentKey>> unknownDocs) {
+        this.specId = specId;
+        this.qualityLevel = qualityLevel;
+        this.unknownDocs = unknownDocs;
+    }
+
+    public String getSpecId() {
+        return specId;
+    }
+
+    public double getQualityLevel() {
+        return qualityLevel;
+    }
+
+    public Map<String, Collection<RatedDocumentKey>> getUnknownDocs() {
+        return unknownDocs;
+    }
+
+    @Override
+    public String toString() {
+        return "RankEvalResult, ID :[" + specId + "], quality: " + qualityLevel + ", unknown docs: " + unknownDocs;
+    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
-        qualityResult.writeTo(out);
+        out.writeString(specId);
+        out.writeDouble(qualityLevel);
+        out.writeGenericValue(getUnknownDocs());
    }

-    public void setRankEvalResult(RankEvalResult result) {
-        this.qualityResult = result;
-    }
-
-    public RankEvalResult getRankEvalResult() {
-        return qualityResult;
-    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject("rank_eval");
-        builder.field("spec_id", qualityResult.getSpecId());
-        builder.field("quality_level", qualityResult.getQualityLevel());
+        builder.field("spec_id", specId);
+        builder.field("quality_level", qualityLevel);
        builder.startArray("unknown_docs");
-        Map<String, Collection<RatedDocumentKey>> unknownDocs = qualityResult.getUnknownDocs();
        for (String key : unknownDocs.keySet()) {
            builder.startObject();
            builder.field(key, unknownDocs.get(key));

View File

@@ -1,80 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.rankeval;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
/**
* For each precision at n computation the id of the search request specification used to generate search requests is returned
* for reference. In addition the averaged precision and the ids of all documents returned but not found annotated is returned.
* */
// TODO do we need an extra class for this or it RankEvalResponse enough?
// TODO instead of just returning averages over complete results, think of other statistics, micro avg, macro avg, partial results
public class RankEvalResult implements Writeable {
/**ID of QA specification this result was generated for.*/
private String specId;
/**Average precision observed when issuing query intents with this specification.*/
private double qualityLevel;
/**Mapping from intent id to all documents seen for this intent that were not annotated.*/
private Map<String, Collection<RatedDocumentKey>> unknownDocs;
@SuppressWarnings("unchecked")
public RankEvalResult(StreamInput in) throws IOException {
this.specId = in.readString();
this.qualityLevel = in.readDouble();
this.unknownDocs = (Map<String, Collection<RatedDocumentKey>>) in.readGenericValue();
}
public RankEvalResult(String specId, double quality, Map<String, Collection<RatedDocumentKey>> unknownDocs) {
this.specId = specId;
this.qualityLevel = quality;
this.unknownDocs = unknownDocs;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(specId);
out.writeDouble(qualityLevel);
out.writeGenericValue(getUnknownDocs());
}
public String getSpecId() {
return specId;
}
public double getQualityLevel() {
return qualityLevel;
}
public Map<String, Collection<RatedDocumentKey>> getUnknownDocs() {
return unknownDocs;
}
@Override
public String toString() {
return "RankEvalResult, ID :[" + specId + "], quality: " + qualityLevel + ", unknown docs: " + unknownDocs;
}
}

View File

@@ -63,6 +63,9 @@ public abstract class RankedListQualityMetric implements NamedWriteable {
        case ReciprocalRank.NAME:
            rc = ReciprocalRank.fromXContent(parser, context);
            break;
+        case DiscountedCumulativeGainAt.NAME:
+            rc = DiscountedCumulativeGainAt.fromXContent(parser, context);
+            break;
        default:
            throw new ParsingException(parser.getTokenLocation(), "[_na] unknown query metric name [{}]", metricName);
        }

View File

@@ -105,10 +105,8 @@ public class TransportRankEvalAction extends HandledTransportAction<RankEvalRequ
            unknownDocs.put(spec.getSpecId(), queryQuality.getUnknownDocs());
        }
-        RankEvalResponse response = new RankEvalResponse();
        // TODO add other statistics like micro/macro avg?
-        RankEvalResult result = new RankEvalResult(qualityTask.getTaskId(), metric.combine(partialResults), unknownDocs);
-        response.setRankEvalResult(result);
+        RankEvalResponse response = new RankEvalResponse(qualityTask.getTaskId(), metric.combine(partialResults), unknownDocs);
        listener.onResponse(response);
    }
}

View File

@@ -0,0 +1,124 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.rankeval;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.internal.InternalSearchHit;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutionException;
public class DiscountedCumulativeGainAtTests extends ESTestCase {
/**
* Assuming the docs are ranked in the following order:
*
* rank | rel_rank | 2^(rel_rank) - 1 | log_2(rank + 1) | (2^(rel_rank) - 1) / log_2(rank + 1)
* -------------------------------------------------------------------------------------------
* 1 | 3 | 7.0 | 1.0 | 7.0
* 2 | 2 | 3.0 | 1.5849625007211563 | 1.8927892607143721
* 3 | 3 | 7.0 | 2.0 | 3.5
* 4 | 0 | 0.0 | 2.321928094887362 | 0.0
* 5 | 1 | 1.0 | 2.584962500721156 | 0.38685280723454163
* 6 | 2 | 3.0 | 2.807354922057604 | 1.0686215613240666
*
* dcg = 13.84826362927298 (sum of last column)
*/
public void testDCGAtSix() throws IOException, InterruptedException, ExecutionException {
List<RatedDocument> rated = new ArrayList<>();
int[] relevanceRatings = new int[] { 3, 2, 3, 0, 1, 2 };
InternalSearchHit[] hits = new InternalSearchHit[6];
for (int i = 0; i < 6; i++) {
rated.add(new RatedDocument(new RatedDocumentKey("index", "type", Integer.toString(i)), relevanceRatings[i]));
hits[i] = new InternalSearchHit(i, Integer.toString(i), new Text("type"), Collections.emptyMap());
hits[i].shard(new SearchShardTarget("testnode", new ShardId("index", "uuid", 0)));
}
DiscountedCumulativeGainAt dcg = new DiscountedCumulativeGainAt(6);
assertEquals(13.84826362927298, dcg.evaluate(hits, rated).getQualityLevel(), 0.00001);
/**
* Check with normalization: to get the maximal possible dcg, sort documents by relevance in descending order
*
* rank | rel_rank | 2^(rel_rank) - 1 | log_2(rank + 1) | (2^(rel_rank) - 1) / log_2(rank + 1)
* -------------------------------------------------------------------------------------------
* 1 | 3 | 7.0 | 1.0 | 7.0
* 2 | 3 | 7.0 | 1.5849625007211563 | 4.416508275000202
* 3 | 2 | 3.0 | 2.0 | 1.5
* 4 | 2 | 3.0 | 2.321928094887362 | 1.2920296742201793
* 5 | 1 | 1.0 | 2.584962500721156 | 0.38685280723454163
* 6 | 0 | 0.0 | 2.807354922057604 | 0.0
*
* idcg = 14.595390756454922 (sum of last column)
*/
dcg.setNormalize(true);
assertEquals(13.84826362927298 / 14.595390756454922, dcg.evaluate(hits, rated).getQualityLevel(), 0.00001);
}
/**
* This tests metric when some documents in the search result don't have a rating provided by the user.
*
* rank | rel_rank | 2^(rel_rank) - 1 | log_2(rank + 1) | (2^(rel_rank) - 1) / log_2(rank + 1)
* -------------------------------------------------------------------------------------------
* 1 | 3 | 7.0 | 1.0 | 7.0
* 2 | 2 | 3.0 | 1.5849625007211563 | 1.8927892607143721
* 3 | 3 | 7.0 | 2.0 | 3.5
* 4 | n/a | n/a | n/a | n/a
* 5 | n/a | n/a | n/a | n/a
* 6 | n/a | n/a | n/a | n/a
*
* dcg = 13.84826362927298 (sum of last column)
*/
public void testDCGAtSixMissingRatings() throws IOException, InterruptedException, ExecutionException {
List<RatedDocument> rated = new ArrayList<>();
int[] relevanceRatings = new int[] { 3, 2, 3};
InternalSearchHit[] hits = new InternalSearchHit[6];
for (int i = 0; i < 6; i++) {
if (i < relevanceRatings.length) {
rated.add(new RatedDocument(new RatedDocumentKey("index", "type", Integer.toString(i)), relevanceRatings[i]));
}
hits[i] = new InternalSearchHit(i, Integer.toString(i), new Text("type"), Collections.emptyMap());
hits[i].shard(new SearchShardTarget("testnode", new ShardId("index", "uuid", 0)));
}
DiscountedCumulativeGainAt dcg = new DiscountedCumulativeGainAt(6);
EvalQueryQuality result = dcg.evaluate(hits, rated);
assertEquals(12.392789260714371, result.getQualityLevel(), 0.00001);
assertEquals(3, result.getUnknownDocs().size());
}
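// A possible variant of the check above (illustrative only): calling setUnknownDocRating(0) on the
// metric makes unrated documents contribute a gain of 2^0 - 1 = 0, so the plain DCG stays at
// 12.392789260714371 while the three documents are still reported through getUnknownDocs().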
public void testParseFromXContent() throws IOException {
String xContent = " {\n"
+ " \"size\": 8,\n"
+ " \"normalize\": true\n"
+ "}";
XContentParser parser = XContentFactory.xContent(xContent).createParser(xContent);
DiscountedCumulativeGainAt dcgAt = DiscountedCumulativeGainAt.fromXContent(parser, () -> ParseFieldMatcher.STRICT);
assertEquals(8, dcgAt.getPosition());
assertEquals(true, dcgAt.getNormalize());
}
}

View File

@@ -20,7 +20,6 @@
package org.elasticsearch.index.rankeval;

import org.elasticsearch.index.query.MatchAllQueryBuilder;
-import org.elasticsearch.index.rankeval.PrecisionAtN;
import org.elasticsearch.index.rankeval.PrecisionAtN.Rating;
import org.elasticsearch.index.rankeval.QuerySpec;
import org.elasticsearch.index.rankeval.RankEvalAction;
@@ -28,7 +27,6 @@ import org.elasticsearch.index.rankeval.RankEvalPlugin;
import org.elasticsearch.index.rankeval.RankEvalRequest;
import org.elasticsearch.index.rankeval.RankEvalRequestBuilder;
import org.elasticsearch.index.rankeval.RankEvalResponse;
-import org.elasticsearch.index.rankeval.RankEvalResult;
import org.elasticsearch.index.rankeval.RankEvalSpec;
import org.elasticsearch.index.rankeval.RatedDocument;
import org.elasticsearch.index.rankeval.RatedDocumentKey;
@@ -98,10 +96,9 @@ public class RankEvalRequestTests extends ESIntegTestCase {
        builder.setRankEvalSpec(task);

        RankEvalResponse response = client().execute(RankEvalAction.INSTANCE, builder.request()).actionGet();
-        RankEvalResult result = response.getRankEvalResult();
-        assertEquals(specId, result.getSpecId());
-        assertEquals(1.0, result.getQualityLevel(), Double.MIN_VALUE);
-        Set<Entry<String, Collection<RatedDocumentKey>>> entrySet = result.getUnknownDocs().entrySet();
+        assertEquals(specId, response.getSpecId());
+        assertEquals(1.0, response.getQualityLevel(), Double.MIN_VALUE);
+        Set<Entry<String, Collection<RatedDocumentKey>>> entrySet = response.getUnknownDocs().entrySet();
        assertEquals(2, entrySet.size());
        for (Entry<String, Collection<RatedDocumentKey>> entry : entrySet) {
            if (entry.getKey() == "amsterdam_query") {

View File

@@ -64,11 +64,17 @@ public class ReciprocalRankTests extends ESTestCase {
        int rankAtFirstRelevant = relevantAt + 1;
        EvalQueryQuality evaluation = reciprocalRank.evaluate(hits, ratedDocs);
-        assertEquals(1.0 / rankAtFirstRelevant, evaluation.getQualityLevel(), Double.MIN_VALUE);
-        reciprocalRank = new ReciprocalRank(rankAtFirstRelevant - 1);
-        evaluation = reciprocalRank.evaluate(hits, ratedDocs);
-        assertEquals(0.0, evaluation.getQualityLevel(), Double.MIN_VALUE);
+        if (rankAtFirstRelevant <= maxRank) {
+            assertEquals(1.0 / rankAtFirstRelevant, evaluation.getQualityLevel(), Double.MIN_VALUE);
+            // check that if we lower maxRank by one, we don't find any result and get 0.0 quality level
+            reciprocalRank = new ReciprocalRank(rankAtFirstRelevant - 1);
+            evaluation = reciprocalRank.evaluate(hits, ratedDocs);
+            assertEquals(0.0, evaluation.getQualityLevel(), Double.MIN_VALUE);
+        } else {
+            assertEquals(0.0, evaluation.getQualityLevel(), Double.MIN_VALUE);
+        }
    }

    public void testEvaluationOneRelevantInResults() {

View File

@@ -0,0 +1,129 @@
---
"Response format":
- do:
index:
index: foo
type: bar
id: doc1
body: { "bar": 1 }
- do:
index:
index: foo
type: bar
id: doc2
body: { "bar": 2 }
- do:
index:
index: foo
type: bar
id: doc3
body: { "bar": 3 }
- do:
index:
index: foo
type: bar
id: doc4
body: { "bar": 4 }
- do:
index:
index: foo
type: bar
id: doc5
body: { "bar": 5 }
- do:
index:
index: foo
type: bar
id: doc6
body: { "bar": 6 }
- do:
indices.refresh: {}
- do:
rank_eval:
body: {
"spec_id" : "dcg_qa_queries",
"requests" : [
{
"id": "dcg_query",
"request": { "query": { "match_all" : {}}, "sort" : [ "bar" ] },
"ratings": [
{"key": {"index": "foo", "type": "bar", "doc_id": "doc1"}, "rating": 3},
{"key": {"index": "foo", "type": "bar", "doc_id": "doc2"}, "rating": 2},
{"key": {"index": "foo", "type": "bar", "doc_id": "doc3"}, "rating": 3},
{"key": {"index": "foo", "type": "bar", "doc_id": "doc4"}, "rating": 0},
{"key": {"index": "foo", "type": "bar", "doc_id": "doc5"}, "rating": 1},
{"key": {"index": "foo", "type": "bar", "doc_id": "doc6"}, "rating": 2}]
}
],
"metric" : { "dcg_at_n": { "size": 6}}
}
- match: {rank_eval.spec_id: "dcg_qa_queries"}
- match: {rank_eval.quality_level: 13.84826362927298}
# reverse the order in which the results are returned (less relevant docs first)
- do:
rank_eval:
body: {
"spec_id" : "dcg_qa_queries",
"requests" : [
{
"id": "dcg_query_reverse",
"request": { "query": { "match_all" : {}}, "sort" : [ {"bar" : "desc" }] },
"ratings": [
{"key": {"index": "foo", "type": "bar", "doc_id": "doc1"}, "rating": 3},
{"key": {"index": "foo", "type": "bar", "doc_id": "doc2"}, "rating": 2},
{"key": {"index": "foo", "type": "bar", "doc_id": "doc3"}, "rating": 3},
{"key": {"index": "foo", "type": "bar", "doc_id": "doc4"}, "rating": 0},
{"key": {"index": "foo", "type": "bar", "doc_id": "doc5"}, "rating": 1},
{"key": {"index": "foo", "type": "bar", "doc_id": "doc6"}, "rating": 2}]
},
],
"metric" : { "dcg_at_n": { "size": 6}}
}
- match: {rank_eval.spec_id: "dcg_qa_queries"}
- match: {rank_eval.quality_level: 10.29967439154499}
# if we mix both, we should get the average
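# i.e. (13.84826362927298 + 10.29967439154499) / 2 ≈ 12.073969010408984, the value asserted below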
- do:
rank_eval:
body: {
"spec_id" : "dcg_qa_queries",
"requests" : [
{
"id": "dcg_query",
"request": { "query": { "match_all" : {}}, "sort" : [ "bar" ] },
"ratings": [
{"key": {"index": "foo", "type": "bar", "doc_id": "doc1"}, "rating": 3},
{"key": {"index": "foo", "type": "bar", "doc_id": "doc2"}, "rating": 2},
{"key": {"index": "foo", "type": "bar", "doc_id": "doc3"}, "rating": 3},
{"key": {"index": "foo", "type": "bar", "doc_id": "doc4"}, "rating": 0},
{"key": {"index": "foo", "type": "bar", "doc_id": "doc5"}, "rating": 1},
{"key": {"index": "foo", "type": "bar", "doc_id": "doc6"}, "rating": 2}]
},
{
"id": "dcg_query_reverse",
"request": { "query": { "match_all" : {}}, "sort" : [ {"bar" : "desc" }] },
"ratings": [
{"key": {"index": "foo", "type": "bar", "doc_id": "doc1"}, "rating": 3},
{"key": {"index": "foo", "type": "bar", "doc_id": "doc2"}, "rating": 2},
{"key": {"index": "foo", "type": "bar", "doc_id": "doc3"}, "rating": 3},
{"key": {"index": "foo", "type": "bar", "doc_id": "doc4"}, "rating": 0},
{"key": {"index": "foo", "type": "bar", "doc_id": "doc5"}, "rating": 1},
{"key": {"index": "foo", "type": "bar", "doc_id": "doc6"}, "rating": 2}]
},
],
"metric" : { "dcg_at_n": { "size": 6}}
}
- match: {rank_eval.spec_id: "dcg_qa_queries"}
- match: {rank_eval.quality_level: 12.073969010408984}