[Docs] Add rankEval method for Java HL client

This change adds documentation about using the ranking evaluation API
from the high level Java REST client.

Closes #28694
Christoph Büscher 2018-04-10 10:15:31 +02:00
parent 7c56cc2624
commit 124fecd221
3 changed files with 175 additions and 0 deletions


@@ -37,6 +37,7 @@ import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.unit.TimeValue;
@@ -44,6 +45,16 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.MatchQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.rankeval.EvalQueryQuality;
import org.elasticsearch.index.rankeval.EvaluationMetric;
import org.elasticsearch.index.rankeval.MetricDetail;
import org.elasticsearch.index.rankeval.PrecisionAtK;
import org.elasticsearch.index.rankeval.RankEvalRequest;
import org.elasticsearch.index.rankeval.RankEvalResponse;
import org.elasticsearch.index.rankeval.RankEvalSpec;
import org.elasticsearch.index.rankeval.RatedDocument;
import org.elasticsearch.index.rankeval.RatedRequest;
import org.elasticsearch.index.rankeval.RatedSearchHit;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.SearchHit;
@@ -74,6 +85,7 @@ import org.elasticsearch.search.suggest.SuggestionBuilder;
import org.elasticsearch.search.suggest.term.TermSuggestion;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
@@ -688,6 +700,78 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
        }
    }

    public void testRankEval() throws Exception {
        indexSearchTestData();
        RestHighLevelClient client = highLevelClient();
        {
            // tag::rank-eval-request-basic
            EvaluationMetric metric = new PrecisionAtK(); // <1>
            List<RatedDocument> ratedDocs = new ArrayList<>();
            ratedDocs.add(new RatedDocument("posts", "1", 1)); // <2>
            SearchSourceBuilder searchQuery = new SearchSourceBuilder();
            searchQuery.query(QueryBuilders.matchQuery("user", "kimchy")); // <3>
            RatedRequest ratedRequest = // <4>
                    new RatedRequest("kimchy_query", ratedDocs, searchQuery);
            List<RatedRequest> ratedRequests = Arrays.asList(ratedRequest);
            RankEvalSpec specification =
                    new RankEvalSpec(ratedRequests, metric); // <5>
            RankEvalRequest request = // <6>
                    new RankEvalRequest(specification, new String[] { "posts" });
            // end::rank-eval-request-basic

            // tag::rank-eval-execute
            RankEvalResponse response = client.rankEval(request);
            // end::rank-eval-execute

            logger.warn(Strings.toString(response));

            // tag::rank-eval-response
            double evaluationResult = response.getEvaluationResult(); // <1>
            assertEquals(1.0 / 3.0, evaluationResult, 0.0);
            Map<String, EvalQueryQuality> partialResults =
                    response.getPartialResults();
            EvalQueryQuality evalQuality =
                    partialResults.get("kimchy_query"); // <2>
            assertEquals("kimchy_query", evalQuality.getId());
            double qualityLevel = evalQuality.getQualityLevel(); // <3>
            assertEquals(1.0 / 3.0, qualityLevel, 0.0);
            List<RatedSearchHit> hitsAndRatings = evalQuality.getHitsAndRatings();
            RatedSearchHit ratedSearchHit = hitsAndRatings.get(0);
            assertEquals("3", ratedSearchHit.getSearchHit().getId()); // <4>
            assertFalse(ratedSearchHit.getRating().isPresent()); // <5>
            MetricDetail metricDetails = evalQuality.getMetricDetails();
            String metricName = metricDetails.getMetricName();
            assertEquals(PrecisionAtK.NAME, metricName); // <6>
            PrecisionAtK.Detail detail = (PrecisionAtK.Detail) metricDetails;
            assertEquals(1, detail.getRelevantRetrieved()); // <7>
            assertEquals(3, detail.getRetrieved());
            // end::rank-eval-response

            // tag::rank-eval-execute-listener
            ActionListener<RankEvalResponse> listener = new ActionListener<RankEvalResponse>() {
                @Override
                public void onResponse(RankEvalResponse response) {
                    // <1>
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
            // end::rank-eval-execute-listener

            // Replace the empty listener by a blocking listener in test
            final CountDownLatch latch = new CountDownLatch(1);
            listener = new LatchedActionListener<>(listener, latch);

            // tag::rank-eval-execute-async
            client.rankEvalAsync(request, listener); // <1>
            // end::rank-eval-execute-async

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
        }
    }
    public void testMultiSearch() throws Exception {
        indexSearchTestData();
        RestHighLevelClient client = highLevelClient();


@@ -0,0 +1,89 @@
[[java-rest-high-rank-eval]]
=== Ranking Evaluation API
The `rankEval` method allows evaluating the quality of ranked search
results over a set of search requests. Given sets of manually rated
documents for each search request, ranking evaluation performs a
<<java-rest-high-multi-search,multi search>> request and calculates
information retrieval metrics like _mean reciprocal rank_, _precision_
or _discounted cumulative gain_ on the returned results.
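Besides `PrecisionAtK`, which is used in the examples below, other metrics from
the same `org.elasticsearch.index.rankeval` package, such as `MeanReciprocalRank`
or `DiscountedCumulativeGain`, can be created in the same way and passed to the
evaluation specification. The following is only a sketch assuming the metrics'
default constructors, not part of the example test above:
["source","java"]
--------------------------------------------------
import org.elasticsearch.index.rankeval.DiscountedCumulativeGain;
import org.elasticsearch.index.rankeval.EvaluationMetric;
import org.elasticsearch.index.rankeval.MeanReciprocalRank;
import org.elasticsearch.index.rankeval.PrecisionAtK;

// any of these metrics can be used when building the RankEvalSpec
EvaluationMetric precision = new PrecisionAtK();            // precision at k with default settings
EvaluationMetric reciprocalRank = new MeanReciprocalRank(); // mean reciprocal rank
EvaluationMetric dcg = new DiscountedCumulativeGain();      // discounted cumulative gain
--------------------------------------------------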
[[java-rest-high-rank-eval-request]]
==== Ranking Evaluation Request
In order to build a `RankEvalRequest`, you first need to create an
evaluation specification (`RankEvalSpec`). This specification requires
defining the evaluation metric that is going to be calculated, as well
as a list of rated documents per search request. Creating the ranking
evaluation request then takes the specification and a list of target
indices as arguments:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SearchDocumentationIT.java[rank-eval-request-basic]
--------------------------------------------------
<1> Define the metric used in the evaluation
<2> Add rated documents, specified by index name, id and rating
<3> Create the search query to evaluate
<4> Combine the three former parts into a `RatedRequest`
<5> Create the ranking evaluation specification
<6> Create the ranking evaluation request
[[java-rest-high-rank-eval-sync]]
==== Synchronous Execution
The `rankEval` method executes `RankEvalRequest`s synchronously:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SearchDocumentationIT.java[rank-eval-execute]
--------------------------------------------------
[[java-rest-high-rank-eval-async]]
==== Asynchronous Execution
The `rankEvalAsync` method executes `RankEvalRequest`s asynchronously,
calling the provided `ActionListener` when the response is ready.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SearchDocumentationIT.java[rank-eval-execute-async]
--------------------------------------------------
<1> The `RankEvalRequest` to execute and the `ActionListener` to use when
the execution completes
The asynchronous method does not block and returns immediately. Once the
request is completed, the `ActionListener` is called back using the
`onResponse` method if the execution completed successfully or using the
`onFailure` method if it failed.
A typical listener for `RankEvalResponse` looks like:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SearchDocumentationIT.java[rank-eval-execute-listener]
--------------------------------------------------
<1> Called when the execution is successfully completed.
<2> Called when the whole `RankEvalRequest` fails.
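As a sketch of what the callback bodies might do (assuming an application-level
`logger` is available; this is not part of the documented test), the listener
could, for example, log the overall evaluation score:
["source","java"]
--------------------------------------------------
ActionListener<RankEvalResponse> listener = new ActionListener<RankEvalResponse>() {
    @Override
    public void onResponse(RankEvalResponse response) {
        // log the overall evaluation score once the request has completed
        logger.info("rank evaluation score: {}", response.getEvaluationResult());
    }

    @Override
    public void onFailure(Exception e) {
        // the whole RankEvalRequest failed
        logger.error("rank evaluation failed", e);
    }
};
--------------------------------------------------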
==== RankEvalResponse
The `RankEvalResponse` that is returned by executing the request
contains information about the overall evaluation score, the
scores of each individual search request in the set of queries, the
rated search hits, and details about the metric calculation for each
partial result.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SearchDocumentationIT.java[rank-eval-response]
--------------------------------------------------
<1> The overall evaluation result
<2> Partial results that are keyed by their query id
<3> The metric score for each partial result
<4> Rated search hits contain a fully fledged `SearchHit`
<5> Rated search hits also contain an `Optional<Integer>` rating that
is not present if the document did not get a rating in the request
<6> Metric details are named after the metric used in the request
<7> After casting to the metric used in the request, the
metric details offer insight into parts of the metric calculation
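Putting these getters together, a client can walk over all partial results and
their rated hits. The following is purely illustrative and based on the getters
shown in the snippet above:
["source","java"]
--------------------------------------------------
for (Map.Entry<String, EvalQueryQuality> entry : response.getPartialResults().entrySet()) {
    EvalQueryQuality quality = entry.getValue();
    System.out.println(entry.getKey() + " scored " + quality.getQualityLevel());
    for (RatedSearchHit hit : quality.getHitsAndRatings()) {
        // hits that had no rating in the request come back with an empty Optional
        String rating = hit.getRating().map(String::valueOf).orElse("unrated");
        System.out.println("  " + hit.getSearchHit().getId() + " -> " + rating);
    }
}
--------------------------------------------------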


@@ -32,10 +32,12 @@ The Java High Level REST Client supports the following Search APIs:
* <<java-rest-high-search-scroll>>
* <<java-rest-high-clear-scroll>>
* <<java-rest-high-multi-search>>
* <<java-rest-high-rank-eval>>

include::search/search.asciidoc[]
include::search/scroll.asciidoc[]
include::search/multi-search.asciidoc[]
include::search/rank-eval.asciidoc[]

== Miscellaneous APIs