Merge branch 'master' into feature/client_aggs_parsing
commit 62c37339b8
@@ -468,7 +468,18 @@ class BuildPlugin implements Plugin<Project> {
    File heapdumpDir = new File(project.buildDir, 'heapdump')
    heapdumpDir.mkdirs()
    jvmArg '-XX:HeapDumpPath=' + heapdumpDir
    argLine System.getProperty('tests.jvm.argline')
    /*
     * We only want to append -XX:-OmitStackTraceInFastThrow if a flag for OmitStackTraceInFastThrow is not already included in
     * tests.jvm.argline.
     */
    final String testsJvmArgline = System.getProperty('tests.jvm.argline')
    if (testsJvmArgline == null) {
        argLine '-XX:-OmitStackTraceInFastThrow'
    } else if (testsJvmArgline.indexOf("OmitStackTraceInFastThrow") < 0) {
        argLine testsJvmArgline.trim() + ' ' + '-XX:-OmitStackTraceInFastThrow'
    } else {
        argLine testsJvmArgline.trim()
    }

    // we use './temp' since this is per JVM and tests are forbidden from writing to CWD
    systemProperty 'java.io.tmpdir', './temp'
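Context for the new guard (an illustrative sketch, not part of the build script): HotSpot's OmitStackTraceInFastThrow optimization can recompile a hot throw site to reuse a preallocated exception with no stack trace, which makes CI failures unreadable; disabling it keeps full traces. This standalone demo shows the effect when run without the flag:

// Run with default flags to (eventually) see an empty stack trace;
// run with -XX:-OmitStackTraceInFastThrow to keep traces on every throw.
public class FastThrowDemo {
    public static void main(String[] args) {
        Object o = null;
        for (int i = 0; i < 1_000_000; i++) {
            try {
                o.hashCode(); // always throws NullPointerException
            } catch (NullPointerException e) {
                if (e.getStackTrace().length == 0) {
                    System.out.println("stack trace omitted from iteration " + i + " on");
                    return;
                }
            }
        }
        System.out.println("stack traces were present on every throw");
    }
}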
@@ -147,7 +147,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]SearchPhaseController.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]ShardSearchFailure.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]TransportClearScrollAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]TransportSearchAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]DelegatingActionListener.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]IndicesOptions.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]ToXContentToBytes.java" checks="LineLength" />

@@ -454,7 +453,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulatePipelineRequestParsingTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulatePipelineResponseTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]WriteableIngestDocumentTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]RemoteClusterServiceTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]SearchRequestBuilderTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]TransportActionFilterChainTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]WaitActiveShardCountIT.java" checks="LineLength" />
@@ -0,0 +1,453 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client.documentation;

import org.apache.lucene.search.join.ScoreMode;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.ShapeRelation;
import org.elasticsearch.common.geo.builders.CoordinatesBuilder;
import org.elasticsearch.common.geo.builders.ShapeBuilders;
import org.elasticsearch.common.unit.DistanceUnit;
import org.elasticsearch.index.query.GeoShapeQueryBuilder;
import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;
import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder.FilterFunctionBuilder;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static java.util.Collections.singletonMap;
import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
import static org.elasticsearch.index.query.QueryBuilders.boostingQuery;
import static org.elasticsearch.index.query.QueryBuilders.commonTermsQuery;
import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery;
import static org.elasticsearch.index.query.QueryBuilders.disMaxQuery;
import static org.elasticsearch.index.query.QueryBuilders.existsQuery;
import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
import static org.elasticsearch.index.query.QueryBuilders.fuzzyQuery;
import static org.elasticsearch.index.query.QueryBuilders.geoBoundingBoxQuery;
import static org.elasticsearch.index.query.QueryBuilders.geoDistanceQuery;
import static org.elasticsearch.index.query.QueryBuilders.geoPolygonQuery;
import static org.elasticsearch.index.query.QueryBuilders.geoShapeQuery;
import static org.elasticsearch.index.query.QueryBuilders.hasChildQuery;
import static org.elasticsearch.index.query.QueryBuilders.hasParentQuery;
import static org.elasticsearch.index.query.QueryBuilders.idsQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
import static org.elasticsearch.index.query.QueryBuilders.moreLikeThisQuery;
import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery;
import static org.elasticsearch.index.query.QueryBuilders.nestedQuery;
import static org.elasticsearch.index.query.QueryBuilders.prefixQuery;
import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery;
import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;
import static org.elasticsearch.index.query.QueryBuilders.regexpQuery;
import static org.elasticsearch.index.query.QueryBuilders.scriptQuery;
import static org.elasticsearch.index.query.QueryBuilders.simpleQueryStringQuery;
import static org.elasticsearch.index.query.QueryBuilders.spanContainingQuery;
import static org.elasticsearch.index.query.QueryBuilders.spanFirstQuery;
import static org.elasticsearch.index.query.QueryBuilders.spanMultiTermQueryBuilder;
import static org.elasticsearch.index.query.QueryBuilders.spanNearQuery;
import static org.elasticsearch.index.query.QueryBuilders.spanNotQuery;
import static org.elasticsearch.index.query.QueryBuilders.spanOrQuery;
import static org.elasticsearch.index.query.QueryBuilders.spanTermQuery;
import static org.elasticsearch.index.query.QueryBuilders.spanWithinQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.index.query.QueryBuilders.termsQuery;
import static org.elasticsearch.index.query.QueryBuilders.typeQuery;
import static org.elasticsearch.index.query.QueryBuilders.wildcardQuery;
import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.exponentialDecayFunction;
import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.randomFunction;

/**
 * Examples of using the transport client that are imported into the transport client documentation.
 * There are no assertions here because we're mostly concerned with making sure that the examples
 * compile and don't throw weird runtime exceptions. Assertions and example data would be nice, but
 * that is secondary.
 */
public class QueryDSLDocumentationTests extends ESTestCase {
    public void testBool() {
        // tag::bool
        boolQuery()
                .must(termQuery("content", "test1")) // <1>
                .must(termQuery("content", "test4")) // <1>
                .mustNot(termQuery("content", "test2")) // <2>
                .should(termQuery("content", "test3")) // <3>
                .filter(termQuery("content", "test5")); // <4>
        // end::bool
    }

    public void testBoosting() {
        // tag::boosting
        boostingQuery(
                termQuery("name","kimchy"), // <1>
                termQuery("name","dadoonet")) // <2>
            .negativeBoost(0.2f); // <3>
        // end::boosting
    }

    public void testCommonTerms() {
        // tag::common_terms
        commonTermsQuery("name", // <1>
                "kimchy"); // <2>
        // end::common_terms
    }

    public void testConstantScore() {
        // tag::constant_score
        constantScoreQuery(
                termQuery("name","kimchy")) // <1>
            .boost(2.0f); // <2>
        // end::constant_score
    }

    public void testDisMax() {
        // tag::dis_max
        disMaxQuery()
                .add(termQuery("name", "kimchy")) // <1>
                .add(termQuery("name", "elasticsearch")) // <2>
                .boost(1.2f) // <3>
                .tieBreaker(0.7f); // <4>
        // end::dis_max
    }

    public void testExists() {
        // tag::exists
        existsQuery("name"); // <1>
        // end::exists
    }

    public void testFunctionScore() {
        // tag::function_score
        FilterFunctionBuilder[] functions = {
                new FunctionScoreQueryBuilder.FilterFunctionBuilder(
                        matchQuery("name", "kimchy"), // <1>
                        randomFunction("ABCDEF")), // <2>
                new FunctionScoreQueryBuilder.FilterFunctionBuilder(
                        exponentialDecayFunction("age", 0L, 1L)) // <3>
        };
        functionScoreQuery(functions);
        // end::function_score
    }

    public void testFuzzy() {
        // tag::fuzzy
        fuzzyQuery(
                "name", // <1>
                "kimchy"); // <2>
        // end::fuzzy
    }

    public void testGeoBoundingBox() {
        // tag::geo_bounding_box
        geoBoundingBoxQuery("pin.location") // <1>
            .setCorners(40.73, -74.1, // <2>
                40.717, -73.99); // <3>
        // end::geo_bounding_box
    }

    public void testGeoDistance() {
        // tag::geo_distance
        geoDistanceQuery("pin.location") // <1>
            .point(40, -70) // <2>
            .distance(200, DistanceUnit.KILOMETERS); // <3>
        // end::geo_distance
    }

    public void testGeoPolygon() {
        // tag::geo_polygon
        List<GeoPoint> points = new ArrayList<GeoPoint>(); // <1>
        points.add(new GeoPoint(40, -70));
        points.add(new GeoPoint(30, -80));
        points.add(new GeoPoint(20, -90));
        geoPolygonQuery("pin.location", points); // <2>
        // end::geo_polygon
    }

    public void testGeoShape() throws IOException {
        {
            // tag::geo_shape
            GeoShapeQueryBuilder qb = geoShapeQuery(
                    "pin.location", // <1>
                    ShapeBuilders.newMultiPoint( // <2>
                            new CoordinatesBuilder()
                        .coordinate(0, 0)
                        .coordinate(0, 10)
                        .coordinate(10, 10)
                        .coordinate(10, 0)
                        .coordinate(0, 0)
                        .build()));
            qb.relation(ShapeRelation.WITHIN); // <3>
            // end::geo_shape
        }

        {
            // tag::indexed_geo_shape
            // Using pre-indexed shapes
            GeoShapeQueryBuilder qb = geoShapeQuery(
                    "pin.location", // <1>
                    "DEU", // <2>
                    "countries"); // <3>
            qb.relation(ShapeRelation.WITHIN) // <4>
                .indexedShapeIndex("shapes") // <5>
                .indexedShapePath("location"); // <6>
            // end::indexed_geo_shape
        }
    }

    public void testHasChild() {
        // tag::has_child
        hasChildQuery(
                "blog_tag", // <1>
                termQuery("tag","something"), // <2>
                ScoreMode.None); // <3>
        // end::has_child
    }

    public void testHasParent() {
        // tag::has_parent
        hasParentQuery(
                "blog", // <1>
                termQuery("tag","something"), // <2>
                false); // <3>
        // end::has_parent
    }

    public void testIds() {
        // tag::ids
        idsQuery("my_type", "type2")
                .addIds("1", "4", "100");

        idsQuery() // <1>
                .addIds("1", "4", "100");
        // end::ids
    }

    public void testMatchAll() {
        // tag::match_all
        matchAllQuery();
        // end::match_all
    }

    public void testMatch() {
        // tag::match
        matchQuery(
                "name", // <1>
                "kimchy elasticsearch"); // <2>
        // end::match
    }

    public void testMoreLikeThis() {
        // tag::more_like_this
        String[] fields = {"name.first", "name.last"}; // <1>
        String[] texts = {"text like this one"}; // <2>

        moreLikeThisQuery(fields, texts, null)
            .minTermFreq(1) // <3>
            .maxQueryTerms(12); // <4>
        // end::more_like_this
    }

    public void testMultiMatch() {
        // tag::multi_match
        multiMatchQuery(
                "kimchy elasticsearch", // <1>
                "user", "message"); // <2>
        // end::multi_match
    }

    public void testNested() {
        // tag::nested
        nestedQuery(
                "obj1", // <1>
                boolQuery() // <2>
                        .must(matchQuery("obj1.name", "blue"))
                        .must(rangeQuery("obj1.count").gt(5)),
                ScoreMode.Avg); // <3>
        // end::nested
    }

    public void testPrefix() {
        // tag::prefix
        prefixQuery(
                "brand", // <1>
                "heine"); // <2>
        // end::prefix
    }

    public void testQueryString() {
        // tag::query_string
        queryStringQuery("+kimchy -elasticsearch");
        // end::query_string
    }

    public void testRange() {
        // tag::range
        rangeQuery("price") // <1>
            .from(5) // <2>
            .to(10) // <3>
            .includeLower(true) // <4>
            .includeUpper(false); // <5>
        // end::range

        // tag::range_simplified
        // A simplified form using gte, gt, lt or lte
        rangeQuery("age") // <1>
            .gte("10") // <2>
            .lt("20"); // <3>
        // end::range_simplified
    }

    public void testRegExp() {
        // tag::regexp
        regexpQuery(
                "name.first", // <1>
                "s.*y"); // <2>
        // end::regexp
    }

    public void testScript() {
        // tag::script_inline
        scriptQuery(
                new Script("doc['num1'].value > 1") // <1>
        );
        // end::script_inline

        // tag::script_file
        Map<String, Object> parameters = new HashMap<>();
        parameters.put("param1", 5);
        scriptQuery(new Script(
                ScriptType.FILE, // <1>
                "painless", // <2>
                "myscript", // <3>
                singletonMap("param1", 5))); // <4>
        // end::script_file
    }

    public void testSimpleQueryString() {
        // tag::simple_query_string
        simpleQueryStringQuery("+kimchy -elasticsearch");
        // end::simple_query_string
    }

    public void testSpanContaining() {
        // tag::span_containing
        spanContainingQuery(
                spanNearQuery(spanTermQuery("field1","bar"), 5) // <1>
                    .addClause(spanTermQuery("field1","baz"))
                    .inOrder(true),
                spanTermQuery("field1","foo")); // <2>
        // end::span_containing
    }

    public void testSpanFirst() {
        // tag::span_first
        spanFirstQuery(
                spanTermQuery("user", "kimchy"), // <1>
                3 // <2>
        );
        // end::span_first
    }

    public void testSpanMultiTerm() {
        // tag::span_multi
        spanMultiTermQueryBuilder(
                prefixQuery("user", "ki")); // <1>
        // end::span_multi
    }

    public void testSpanNear() {
        // tag::span_near
        spanNearQuery(
                spanTermQuery("field","value1"), // <1>
                12) // <2>
            .addClause(spanTermQuery("field","value2")) // <1>
            .addClause(spanTermQuery("field","value3")) // <1>
            .inOrder(false); // <3>
        // end::span_near
    }

    public void testSpanNot() {
        // tag::span_not
        spanNotQuery(
                spanTermQuery("field","value1"), // <1>
                spanTermQuery("field","value2")); // <2>
        // end::span_not
    }

    public void testSpanOr() {
        // tag::span_or
        spanOrQuery(spanTermQuery("field","value1")) // <1>
            .addClause(spanTermQuery("field","value2")) // <1>
            .addClause(spanTermQuery("field","value3")); // <1>
        // end::span_or
    }

    public void testSpanTerm() {
        // tag::span_term
        spanTermQuery(
                "user", // <1>
                "kimchy"); // <2>
        // end::span_term
    }

    public void testSpanWithin() {
        // tag::span_within
        spanWithinQuery(
                spanNearQuery(spanTermQuery("field1", "bar"), 5) // <1>
                    .addClause(spanTermQuery("field1", "baz"))
                    .inOrder(true),
                spanTermQuery("field1", "foo")); // <2>
        // end::span_within
    }

    public void testTerm() {
        // tag::term
        termQuery(
                "name", // <1>
                "kimchy"); // <2>
        // end::term
    }

    public void testTerms() {
        // tag::terms
        termsQuery("tags", // <1>
                "blue", "pill"); // <2>
        // end::terms
    }

    public void testType() {
        // tag::type
        typeQuery("my_type"); // <1>
        // end::type
    }

    public void testWildcard() {
        // tag::wildcard
        wildcardQuery(
                "user", // <1>
                "k?mch*"); // <2>
        // end::wildcard
    }
}
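The tag::…/end::… markers above delimit snippets that the documentation build pulls into the Java API docs; the test only checks that they compile and run. A minimal, hypothetical harness for trying one of the snippets against a live node might look as follows; the client setup, address, and index name are assumptions (and transport-address class names vary across 5.x/6.x), not part of this commit:

import java.net.InetAddress;

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.transport.client.PreBuiltTransportClient;

import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;

public class QueryDslSnippetHarness {
    public static void main(String[] args) throws Exception {
        // Assumed local node address; adjust for your environment.
        try (TransportClient client = new PreBuiltTransportClient(Settings.EMPTY)
                .addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("localhost"), 9300))) {
            SearchResponse response = client.prepareSearch("index")
                    .setQuery(boolQuery().must(termQuery("content", "test1")))
                    .get();
            System.out.println(response.status());
        }
    }
}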
@@ -19,18 +19,16 @@

package org.elasticsearch.action;

import org.elasticsearch.action.support.PlainListenableActionFuture;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.Objects;

public abstract class ActionRequestBuilder<Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> {
public abstract class ActionRequestBuilder<Request extends ActionRequest, Response extends ActionResponse,
        RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> {

    protected final Action<Request, Response, RequestBuilder> action;
    protected final Request request;
    private final ThreadPool threadPool;
    protected final ElasticsearchClient client;

    protected ActionRequestBuilder(ElasticsearchClient client, Action<Request, Response, RequestBuilder> action, Request request) {

@@ -38,18 +36,14 @@ public abstract class ActionRequestBuilder<Request extends ActionRequest, Respon
        this.action = action;
        this.request = request;
        this.client = client;
        threadPool = client.threadPool();
    }

    public Request request() {
        return this.request;
    }

    public ListenableActionFuture<Response> execute() {
        PlainListenableActionFuture<Response> future = new PlainListenableActionFuture<>(threadPool);
        execute(future);
        return future;
    public ActionFuture<Response> execute() {
        return client.execute(action, request);
    }

    /**

@@ -74,13 +68,6 @@ public abstract class ActionRequestBuilder<Request extends ActionRequest, Respon
    }

    public void execute(ActionListener<Response> listener) {
        client.execute(action, beforeExecute(request), listener);
    }

    /**
     * A callback to additionally process the request before its executed
     */
    protected Request beforeExecute(Request request) {
        return request;
        client.execute(action, request, listener);
    }
}
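A hedged sketch of the simplified contract above: execute() now returns the client's plain ActionFuture instead of a ThreadPool-backed PlainListenableActionFuture, and the beforeExecute hook is gone, so the listener path forwards the request untouched. The client and index name below are assumptions, shown as an illustrative fragment:

// Blocking call through the simplified builder API (illustrative only).
ActionFuture<SearchResponse> future = client.prepareSearch("index").execute();
SearchResponse response = future.actionGet();

// Asynchronous variant; the request is passed through unchanged.
client.prepareSearch("index").execute(new ActionListener<SearchResponse>() {
    @Override
    public void onResponse(SearchResponse searchResponse) { /* handle hits */ }
    @Override
    public void onFailure(Exception e) { /* handle failure */ }
});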
@@ -20,7 +20,7 @@
package org.elasticsearch.action.admin.cluster.remote;

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.search.RemoteConnectionInfo;
import org.elasticsearch.transport.RemoteConnectionInfo;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContentObject;
@@ -20,7 +20,7 @@
package org.elasticsearch.action.admin.cluster.remote;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.search.RemoteClusterService;
import org.elasticsearch.transport.RemoteClusterService;
import org.elasticsearch.action.search.SearchTransportService;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;

@@ -30,8 +30,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.ArrayList;

public final class TransportRemoteInfoAction extends HandledTransportAction<RemoteInfoRequest, RemoteInfoResponse> {

    private final RemoteClusterService remoteClusterService;
@@ -37,7 +37,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
public class CloseIndexRequest extends AcknowledgedRequest<CloseIndexRequest> implements IndicesRequest.Replaceable {

    private String[] indices;
    private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false);
    private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen();

    public CloseIndexRequest() {
    }
@@ -37,7 +37,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
public class OpenIndexRequest extends AcknowledgedRequest<OpenIndexRequest> implements IndicesRequest.Replaceable {

    private String[] indices;
    private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, false, true);
    private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, false, true);

    public OpenIndexRequest() {
    }
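For reference (from the IndicesOptions API of this era, not shown in the diff): the fromOptions flags are, in order, ignoreUnavailable, allowNoIndices, expandWildcardsOpen, expandWildcardsClosed. Read that way, the OpenIndexRequest change keeps closed-index wildcard expansion but flips allowNoIndices from false to true, so a wildcard that matches nothing no longer fails the request. An illustrative fragment:

// Old default: fail when a wildcard resolves to no indices.
IndicesOptions before = IndicesOptions.fromOptions(false, false, false, true);
// New default: tolerate an empty wildcard match, still expanding to closed indices.
IndicesOptions after = IndicesOptions.fromOptions(false, true, false, true);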
@@ -29,7 +29,6 @@ import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.health.ClusterShardHealth;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;

@@ -155,7 +154,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
    }

    @Override
    protected synchronized void processAsyncFetch(ShardId shardId, List<NodeGatewayStartedShards> responses, List<FailedNodeException> failures) {
    protected synchronized void processAsyncFetch(List<NodeGatewayStartedShards> responses, List<FailedNodeException> failures, long fetchingRound) {
        fetchResponses.add(new Response(shardId, responses, failures));
        if (expectedOps.countDown()) {
            finish();
@@ -29,7 +29,6 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchPhaseResult;

@@ -44,7 +43,7 @@ import java.util.Map;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import java.util.function.BiFunction;
import java.util.stream.Collectors;

abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> extends InitialSearchPhase<Result>

@@ -58,7 +57,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
    /**
     * Used by subclasses to resolve node ids to DiscoveryNodes.
     **/
    private final Function<String, Transport.Connection> nodeIdToConnection;
    private final BiFunction<String, String, Transport.Connection> nodeIdToConnection;
    private final SearchTask task;
    private final SearchPhaseResults<Result> results;
    private final long clusterStateVersion;

@@ -71,7 +70,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten

    protected AbstractSearchAsyncAction(String name, Logger logger, SearchTransportService searchTransportService,
            Function<String, Transport.Connection> nodeIdToConnection,
            BiFunction<String, String, Transport.Connection> nodeIdToConnection,
            Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts,
            Executor executor, SearchRequest request,
            ActionListener<SearchResponse> listener, GroupShardsIterator<SearchShardIterator> shardsIts,

@@ -210,7 +209,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
    results.getSuccessfulResults().forEach((entry) -> {
        try {
            SearchShardTarget searchShardTarget = entry.getSearchShardTarget();
            Transport.Connection connection = nodeIdToConnection.apply(searchShardTarget.getNodeId());
            Transport.Connection connection = getConnection(null, searchShardTarget.getNodeId());
            sendReleaseSearchContext(entry.getRequestId(), connection, searchShardTarget.getOriginalIndices());
        } catch (Exception inner) {
            inner.addSuppressed(exception);

@@ -273,8 +272,8 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
    }

    @Override
    public final Transport.Connection getConnection(String nodeId) {
        return nodeIdToConnection.apply(nodeId);
    public final Transport.Connection getConnection(String clusterAlias, String nodeId) {
        return nodeIdToConnection.apply(clusterAlias, nodeId);
    }

    @Override

@@ -297,10 +296,10 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
        listener.onFailure(e);
    }

    public final ShardSearchTransportRequest buildShardSearchRequest(SearchShardIterator shardIt, ShardRouting shard) {
        AliasFilter filter = aliasFilter.get(shard.index().getUUID());
    public final ShardSearchTransportRequest buildShardSearchRequest(SearchShardIterator shardIt) {
        AliasFilter filter = aliasFilter.get(shardIt.shardId().getIndex().getUUID());
        assert filter != null;
        float indexBoost = concreteIndexBoosts.getOrDefault(shard.index().getUUID(), DEFAULT_INDEX_BOOST);
        float indexBoost = concreteIndexBoosts.getOrDefault(shardIt.shardId().getIndex().getUUID(), DEFAULT_INDEX_BOOST);
        return new ShardSearchTransportRequest(shardIt.getOriginalIndices(), request, shardIt.shardId(), getNumShards(),
            filter, indexBoost, timeProvider.getAbsoluteStartMillis());
    }
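The recurring theme of this merge is the switch from Function<String, Connection> to BiFunction<String, String, Connection>: a connection is now looked up by (clusterAlias, nodeId), with a null alias meaning the local cluster. A self-contained sketch of that contract, using plain strings in place of Transport.Connection (all names below are invented for illustration):

import java.util.HashMap;
import java.util.Map;
import java.util.function.BiFunction;

public class ConnectionLookupSketch {
    public static void main(String[] args) {
        // Hypothetical handles standing in for Transport.Connection instances.
        Map<String, String> localNodes = new HashMap<>();
        localNodes.put("nodeA", "connection->local/nodeA");
        Map<String, Map<String, String>> remoteNodes = new HashMap<>();
        remoteNodes.put("cluster_one", new HashMap<>());
        remoteNodes.get("cluster_one").put("n1", "connection->cluster_one/n1");

        // A null cluster alias resolves against the local cluster, mirroring
        // AbstractSearchAsyncAction#getConnection(String, String) above.
        BiFunction<String, String, String> lookup = (clusterAlias, nodeId) ->
                clusterAlias == null ? localNodes.get(nodeId) : remoteNodes.get(clusterAlias).get(nodeId);

        System.out.println(lookup.apply(null, "nodeA"));        // local lookup
        System.out.println(lookup.apply("cluster_one", "n1"));  // remote lookup
    }
}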
@@ -72,7 +72,7 @@ final class DfsQueryPhase extends SearchPhase {
        () -> context.executeNextPhase(this, nextPhaseFactory.apply(queryResult)), context);
    for (final DfsSearchResult dfsResult : resultList) {
        final SearchShardTarget searchShardTarget = dfsResult.getSearchShardTarget();
        Transport.Connection connection = context.getConnection(searchShardTarget.getNodeId());
        Transport.Connection connection = context.getConnection(searchShardTarget.getClusterAlias(), searchShardTarget.getNodeId());
        QuerySearchRequest querySearchRequest = new QuerySearchRequest(searchShardTarget.getOriginalIndices(),
            dfsResult.getRequestId(), dfs);
        final int shardIndex = dfsResult.getShardIndex();
@@ -136,7 +136,8 @@ final class FetchSearchPhase extends SearchPhase {
        counter.countDown();
    } else {
        SearchShardTarget searchShardTarget = queryResult.getSearchShardTarget();
        Transport.Connection connection = context.getConnection(searchShardTarget.getNodeId());
        Transport.Connection connection = context.getConnection(searchShardTarget.getClusterAlias(),
            searchShardTarget.getNodeId());
        ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult.queryResult().getRequestId(), i, entry,
            lastEmittedDocPerShard, searchShardTarget.getOriginalIndices());
        executeFetch(i, searchShardTarget, counter, fetchSearchRequest, queryResult.queryResult(),

@@ -191,7 +192,7 @@ final class FetchSearchPhase extends SearchPhase {
    if (context.getRequest().scroll() == null && queryResult.hasSearchContext()) {
        try {
            SearchShardTarget searchShardTarget = queryResult.getSearchShardTarget();
            Transport.Connection connection = context.getConnection(searchShardTarget.getNodeId());
            Transport.Connection connection = context.getConnection(searchShardTarget.getClusterAlias(), searchShardTarget.getNodeId());
            context.sendReleaseSearchContext(queryResult.getRequestId(), connection, searchShardTarget.getOriginalIndices());
        } catch (Exception e) {
            context.getLogger().trace("failed to release context", e);
@@ -67,7 +67,8 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
        final SearchShardIterator shardIt, Exception e) {
    // we always add the shard failure for a specific shard instance
    // we do make sure to clean it on a successful response from a shard
    SearchShardTarget shardTarget = new SearchShardTarget(nodeId, shardIt.shardId(), shardIt.getOriginalIndices());
    SearchShardTarget shardTarget = new SearchShardTarget(nodeId, shardIt.shardId(), shardIt.getClusterAlias(),
        shardIt.getOriginalIndices());
    onShardFailure(shardIndex, shardTarget, e);

    if (totalOps.incrementAndGet() == expectedTotalOps) {

@@ -144,7 +145,7 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
    } else {
        try {
            executePhaseOnShard(shardIt, shard, new SearchActionListener<FirstResult>(new SearchShardTarget(shard.currentNodeId(),
                shardIt.shardId(), shardIt.getOriginalIndices()), shardIndex) {
                shardIt.shardId(), shardIt.getClusterAlias(), shardIt.getOriginalIndices()), shardIndex) {
                @Override
                public void innerOnResponse(FirstResult result) {
                    onShardResult(result, shardIt);
@@ -29,62 +29,33 @@ import org.elasticsearch.transport.Transport;

import java.util.Map;
import java.util.concurrent.Executor;
import java.util.function.Function;
import java.util.function.BiFunction;

final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSearchResult> {

    private final SearchPhaseController searchPhaseController;

    SearchDfsQueryThenFetchAsyncAction(
            final Logger logger,
            final SearchTransportService searchTransportService,
            final Function<String, Transport.Connection> nodeIdToConnection,
            final Map<String, AliasFilter> aliasFilter,
            final Map<String, Float> concreteIndexBoosts,
            final SearchPhaseController searchPhaseController,
            final Executor executor,
            final SearchRequest request,
            final ActionListener<SearchResponse> listener,
            final GroupShardsIterator<SearchShardIterator> shardsIts,
            final TransportSearchAction.SearchTimeProvider timeProvider,
            final long clusterStateVersion,
            final SearchTask task) {
        super(
                "dfs",
                logger,
                searchTransportService,
                nodeIdToConnection,
                aliasFilter,
                concreteIndexBoosts,
                executor,
                request,
                listener,
                shardsIts,
                timeProvider,
                clusterStateVersion,
                task,
                new SearchPhaseResults<>(shardsIts.size()));
    SearchDfsQueryThenFetchAsyncAction(final Logger logger, final SearchTransportService searchTransportService,
            final BiFunction<String, String, Transport.Connection> nodeIdToConnection, final Map<String, AliasFilter> aliasFilter,
            final Map<String, Float> concreteIndexBoosts, final SearchPhaseController searchPhaseController, final Executor executor,
            final SearchRequest request, final ActionListener<SearchResponse> listener,
            final GroupShardsIterator<SearchShardIterator> shardsIts, final TransportSearchAction.SearchTimeProvider timeProvider,
            final long clusterStateVersion, final SearchTask task) {
        super("dfs", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor, request, listener,
                shardsIts, timeProvider, clusterStateVersion, task, new SearchPhaseResults<>(shardsIts.size()));
        this.searchPhaseController = searchPhaseController;
    }

    @Override
    protected void executePhaseOnShard(
            final SearchShardIterator shardIt,
            final ShardRouting shard,
            final SearchActionListener<DfsSearchResult> listener) {
        getSearchTransport().sendExecuteDfs(getConnection(shard.currentNodeId()),
                buildShardSearchRequest(shardIt, shard) , getTask(), listener);
    protected void executePhaseOnShard(final SearchShardIterator shardIt, final ShardRouting shard,
            final SearchActionListener<DfsSearchResult> listener) {
        getSearchTransport().sendExecuteDfs(getConnection(shardIt.getClusterAlias(), shard.currentNodeId()),
                buildShardSearchRequest(shardIt) , getTask(), listener);
    }

    @Override
    protected SearchPhase getNextPhase(
            final SearchPhaseResults<DfsSearchResult> results, final SearchPhaseContext context) {
        return new DfsQueryPhase(
                results.results,
                searchPhaseController,
                (queryResults) ->
                        new FetchSearchPhase(queryResults, searchPhaseController, context),
                context);
    protected SearchPhase getNextPhase(final SearchPhaseResults<DfsSearchResult> results, final SearchPhaseContext context) {
        return new DfsQueryPhase(results.results, searchPhaseController, (queryResults) ->
                new FetchSearchPhase(queryResults, searchPhaseController, context), context);
    }

}
@@ -21,7 +21,6 @@ package org.elasticsearch.action.search;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.internal.InternalSearchResponse;

@@ -84,7 +83,7 @@ interface SearchPhaseContext extends ActionListener<SearchResponse>, Executor {
     * Returns a connection to the node if connected otherwise and {@link org.elasticsearch.transport.ConnectTransportException} will be
     * thrown.
     */
    Transport.Connection getConnection(String nodeId);
    Transport.Connection getConnection(String clusterAlias, String nodeId);

    /**
     * Returns the {@link SearchTransportService} to send shard request to other nodes

@@ -106,7 +105,7 @@ interface SearchPhaseContext extends ActionListener<SearchResponse>, Executor {
    /**
     * Builds an request for the initial search phase.
     */
    ShardSearchTransportRequest buildShardSearchRequest(SearchShardIterator shardIt, ShardRouting shard);
    ShardSearchTransportRequest buildShardSearchRequest(SearchShardIterator shardIt);

    /**
     * Processes the phase transition from on phase to another. This method handles all errors that happen during the initial run execution
@@ -29,59 +29,31 @@ import org.elasticsearch.transport.Transport;

import java.util.Map;
import java.util.concurrent.Executor;
import java.util.function.Function;
import java.util.function.BiFunction;

final class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<SearchPhaseResult> {

    private final SearchPhaseController searchPhaseController;

    SearchQueryThenFetchAsyncAction(
            final Logger logger,
            final SearchTransportService searchTransportService,
            final Function<String, Transport.Connection> nodeIdToConnection,
            final Map<String, AliasFilter> aliasFilter,
            final Map<String, Float> concreteIndexBoosts,
            final SearchPhaseController searchPhaseController,
            final Executor executor,
            final SearchRequest request,
            final ActionListener<SearchResponse> listener,
            final GroupShardsIterator<SearchShardIterator> shardsIts,
            final TransportSearchAction.SearchTimeProvider timeProvider,
            long clusterStateVersion,
            SearchTask task) {
        super(
                "query",
                logger,
                searchTransportService,
                nodeIdToConnection,
                aliasFilter,
                concreteIndexBoosts,
                executor,
                request,
                listener,
                shardsIts,
                timeProvider,
                clusterStateVersion,
                task,
                searchPhaseController.newSearchPhaseResults(request, shardsIts.size()));
    SearchQueryThenFetchAsyncAction(final Logger logger, final SearchTransportService searchTransportService,
            final BiFunction<String, String, Transport.Connection> nodeIdToConnection, final Map<String, AliasFilter> aliasFilter,
            final Map<String, Float> concreteIndexBoosts, final SearchPhaseController searchPhaseController, final Executor executor,
            final SearchRequest request, final ActionListener<SearchResponse> listener,
            final GroupShardsIterator<SearchShardIterator> shardsIts, final TransportSearchAction.SearchTimeProvider timeProvider,
            long clusterStateVersion, SearchTask task) {
        super("query", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor, request, listener,
                shardsIts, timeProvider, clusterStateVersion, task, searchPhaseController.newSearchPhaseResults(request, shardsIts.size()));
        this.searchPhaseController = searchPhaseController;
    }

    protected void executePhaseOnShard(
            final SearchShardIterator shardIt,
            final ShardRouting shard,
            final SearchActionListener<SearchPhaseResult> listener) {
        getSearchTransport().sendExecuteQuery(
                getConnection(shard.currentNodeId()),
                buildShardSearchRequest(shardIt, shard),
                getTask(),
                listener);
    protected void executePhaseOnShard(final SearchShardIterator shardIt, final ShardRouting shard,
            final SearchActionListener<SearchPhaseResult> listener) {
        getSearchTransport().sendExecuteQuery(getConnection(shardIt.getClusterAlias(), shard.currentNodeId()),
                buildShardSearchRequest(shardIt), getTask(), listener);
    }

    @Override
    protected SearchPhase getNextPhase(
            final SearchPhaseResults<SearchPhaseResult> results,
            final SearchPhaseContext context) {
    protected SearchPhase getNextPhase(final SearchPhaseResults<SearchPhaseResult> results, final SearchPhaseContext context) {
        return new FetchSearchPhase(results, searchPhaseController, context);
    }
}
@@ -33,6 +33,7 @@ import java.util.List;
public final class SearchShardIterator extends PlainShardIterator {

    private final OriginalIndices originalIndices;
    private String clusterAlias;

    /**
     * Creates a {@link PlainShardIterator} instance that iterates over a subset of the given shards

@@ -41,9 +42,10 @@ public final class SearchShardIterator extends PlainShardIterator {
     * @param shardId shard id of the group
     * @param shards shards to iterate
     */
    public SearchShardIterator(ShardId shardId, List<ShardRouting> shards, OriginalIndices originalIndices) {
    public SearchShardIterator(String clusterAlias, ShardId shardId, List<ShardRouting> shards, OriginalIndices originalIndices) {
        super(shardId, shards);
        this.originalIndices = originalIndices;
        this.clusterAlias = clusterAlias;
    }

    /**

@@ -52,4 +54,8 @@ public final class SearchShardIterator extends PlainShardIterator {
    public OriginalIndices getOriginalIndices() {
        return originalIndices;
    }

    public String getClusterAlias() {
        return clusterAlias;
    }
}
@@ -26,10 +26,9 @@ import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchService;

@@ -46,6 +45,7 @@ import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.ScrollQuerySearchResult;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.RemoteClusterService;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportActionProxy;
import org.elasticsearch.transport.TaskAwareTransportRequestHandler;

@@ -62,7 +62,7 @@ import java.util.function.Supplier;
 * An encapsulation of {@link org.elasticsearch.search.SearchService} operations exposed through
 * transport.
 */
public class SearchTransportService extends AbstractLifecycleComponent {
public class SearchTransportService extends AbstractComponent {

    public static final String FREE_CONTEXT_SCROLL_ACTION_NAME = "indices:data/read/search[free_context/scroll]";
    public static final String FREE_CONTEXT_ACTION_NAME = "indices:data/read/search[free_context]";

@@ -77,19 +77,10 @@ public class SearchTransportService extends AbstractLifecycleComponent {
    public static final String FETCH_ID_ACTION_NAME = "indices:data/read/search[phase/fetch/id]";

    private final TransportService transportService;
    private final RemoteClusterService remoteClusterService;
    private final boolean connectToRemote;

    public SearchTransportService(Settings settings, ClusterSettings clusterSettings, TransportService transportService) {
    public SearchTransportService(Settings settings, TransportService transportService) {
        super(settings);
        this.connectToRemote = RemoteClusterService.ENABLE_REMOTE_CLUSTERS.get(settings);
        this.transportService = transportService;
        this.remoteClusterService = new RemoteClusterService(settings, transportService);
        if (connectToRemote) {
            clusterSettings.addAffixUpdateConsumer(RemoteClusterService.REMOTE_CLUSTERS_SEEDS, remoteClusterService::updateRemoteCluster,
                (namespace, value) -> {
                });
        }
    }

    public void sendFreeContext(Transport.Connection connection, final long contextId, OriginalIndices originalIndices) {

@@ -183,7 +174,7 @@ public class SearchTransportService extends AbstractLifecycleComponent {
    }

    public RemoteClusterService getRemoteClusterService() {
        return remoteClusterService;
        return transportService.getRemoteClusterService();
    }

    static class ScrollFreeContextRequest extends TransportRequest {

@@ -398,23 +389,18 @@ public class SearchTransportService extends AbstractLifecycleComponent {
        TransportActionProxy.registerProxyAction(transportService, FETCH_ID_ACTION_NAME, FetchSearchResult::new);
    }

    Transport.Connection getConnection(DiscoveryNode node) {
        return transportService.getConnection(node);
    }

    @Override
    protected void doStart() {
        if (connectToRemote) {
            // here we start to connect to the remote clusters
            remoteClusterService.initializeRemoteClusters();
    /**
     * Returns a connection to the given node on the provided cluster. If the cluster alias is <code>null</code> the node will be resolved
     * against the local cluster.
     * @param clusterAlias the cluster alias the node should be resolve against
     * @param node the node to resolve
     * @return a connection to the given node belonging to the cluster with the provided alias.
     */
    Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) {
        if (clusterAlias == null) {
            return transportService.getConnection(node);
        } else {
            return transportService.getRemoteClusterService().getConnection(node, clusterAlias);
        }
    }

    @Override
    protected void doStop() {}

    @Override
    protected void doClose() throws IOException {
        remoteClusterService.close();
    }
}
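With SearchTransportService no longer owning the RemoteClusterService or a lifecycle, remote clusters are registered and initialized by the TransportService layer instead. Configuration is unchanged in spirit; a hedged fragment showing the seed setting of this era (the alias and address are assumptions, and the key was later renamed to cluster.remote.*.seeds):

// Registers one remote cluster named "cluster_one" via the affix setting
// consumed by RemoteClusterService.REMOTE_CLUSTERS_SEEDS.
Settings settings = Settings.builder()
        .put("search.remote.cluster_one.seeds", "127.0.0.1:9300")
        .build();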
@@ -214,7 +214,7 @@ public class ShardSearchFailure implements ShardOperationFailedException {
        }
        return new ShardSearchFailure(exception,
            new SearchShardTarget(nodeId,
                new ShardId(new Index(indexName, IndexMetaData.INDEX_UUID_NA_VALUE), shardId), OriginalIndices.NONE));
                new ShardId(new Index(indexName, IndexMetaData.INDEX_UUID_NA_VALUE), shardId), null, OriginalIndices.NONE));
    }

    @Override
@ -21,6 +21,8 @@ package org.elasticsearch.action.search;
|
|||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.OriginalIndices;
|
||||
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup;
|
||||
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.HandledTransportAction;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
|
@ -37,22 +39,26 @@ import org.elasticsearch.common.settings.Setting;
|
|||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.search.SearchService;
|
||||
import org.elasticsearch.search.builder.SearchSourceBuilder;
|
||||
import org.elasticsearch.search.internal.AliasFilter;
|
||||
import org.elasticsearch.tasks.Task;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.RemoteClusterAware;
|
||||
import org.elasticsearch.transport.RemoteClusterService;
|
||||
import org.elasticsearch.transport.Transport;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.Executor;
|
||||
import java.util.function.Function;
|
||||
import java.util.function.BiFunction;
|
||||
import java.util.function.LongSupplier;
|
||||
|
||||
import static org.elasticsearch.action.search.SearchType.QUERY_THEN_FETCH;
|
||||
|
@ -179,7 +185,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
|
|||
final Map<String, List<String>> groupedIndices = remoteClusterService.groupClusterIndices(searchRequest.indices(),
|
||||
// empty string is not allowed
|
||||
idx -> indexNameExpressionResolver.hasIndexOrAlias(idx, clusterState));
|
||||
List<String> remove = groupedIndices.remove(RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY);
|
||||
List<String> remove = groupedIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
|
||||
String[] indices = remove == null ? Strings.EMPTY_ARRAY : remove.toArray(new String[remove.size()]);
|
||||
localIndices = new OriginalIndices(indices, searchRequest.indicesOptions());
|
||||
Map<String, OriginalIndices> originalIndicesMap = new HashMap<>();
|
||||
|
@ -197,22 +203,68 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
|
|||
|
||||
if (remoteClusterIndices.isEmpty()) {
|
||||
executeSearch((SearchTask)task, timeProvider, searchRequest, localIndices, Collections.emptyList(),
|
||||
(nodeId) -> null, clusterState, Collections.emptyMap(), listener);
|
||||
(clusterName, nodeId) -> null, clusterState, Collections.emptyMap(), listener);
|
||||
} else {
|
||||
remoteClusterService.collectSearchShards(searchRequest, remoteClusterIndices,
|
||||
ActionListener.wrap((searchShardsResponses) -> {
|
||||
List<SearchShardIterator> remoteShardIterators = new ArrayList<>();
|
||||
Map<String, AliasFilter> remoteAliasFilters = new HashMap<>();
|
||||
Function<String, Transport.Connection> connectionFunction = remoteClusterService.processRemoteShards(
|
||||
searchShardsResponses, remoteClusterIndices, remoteShardIterators, remoteAliasFilters);
|
||||
BiFunction<String, String, DiscoveryNode> clusterNodeLookup = processRemoteShards(searchShardsResponses,
|
||||
remoteClusterIndices, remoteShardIterators, remoteAliasFilters);
|
||||
executeSearch((SearchTask)task, timeProvider, searchRequest, localIndices, remoteShardIterators,
|
||||
connectionFunction, clusterState, remoteAliasFilters, listener);
|
||||
clusterNodeLookup, clusterState, remoteAliasFilters, listener);
|
||||
}, listener::onFailure));
|
||||
}
|
||||
}
|
||||
|
||||
static BiFunction<String, String, DiscoveryNode> processRemoteShards(Map<String, ClusterSearchShardsResponse> searchShardsResponses,
|
||||
Map<String, OriginalIndices> remoteIndicesByCluster,
|
||||
List<SearchShardIterator> remoteShardIterators,
|
||||
Map<String, AliasFilter> aliasFilterMap) {
|
||||
Map<String, Map<String, DiscoveryNode>> clusterToNode = new HashMap<>();
|
||||
for (Map.Entry<String, ClusterSearchShardsResponse> entry : searchShardsResponses.entrySet()) {
|
||||
String clusterAlias = entry.getKey();
|
||||
ClusterSearchShardsResponse searchShardsResponse = entry.getValue();
|
||||
HashMap<String, DiscoveryNode> idToDiscoveryNode = new HashMap<>();
|
||||
clusterToNode.put(clusterAlias, idToDiscoveryNode);
|
||||
for (DiscoveryNode remoteNode : searchShardsResponse.getNodes()) {
|
||||
idToDiscoveryNode.put(remoteNode.getId(), remoteNode);
|
||||
}
|
||||
Map<String, AliasFilter> indicesAndFilters = searchShardsResponse.getIndicesAndFilters();
|
||||
for (ClusterSearchShardsGroup clusterSearchShardsGroup : searchShardsResponse.getGroups()) {
|
||||
//add the cluster name to the remote index names for indices disambiguation
|
||||
//this ends up in the hits returned with the search response
|
||||
ShardId shardId = clusterSearchShardsGroup.getShardId();
|
||||
Index remoteIndex = shardId.getIndex();
|
||||
Index index = new Index(clusterAlias + RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR + remoteIndex.getName(),
|
||||
                        remoteIndex.getUUID());
                    OriginalIndices originalIndices = remoteIndicesByCluster.get(clusterAlias);
                    assert originalIndices != null;
                    SearchShardIterator shardIterator = new SearchShardIterator(clusterAlias, new ShardId(index, shardId.getId()),
                        Arrays.asList(clusterSearchShardsGroup.getShards()), originalIndices);
                    remoteShardIterators.add(shardIterator);
                    AliasFilter aliasFilter;
                    if (indicesAndFilters == null) {
                        aliasFilter = new AliasFilter(null, Strings.EMPTY_ARRAY);
                    } else {
                        aliasFilter = indicesAndFilters.get(shardId.getIndexName());
                        assert aliasFilter != null;
                    }
                    // here we have to map the filters to the UUID since from now on we use the uuid for the lookup
                    aliasFilterMap.put(remoteIndex.getUUID(), aliasFilter);
                }
            }
            return (clusterAlias, nodeId) -> {
                Map<String, DiscoveryNode> clusterNodes = clusterToNode.get(clusterAlias);
                if (clusterNodes == null) {
                    throw new IllegalArgumentException("unknown remote cluster: " + clusterAlias);
                }
                return clusterNodes.get(nodeId);
            };
    }

    private void executeSearch(SearchTask task, SearchTimeProvider timeProvider, SearchRequest searchRequest, OriginalIndices localIndices,
                               List<SearchShardIterator> remoteShardIterators, Function<String, Transport.Connection> remoteConnections,
                               List<SearchShardIterator> remoteShardIterators, BiFunction<String, String, DiscoveryNode> remoteConnections,
                               ClusterState clusterState, Map<String, AliasFilter> remoteAliasMap,
                               ActionListener<SearchResponse> listener) {

@@ -234,9 +286,10 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
        for (int i = 0; i < indices.length; i++) {
            concreteIndices[i] = indices[i].getName();
        }
        GroupShardsIterator<ShardIterator> localShardsIterator = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap,
            searchRequest.preference());
        GroupShardsIterator<SearchShardIterator> shardIterators = mergeShardsIterators(localShardsIterator, localIndices, remoteShardIterators);
        GroupShardsIterator<ShardIterator> localShardsIterator = clusterService.operationRouting().searchShards(clusterState,
            concreteIndices, routingMap, searchRequest.preference());
        GroupShardsIterator<SearchShardIterator> shardIterators = mergeShardsIterators(localShardsIterator, localIndices,
            remoteShardIterators);

        failIfOverShardCountLimit(clusterService, shardIterators.size());

@@ -259,18 +312,12 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
        }

        final DiscoveryNodes nodes = clusterState.nodes();
        Function<String, Transport.Connection> connectionLookup = (nodeId) -> {
            final DiscoveryNode discoveryNode = nodes.get(nodeId);
            final Transport.Connection connection;
            if (discoveryNode != null) {
                connection = searchTransportService.getConnection(discoveryNode);
            } else {
                connection = remoteConnections.apply(nodeId);
            }
            if (connection == null) {
        BiFunction<String, String, Transport.Connection> connectionLookup = (clusterName, nodeId) -> {
            final DiscoveryNode discoveryNode = clusterName == null ? nodes.get(nodeId) : remoteConnections.apply(clusterName, nodeId);
            if (discoveryNode == null) {
                throw new IllegalStateException("no node found for id: " + nodeId);
            }
            return connection;
            return searchTransportService.getConnection(clusterName, discoveryNode);
        };

        searchAsyncAction(task, searchRequest, shardIterators, timeProvider, connectionLookup, clusterState.version(),
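For context, the shape of the new two-argument lookup can be sketched in isolation: a null cluster alias resolves against the local cluster, any other alias against the per-cluster node map built from the remote-shards response. This is a minimal sketch; the names localNodes and remoteClusters are illustrative stand-ins, not identifiers from this change.

import java.util.Map;
import java.util.function.BiFunction;

final class ConnectionLookupSketch {
    // Resolve a (clusterAlias, nodeId) pair to a node name; a null alias means "local cluster".
    static BiFunction<String, String, String> lookup(final Map<String, String> localNodes,
                                                     final Map<String, Map<String, String>> remoteClusters) {
        return (clusterAlias, nodeId) -> {
            if (clusterAlias == null) {
                return localNodes.get(nodeId); // local search: resolve against the local cluster state
            }
            final Map<String, String> clusterNodes = remoteClusters.get(clusterAlias);
            if (clusterNodes == null) {
                throw new IllegalArgumentException("unknown remote cluster: " + clusterAlias);
            }
            return clusterNodes.get(nodeId); // cross-cluster search: resolve via the alias
        };
    }
}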
@@ -285,7 +332,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
            shards.add(shardIterator);
        }
        for (ShardIterator shardIterator : localShardsIterator) {
            shards.add(new SearchShardIterator(shardIterator.shardId(), shardIterator.getShardRoutings(), localIndices));
            shards.add(new SearchShardIterator(null, shardIterator.shardId(), shardIterator.getShardRoutings(), localIndices));
        }
        return new GroupShardsIterator<>(shards);
    }

@@ -297,7 +344,8 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,

    private AbstractSearchAsyncAction searchAsyncAction(SearchTask task, SearchRequest searchRequest,
                                                        GroupShardsIterator<SearchShardIterator> shardIterators,
                                                        SearchTimeProvider timeProvider, Function<String, Transport.Connection> connectionLookup,
                                                        SearchTimeProvider timeProvider,
                                                        BiFunction<String, String, Transport.Connection> connectionLookup,
                                                        long clusterStateVersion, Map<String, AliasFilter> aliasFilter,
                                                        Map<String, Float> concreteIndexBoosts,
                                                        ActionListener<SearchResponse> listener) {

@@ -306,13 +354,13 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
        switch(searchRequest.searchType()) {
            case DFS_QUERY_THEN_FETCH:
                searchAsyncAction = new SearchDfsQueryThenFetchAsyncAction(logger, searchTransportService, connectionLookup,
                    aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators, timeProvider,
                    clusterStateVersion, task);
                    aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators,
                    timeProvider, clusterStateVersion, task);
                break;
            case QUERY_THEN_FETCH:
                searchAsyncAction = new SearchQueryThenFetchAsyncAction(logger, searchTransportService, connectionLookup,
                    aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators, timeProvider,
                    clusterStateVersion, task);
                    aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators,
                    timeProvider, clusterStateVersion, task);
                break;
            default:
                throw new IllegalStateException("Unknown search type: [" + searchRequest.searchType() + "]");

@@ -1,106 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.support;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ListenableActionFuture;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.ArrayList;
import java.util.List;

public abstract class AbstractListenableActionFuture<T, L> extends AdapterActionFuture<T, L> implements ListenableActionFuture<T> {

    private static final Logger logger = Loggers.getLogger(AbstractListenableActionFuture.class);

    final ThreadPool threadPool;
    volatile Object listeners;
    boolean executedListeners = false;

    protected AbstractListenableActionFuture(ThreadPool threadPool) {
        this.threadPool = threadPool;
    }

    public ThreadPool threadPool() {
        return threadPool;
    }

    @Override
    public void addListener(final ActionListener<T> listener) {
        internalAddListener(listener);
    }

    public void internalAddListener(ActionListener<T> listener) {
        listener = new ThreadedActionListener<>(logger, threadPool, ThreadPool.Names.LISTENER, listener, false);
        boolean executeImmediate = false;
        synchronized (this) {
            if (executedListeners) {
                executeImmediate = true;
            } else {
                Object listeners = this.listeners;
                if (listeners == null) {
                    listeners = listener;
                } else if (listeners instanceof List) {
                    ((List) this.listeners).add(listener);
                } else {
                    Object orig = listeners;
                    listeners = new ArrayList<>(2);
                    ((List) listeners).add(orig);
                    ((List) listeners).add(listener);
                }
                this.listeners = listeners;
            }
        }
        if (executeImmediate) {
            executeListener(listener);
        }
    }

    @Override
    protected void done() {
        super.done();
        synchronized (this) {
            executedListeners = true;
        }
        Object listeners = this.listeners;
        if (listeners != null) {
            if (listeners instanceof List) {
                List list = (List) listeners;
                for (Object listener : list) {
                    executeListener((ActionListener<T>) listener);
                }
            } else {
                executeListener((ActionListener<T>) listeners);
            }
        }
    }

    private void executeListener(final ActionListener<T> listener) {
        try {
            // we use a timeout of 0 to by pass assertion forbidding to call actionGet() (blocking) on a network thread.
            // here we know we will never block
            listener.onResponse(actionGet(0));
        } catch (Exception e) {
            listener.onFailure(e);
        }
    }
}
@@ -19,17 +19,120 @@

package org.elasticsearch.action.support;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ListenableActionFuture;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.threadpool.ThreadPool;

public class PlainListenableActionFuture<T> extends AbstractListenableActionFuture<T, T> {
import java.util.ArrayList;
import java.util.List;

    public PlainListenableActionFuture(ThreadPool threadPool) {
        super(threadPool);
public class PlainListenableActionFuture<T> extends AdapterActionFuture<T, T> implements ListenableActionFuture<T> {

    volatile Object listeners;
    boolean executedListeners = false;

    private PlainListenableActionFuture() {}

    /**
     * This method returns a listenable future. The listeners will be called on completion of the future.
     * The listeners will be executed by the same thread that completes the future.
     *
     * @param <T> the result of the future
     * @return a listenable future
     */
    public static <T> PlainListenableActionFuture<T> newListenableFuture() {
        return new PlainListenableActionFuture<>();
    }

    /**
     * This method returns a listenable future. The listeners will be called on completion of the future.
     * The listeners will be executed on the LISTENER thread pool.
     * @param threadPool the thread pool used to execute listeners
     * @param <T> the result of the future
     * @return a listenable future
     */
    public static <T> PlainListenableActionFuture<T> newDispatchingListenableFuture(ThreadPool threadPool) {
        return new DispatchingListenableActionFuture<>(threadPool);
    }

    @Override
    protected T convert(T response) {
        return response;
    public void addListener(final ActionListener<T> listener) {
        internalAddListener(listener);
    }

    @Override
    protected void done() {
        super.done();
        synchronized (this) {
            executedListeners = true;
        }
        Object listeners = this.listeners;
        if (listeners != null) {
            if (listeners instanceof List) {
                List list = (List) listeners;
                for (Object listener : list) {
                    executeListener((ActionListener<T>) listener);
                }
            } else {
                executeListener((ActionListener<T>) listeners);
            }
        }
    }

    @Override
    protected T convert(T listenerResponse) {
        return listenerResponse;
    }

    private void internalAddListener(ActionListener<T> listener) {
        boolean executeImmediate = false;
        synchronized (this) {
            if (executedListeners) {
                executeImmediate = true;
            } else {
                Object listeners = this.listeners;
                if (listeners == null) {
                    listeners = listener;
                } else if (listeners instanceof List) {
                    ((List) this.listeners).add(listener);
                } else {
                    Object orig = listeners;
                    listeners = new ArrayList<>(2);
                    ((List) listeners).add(orig);
                    ((List) listeners).add(listener);
                }
                this.listeners = listeners;
            }
        }
        if (executeImmediate) {
            executeListener(listener);
        }
    }

    private void executeListener(final ActionListener<T> listener) {
        try {
            // we use a timeout of 0 to by pass assertion forbidding to call actionGet() (blocking) on a network thread.
            // here we know we will never block
            listener.onResponse(actionGet(0));
        } catch (Exception e) {
            listener.onFailure(e);
        }
    }

    private static final class DispatchingListenableActionFuture<T> extends PlainListenableActionFuture<T> {

        private static final Logger logger = Loggers.getLogger(DispatchingListenableActionFuture.class);
        private final ThreadPool threadPool;

        private DispatchingListenableActionFuture(ThreadPool threadPool) {
            this.threadPool = threadPool;
        }

        @Override
        public void addListener(final ActionListener<T> listener) {
            super.addListener(new ThreadedActionListener<>(logger, threadPool, ThreadPool.Names.LISTENER, listener, false));
        }
    }
}
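For illustration, a caller of the new factory methods would look roughly like this (a usage sketch, not code from this change; onResponse on the future itself is inherited from AdapterActionFuture, which implements ActionListener):

PlainListenableActionFuture<String> future = PlainListenableActionFuture.newListenableFuture();
future.addListener(new ActionListener<String>() {
    @Override
    public void onResponse(String result) {
        System.out.println("completed with " + result); // runs on the thread that completes the future
    }

    @Override
    public void onFailure(Exception e) {
        e.printStackTrace();
    }
});
future.onResponse("done"); // listeners registered after this point execute immediately on the caller thread

The dispatching variant from newDispatchingListenableFuture(threadPool) differs only in that listeners are bounced onto the LISTENER thread pool instead of running inline.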
@@ -35,11 +35,14 @@ import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.indices.mapper.MapperRegistry;
import org.elasticsearch.plugins.Plugin;

import java.util.AbstractMap;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.Set;
import java.util.function.UnaryOperator;

/**
 * This service is responsible for upgrading legacy index metadata to the current version

@@ -54,14 +57,23 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
    private final NamedXContentRegistry xContentRegistry;
    private final MapperRegistry mapperRegistry;
    private final IndexScopedSettings indexScopedSettings;
    private final UnaryOperator<IndexMetaData> upgraders;

    @Inject
    public MetaDataIndexUpgradeService(Settings settings, NamedXContentRegistry xContentRegistry, MapperRegistry mapperRegistry,
                                       IndexScopedSettings indexScopedSettings) {
                                       IndexScopedSettings indexScopedSettings,
                                       Collection<UnaryOperator<IndexMetaData>> indexMetaDataUpgraders) {
        super(settings);
        this.xContentRegistry = xContentRegistry;
        this.mapperRegistry = mapperRegistry;
        this.indexScopedSettings = indexScopedSettings;
        this.upgraders = indexMetaData -> {
            IndexMetaData newIndexMetaData = indexMetaData;
            for (UnaryOperator<IndexMetaData> upgrader : indexMetaDataUpgraders) {
                newIndexMetaData = upgrader.apply(newIndexMetaData);
            }
            return newIndexMetaData;
        };
    }

    /**

@@ -84,6 +96,8 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
        newMetaData = archiveBrokenIndexSettings(newMetaData);
        // only run the check with the upgraded settings!!
        checkMappingsCompatibility(newMetaData);
        // apply plugin checks
        newMetaData = upgraders.apply(newMetaData);
        return markAsUpgraded(newMetaData);
    }
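The collection wired into the constructor is folded into the single upgraders operator applied above. An element of that collection could look roughly like the following sketch; the setting key used here is purely illustrative, not part of this change.

import java.util.function.UnaryOperator;

UnaryOperator<IndexMetaData> exampleUpgrader = indexMetaData -> {
    // reject (or rewrite) indices that still carry a setting the plugin no longer supports
    if (indexMetaData.getSettings().get("index.my_plugin.legacy_flag") != null) {
        throw new IllegalStateException("index [" + indexMetaData.getIndex()
                + "] uses a setting that is no longer supported");
    }
    return indexMetaData; // unchanged metadata passes straight through
};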
@@ -933,5 +933,4 @@ public abstract class StreamInput extends InputStream {
     * be a no-op depending on the underlying implementation if the information of the remaining bytes is not present.
     */
    protected abstract void ensureCanReadBytes(int length) throws EOFException;

}

@@ -19,7 +19,8 @@
package org.elasticsearch.common.settings;

import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction;
import org.elasticsearch.action.search.RemoteClusterService;
import org.elasticsearch.transport.RemoteClusterService;
import org.elasticsearch.transport.RemoteClusterAware;
import org.elasticsearch.action.search.TransportSearchAction;
import org.elasticsearch.action.support.AutoCreateIndex;
import org.elasticsearch.action.support.DestructiveOperations;

@@ -254,7 +255,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
            SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING,
            ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING,
            TransportSearchAction.SHARD_COUNT_LIMIT_SETTING,
            RemoteClusterService.REMOTE_CLUSTERS_SEEDS,
            RemoteClusterAware.REMOTE_CLUSTERS_SEEDS,
            RemoteClusterService.REMOTE_CONNECTIONS_PER_CLUSTER,
            RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING,
            RemoteClusterService.REMOTE_NODE_ATTRIBUTE,

@@ -1,28 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.discovery.zen;

import org.elasticsearch.cluster.node.DiscoveryNodes;

public interface DiscoveryNodesProvider {

    DiscoveryNodes nodes();

}

@@ -20,11 +20,9 @@
package org.elasticsearch.discovery.zen;

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.discovery.zen.DiscoveryNodesProvider;

public interface PingContextProvider extends DiscoveryNodesProvider {
public interface PingContextProvider {

    /** return the current cluster state of the node */
    ClusterState clusterState();

}

@@ -27,6 +27,7 @@ import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.component.AbstractComponent;

@@ -111,8 +112,6 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing {
    private final TransportService transportService;
    private final ClusterName clusterName;

    private final int concurrentConnects;

    private final List<String> configuredHosts;

    private final int limitPortCounts;

@@ -145,7 +144,7 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing {
        this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings);
        this.hostsProvider = unicastHostsProvider;

        this.concurrentConnects = DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING.get(settings);
        final int concurrentConnects = DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING.get(settings);
        if (DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.exists(settings)) {
            configuredHosts = DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.get(settings);
            // we only limit to 1 addresses, makes no sense to ping 100 ports

@@ -308,7 +307,7 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing {
            throw new RuntimeException(e);
        }
        seedNodes.addAll(hostsProvider.buildDynamicNodes());
        final DiscoveryNodes nodes = contextProvider.nodes();
        final DiscoveryNodes nodes = contextProvider.clusterState().nodes();
        // add all possible master nodes that were active in the last known cluster configuration
        for (ObjectCursor<DiscoveryNode> masterNode : nodes.getMasterNodes().values()) {
            seedNodes.add(masterNode.value);

@@ -459,9 +458,9 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing {
        final UnicastPingRequest pingRequest = new UnicastPingRequest();
        pingRequest.id = pingingRound.id();
        pingRequest.timeout = timeout;
        DiscoveryNodes discoNodes = contextProvider.nodes();
        ClusterState lastState = contextProvider.clusterState();

        pingRequest.pingResponse = createPingResponse(discoNodes);
        pingRequest.pingResponse = createPingResponse(lastState);

        Set<DiscoveryNode> nodesFromResponses = temporalResponses.stream().map(pingResponse -> {
            assert clusterName.equals(pingResponse.clusterName()) :

@@ -478,7 +477,7 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing {
        // resolve what we can via the latest cluster state
        final Set<DiscoveryNode> nodesToPing = uniqueNodesByAddress.values().stream()
            .map(node -> {
                DiscoveryNode foundNode = discoNodes.findByAddress(node.getAddress());
                DiscoveryNode foundNode = lastState.nodes().findByAddress(node.getAddress());
                if (foundNode == null) {
                    return node;
                } else {

@@ -596,7 +595,7 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing {
            () -> temporalResponses.remove(request.pingResponse));

        List<PingResponse> pingResponses = CollectionUtils.iterableAsArrayList(temporalResponses);
        pingResponses.add(createPingResponse(contextProvider.nodes()));
        pingResponses.add(createPingResponse(contextProvider.clusterState()));

        UnicastPingResponse unicastPingResponse = new UnicastPingResponse();
        unicastPingResponse.id = request.id;

@@ -649,8 +648,9 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing {
        }
    }

    private PingResponse createPingResponse(DiscoveryNodes discoNodes) {
        return new PingResponse(discoNodes.getLocalNode(), discoNodes.getMasterNode(), contextProvider.clusterState());
    private PingResponse createPingResponse(ClusterState clusterState) {
        DiscoveryNodes discoNodes = clusterState.nodes();
        return new PingResponse(discoNodes.getLocalNode(), discoNodes.getMasterNode(), clusterState);
    }

    static class UnicastPingResponse extends TransportResponse {

@@ -258,7 +258,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
        masterFD.stop("zen disco stop");
        nodesFD.stop();
        Releasables.close(zenPing); // stop any ongoing pinging
        DiscoveryNodes nodes = nodes();
        DiscoveryNodes nodes = clusterState().nodes();
        if (sendLeaveRequest) {
            if (nodes.getMasterNode() == null) {
                // if we don't know who the master is, nothing to do here

@@ -290,12 +290,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
        IOUtils.close(masterFD, nodesFD);
    }

    /** start of {@link PingContextProvider } implementation */
    @Override
    public DiscoveryNodes nodes() {
        return clusterState().nodes();
    }

    @Override
    public ClusterState clusterState() {
        ClusterState clusterState = state.get();

@@ -303,8 +297,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
        return clusterState;
    }

    /** end of {@link PingContextProvider } implementation */

    @Override
    public void publish(ClusterChangedEvent clusterChangedEvent, AckListener ackListener) {
        ClusterState newState = clusterChangedEvent.state();

@@ -677,7 +669,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
        }
        if (localNodeMaster()) {
            removeNode(node, "zen-disco-node-left", "left");
        } else if (node.equals(nodes().getMasterNode())) {
        } else if (node.equals(clusterState().nodes().getMasterNode())) {
            handleMasterGone(node, null, "shut_down");
        }
    }

@@ -1041,7 +1033,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
    }

    private boolean localNodeMaster() {
        return nodes().isLocalNodeElectedMaster();
        return clusterState().nodes().isLocalNodeElectedMaster();
    }

    private void handleAnotherMaster(ClusterState localClusterState, final DiscoveryNode otherMaster, long otherClusterStateVersion, String reason) {

@@ -72,7 +72,7 @@ public interface ZenPing extends Releasable {
         * @param clusterStateVersion the current cluster state version of that node
         *                            ({@link ElectMasterService.MasterCandidate#UNRECOVERED_CLUSTER_VERSION} for not recovered)
         */
        public PingResponse(DiscoveryNode node, DiscoveryNode master, ClusterName clusterName, long clusterStateVersion) {
        PingResponse(DiscoveryNode node, DiscoveryNode master, ClusterName clusterName, long clusterStateVersion) {
            this.id = idGenerator.incrementAndGet();
            this.node = node;
            this.master = master;

@@ -44,6 +44,7 @@ import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;

import static java.util.Collections.emptySet;
import static java.util.Collections.unmodifiableSet;

@@ -67,10 +68,11 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel

    protected final Logger logger;
    protected final String type;
    private final ShardId shardId;
    protected final ShardId shardId;
    private final Lister<BaseNodesResponse<T>, T> action;
    private final Map<String, NodeEntry<T>> cache = new HashMap<>();
    private final Set<String> nodesToIgnore = new HashSet<>();
    private final AtomicLong round = new AtomicLong();
    private boolean closed;

    @SuppressWarnings("unchecked")

@@ -112,20 +114,22 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel
        }
        nodesToIgnore.addAll(ignoreNodes);
        fillShardCacheWithDataNodes(cache, nodes);
        Set<NodeEntry<T>> nodesToFetch = findNodesToFetch(cache);
        List<NodeEntry<T>> nodesToFetch = findNodesToFetch(cache);
        if (nodesToFetch.isEmpty() == false) {
            // mark all node as fetching and go ahead and async fetch them
            // use a unique round id to detect stale responses in processAsyncFetch
            final long fetchingRound = round.incrementAndGet();
            for (NodeEntry<T> nodeEntry : nodesToFetch) {
                nodeEntry.markAsFetching();
                nodeEntry.markAsFetching(fetchingRound);
            }
            DiscoveryNode[] discoNodesToFetch = nodesToFetch.stream().map(NodeEntry::getNodeId).map(nodes::get)
                .toArray(DiscoveryNode[]::new);
            asyncFetch(shardId, discoNodesToFetch);
            asyncFetch(discoNodesToFetch, fetchingRound);
        }

        // if we are still fetching, return null to indicate it
        if (hasAnyNodeFetching(cache)) {
            return new FetchResult<>(shardId, null, emptySet(), emptySet());
            return new FetchResult<>(shardId, null, emptySet());
        } else {
            // nothing to fetch, yay, build the return value
            Map<DiscoveryNode, T> fetchData = new HashMap<>();
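The core of this change is the round id threaded from fetchData through markAsFetching and back into processAsyncFetch. Stripped of the Elasticsearch types, the stale-response guard amounts to the following sketch; Entry stands in for NodeEntry and the method names are illustrative.

import java.util.concurrent.atomic.AtomicLong;

final class RoundTaggingSketch {
    static final class Entry {
        long fetchingRound;
        boolean done;
    }

    private final AtomicLong round = new AtomicLong();

    // fetchData: stamp the entry with a fresh round id before firing the async request
    long startFetch(final Entry entry) {
        final long fetchingRound = round.incrementAndGet();
        entry.fetchingRound = fetchingRound; // markAsFetching(fetchingRound)
        return fetchingRound;
    }

    // processAsyncFetch: a reply carrying an older round id must not clobber newer state
    void onReply(final Entry entry, final long replyRound) {
        if (entry.fetchingRound != replyRound) {
            return; // stale reply from an earlier round: ignore it
        }
        entry.done = true; // doneFetching(...)
    }
}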
@@ -158,7 +162,7 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel
            if (failedNodes.isEmpty() == false || allIgnoreNodes.isEmpty() == false) {
                reroute(shardId, "nodes failed [" + failedNodes.size() + "], ignored [" + allIgnoreNodes.size() + "]");
            }
            return new FetchResult<>(shardId, fetchData, failedNodes, allIgnoreNodes);
            return new FetchResult<>(shardId, fetchData, allIgnoreNodes);
        }
    }

@@ -168,7 +172,7 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel
     * the shard (response + failures), issuing a reroute at the end of it to make sure there will be another round
     * of allocations taking this new data into account.
     */
    protected synchronized void processAsyncFetch(ShardId shardId, List<T> responses, List<FailedNodeException> failures) {
    protected synchronized void processAsyncFetch(List<T> responses, List<FailedNodeException> failures, long fetchingRound) {
        if (closed) {
            // we are closed, no need to process this async fetch at all
            logger.trace("{} ignoring fetched [{}] results, already closed", shardId, type);

@@ -179,15 +183,19 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel
        if (responses != null) {
            for (T response : responses) {
                NodeEntry<T> nodeEntry = cache.get(response.getNode().getId());
                // if the entry is there, and not marked as failed already, process it
                if (nodeEntry == null) {
                    continue;
                }
                if (nodeEntry.isFailed()) {
                    logger.trace("{} node {} has failed for [{}] (failure [{}])", shardId, nodeEntry.getNodeId(), type, nodeEntry.getFailure());
                } else {
                    logger.trace("{} marking {} as done for [{}], result is [{}]", shardId, nodeEntry.getNodeId(), type, response);
                    nodeEntry.doneFetching(response);
                if (nodeEntry != null) {
                    if (nodeEntry.getFetchingRound() != fetchingRound) {
                        assert nodeEntry.getFetchingRound() > fetchingRound : "node entries only replaced by newer rounds";
                        logger.trace("{} received response for [{}] from node {} for an older fetching round (expected: {} but was: {})",
                            shardId, nodeEntry.getNodeId(), type, nodeEntry.getFetchingRound(), fetchingRound);
                    } else if (nodeEntry.isFailed()) {
                        logger.trace("{} node {} has failed for [{}] (failure [{}])", shardId, nodeEntry.getNodeId(), type,
                            nodeEntry.getFailure());
                    } else {
                        // if the entry is there, for the right fetching round and not marked as failed already, process it
                        logger.trace("{} marking {} as done for [{}], result is [{}]", shardId, nodeEntry.getNodeId(), type, response);
                        nodeEntry.doneFetching(response);
                    }
                }
            }
        }

@@ -195,15 +203,24 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel
        for (FailedNodeException failure : failures) {
            logger.trace("{} processing failure {} for [{}]", shardId, failure, type);
            NodeEntry<T> nodeEntry = cache.get(failure.nodeId());
            // if the entry is there, and not marked as failed already, process it
            if (nodeEntry != null && nodeEntry.isFailed() == false) {
                Throwable unwrappedCause = ExceptionsHelper.unwrapCause(failure.getCause());
                // if the request got rejected or timed out, we need to try it again next time...
                if (unwrappedCause instanceof EsRejectedExecutionException || unwrappedCause instanceof ReceiveTimeoutTransportException || unwrappedCause instanceof ElasticsearchTimeoutException) {
                    nodeEntry.restartFetching();
                } else {
                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("{}: failed to list shard for {} on node [{}]", shardId, type, failure.nodeId()), failure);
                    nodeEntry.doneFetching(failure.getCause());
            if (nodeEntry != null) {
                if (nodeEntry.getFetchingRound() != fetchingRound) {
                    assert nodeEntry.getFetchingRound() > fetchingRound : "node entries only replaced by newer rounds";
                    logger.trace("{} received failure for [{}] from node {} for an older fetching round (expected: {} but was: {})",
                        shardId, nodeEntry.getNodeId(), type, nodeEntry.getFetchingRound(), fetchingRound);
                } else if (nodeEntry.isFailed() == false) {
                    // if the entry is there, for the right fetching round and not marked as failed already, process it
                    Throwable unwrappedCause = ExceptionsHelper.unwrapCause(failure.getCause());
                    // if the request got rejected or timed out, we need to try it again next time...
                    if (unwrappedCause instanceof EsRejectedExecutionException ||
                        unwrappedCause instanceof ReceiveTimeoutTransportException ||
                        unwrappedCause instanceof ElasticsearchTimeoutException) {
                        nodeEntry.restartFetching();
                    } else {
                        logger.warn((Supplier<?>) () -> new ParameterizedMessage("{}: failed to list shard for {} on node [{}]",
                            shardId, type, failure.nodeId()), failure);
                        nodeEntry.doneFetching(failure.getCause());
                    }
                }
            }
        }

@@ -241,8 +258,8 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel
     * Finds all the nodes that need to be fetched. Those are nodes that have no
     * data, and are not in fetch mode.
     */
    private Set<NodeEntry<T>> findNodesToFetch(Map<String, NodeEntry<T>> shardCache) {
        Set<NodeEntry<T>> nodesToFetch = new HashSet<>();
    private List<NodeEntry<T>> findNodesToFetch(Map<String, NodeEntry<T>> shardCache) {
        List<NodeEntry<T>> nodesToFetch = new ArrayList<>();
        for (NodeEntry<T> nodeEntry : shardCache.values()) {
            if (nodeEntry.hasData() == false && nodeEntry.isFetching() == false) {
                nodesToFetch.add(nodeEntry);

@@ -267,12 +284,12 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel
     * Async fetches data for the provided shard with the set of nodes that need to be fetched from.
     */
    // visible for testing
    void asyncFetch(final ShardId shardId, final DiscoveryNode[] nodes) {
    void asyncFetch(final DiscoveryNode[] nodes, long fetchingRound) {
        logger.trace("{} fetching [{}] from {}", shardId, type, nodes);
        action.list(shardId, nodes, new ActionListener<BaseNodesResponse<T>>() {
            @Override
            public void onResponse(BaseNodesResponse<T> response) {
                processAsyncFetch(shardId, response.getNodes(), response.failures());
                processAsyncFetch(response.getNodes(), response.failures(), fetchingRound);
            }

            @Override

@@ -281,7 +298,7 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel
                for (final DiscoveryNode node: nodes) {
                    failures.add(new FailedNodeException(node.getId(), "total failure in fetching", e));
                }
                processAsyncFetch(shardId, null, failures);
                processAsyncFetch(null, failures, fetchingRound);
            }
        });
    }

@@ -294,13 +311,11 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel

        private final ShardId shardId;
        private final Map<DiscoveryNode, T> data;
        private final Set<String> failedNodes;
        private final Set<String> ignoreNodes;

        public FetchResult(ShardId shardId, Map<DiscoveryNode, T> data, Set<String> failedNodes, Set<String> ignoreNodes) {
        public FetchResult(ShardId shardId, Map<DiscoveryNode, T> data, Set<String> ignoreNodes) {
            this.shardId = shardId;
            this.data = data;
            this.failedNodes = failedNodes;
            this.ignoreNodes = ignoreNodes;
        }

@@ -342,6 +357,7 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel
        private T value;
        private boolean valueSet;
        private Throwable failure;
        private long fetchingRound;

        NodeEntry(String nodeId) {
            this.nodeId = nodeId;

@@ -355,9 +371,10 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel
            return fetching;
        }

        void markAsFetching() {
        void markAsFetching(long fetchingRound) {
            assert fetching == false : "double marking a node as fetching";
            fetching = true;
            this.fetching = true;
            this.fetchingRound = fetchingRound;
        }

        void doneFetching(T value) {

@@ -402,5 +419,9 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel
            assert valueSet : "value is not set, hasn't been fetched yet";
            return value;
        }

        long getFetchingRound() {
            return fetchingRound;
        }
    }
}

@@ -31,6 +31,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;

@@ -50,6 +51,9 @@ import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.UnaryOperator;

import static java.util.Collections.emptySet;
import static java.util.Collections.unmodifiableSet;

@@ -219,8 +223,8 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateA
                final String name = stateFile.getFileName().toString();
                if (name.startsWith("metadata-")) {
                    throw new IllegalStateException("Detected pre 0.19 metadata file please upgrade to a version before "
                        + Version.CURRENT.minimumCompatibilityVersion()
                        + " first to upgrade state structures - metadata found: [" + stateFile.getParent().toAbsolutePath());
                            + Version.CURRENT.minimumCompatibilityVersion()
                            + " first to upgrade state structures - metadata found: [" + stateFile.getParent().toAbsolutePath());
                }
            }
        }

@@ -247,23 +251,41 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateA
            changed |= indexMetaData != newMetaData;
            upgradedMetaData.put(newMetaData, false);
        }
        // collect current customs
        Map<String, MetaData.Custom> existingCustoms = new HashMap<>();
        for (ObjectObjectCursor<String, MetaData.Custom> customCursor : metaData.customs()) {
            existingCustoms.put(customCursor.key, customCursor.value);
        }
        // upgrade global custom meta data
        Map<String, MetaData.Custom> upgradedCustoms = metaDataUpgrader.customMetaDataUpgraders.apply(existingCustoms);
        if (upgradedCustoms.equals(existingCustoms) == false) {
            existingCustoms.keySet().forEach(upgradedMetaData::removeCustom);
            for (Map.Entry<String, MetaData.Custom> upgradedCustomEntry : upgradedCustoms.entrySet()) {
                upgradedMetaData.putCustom(upgradedCustomEntry.getKey(), upgradedCustomEntry.getValue());
            }
        if (applyPluginUpgraders(metaData.getCustoms(), metaDataUpgrader.customMetaDataUpgraders,
            upgradedMetaData::removeCustom, upgradedMetaData::putCustom)) {
            changed = true;
        }
        // upgrade current templates
        if (applyPluginUpgraders(metaData.getTemplates(), metaDataUpgrader.indexTemplateMetaDataUpgraders,
            upgradedMetaData::removeTemplate, (s, indexTemplateMetaData) -> upgradedMetaData.put(indexTemplateMetaData))) {
            changed = true;
        }
        return changed ? upgradedMetaData.build() : metaData;
    }

    private static <Data> boolean applyPluginUpgraders(ImmutableOpenMap<String, Data> existingData,
                                                       UnaryOperator<Map<String, Data>> upgrader,
                                                       Consumer<String> removeData,
                                                       BiConsumer<String, Data> putData) {
        // collect current data
        Map<String, Data> existingMap = new HashMap<>();
        for (ObjectObjectCursor<String, Data> customCursor : existingData) {
            existingMap.put(customCursor.key, customCursor.value);
        }
        // upgrade global custom meta data
        Map<String, Data> upgradedCustoms = upgrader.apply(existingMap);
        if (upgradedCustoms.equals(existingMap) == false) {
            // remove all data first so a plugin can remove custom metadata or templates if needed
            existingMap.keySet().forEach(removeData);
            for (Map.Entry<String, Data> upgradedCustomEntry : upgradedCustoms.entrySet()) {
                putData.accept(upgradedCustomEntry.getKey(), upgradedCustomEntry.getValue());
            }
            return true;
        }
        return false;
    }

    // shard state BWC
    private void ensureNoPre019ShardState(NodeEnvironment nodeEnv) throws Exception {
        for (Path dataLocation : nodeEnv.nodeDataPaths()) {
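An operator composed into metaDataUpgrader.customMetaDataUpgraders might look like the following sketch; the custom key "my_plugin_legacy" is illustrative, not part of this change. Returning a map that differs from the input is exactly what makes applyPluginUpgraders report a change.

import java.util.HashMap;
import java.util.Map;
import java.util.function.UnaryOperator;

static UnaryOperator<Map<String, MetaData.Custom>> dropLegacyCustom() {
    return customs -> {
        if (customs.containsKey("my_plugin_legacy") == false) {
            return customs; // input returned untouched: no change is reported
        }
        final Map<String, MetaData.Custom> upgraded = new HashMap<>(customs);
        upgraded.remove("my_plugin_legacy"); // a plugin may drop obsolete custom metadata
        return upgraded;
    };
}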
@@ -22,7 +22,7 @@ package org.elasticsearch.index.seqno;
import com.carrotsearch.hppc.ObjectLongHashMap;
import com.carrotsearch.hppc.ObjectLongMap;
import com.carrotsearch.hppc.cursors.ObjectLongCursor;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.shard.AbstractIndexShardComponent;
import org.elasticsearch.index.shard.ShardId;

@@ -31,8 +31,6 @@ import java.util.HashSet;
import java.util.Locale;
import java.util.Set;

import static org.elasticsearch.index.seqno.SequenceNumbersService.UNASSIGNED_SEQ_NO;

/**
 * This class is responsible of tracking the global checkpoint. The global checkpoint is the highest sequence number for which all lower (or
 * equal) sequence number have been processed on all shards that are currently active. Since shards count as "active" when the master starts

@@ -49,14 +47,20 @@ public class GlobalCheckpointTracker extends AbstractIndexShardComponent {
     * through recovery. These shards are treated as valid copies and participate in determining the global checkpoint. This map is keyed by
     * allocation IDs. All accesses to this set are guarded by a lock on this.
     */
    private final ObjectLongMap<String> inSyncLocalCheckpoints;
    final ObjectLongMap<String> inSyncLocalCheckpoints;

    /*
     * This set holds the last set of known valid allocation ids as received by the master. This is important to make sure shard that are
     * failed or relocated are cleaned up from {@link #inSyncLocalCheckpoints} and do not hold the global checkpoint back. All accesses to
     * this set are guarded by a lock on this.
     * This map holds the last known local checkpoint for initializing shards that are undergoing recovery. Such shards do not participate
     * in determining the global checkpoint. We must track these local checkpoints so that when a shard is activated we use the highest
     * known checkpoint.
     */
    private final Set<String> assignedAllocationIds;
    final ObjectLongMap<String> trackingLocalCheckpoints;

    /*
     * This set contains allocation IDs for which there is a thread actively waiting for the local checkpoint to advance to at least the
     * current global checkpoint.
     */
    final Set<String> pendingInSync;

    /*
     * The current global checkpoint for this shard. Note that this field is guarded by a lock on this and thus this field does not need to

@@ -74,10 +78,11 @@ public class GlobalCheckpointTracker extends AbstractIndexShardComponent {
     */
    GlobalCheckpointTracker(final ShardId shardId, final IndexSettings indexSettings, final long globalCheckpoint) {
        super(shardId, indexSettings);
        assert globalCheckpoint >= UNASSIGNED_SEQ_NO : "illegal initial global checkpoint: " + globalCheckpoint;
        inSyncLocalCheckpoints = new ObjectLongHashMap<>(1 + indexSettings.getNumberOfReplicas());
        assignedAllocationIds = new HashSet<>(1 + indexSettings.getNumberOfReplicas());
        assert globalCheckpoint >= SequenceNumbersService.UNASSIGNED_SEQ_NO : "illegal initial global checkpoint: " + globalCheckpoint;
        this.inSyncLocalCheckpoints = new ObjectLongHashMap<>(1 + indexSettings.getNumberOfReplicas());
        this.trackingLocalCheckpoints = new ObjectLongHashMap<>(indexSettings.getNumberOfReplicas());
        this.globalCheckpoint = globalCheckpoint;
        this.pendingInSync = new HashSet<>();
    }

    /**

@@ -86,59 +91,85 @@ public class GlobalCheckpointTracker extends AbstractIndexShardComponent {
     * shards that are removed to be re-added.
     *
     * @param allocationId the allocation ID of the shard to update the local checkpoint for
     * @param checkpoint   the local checkpoint for the shard
     * @param localCheckpoint the local checkpoint for the shard
     */
    public synchronized void updateLocalCheckpoint(final String allocationId, final long checkpoint) {
        final int indexOfKey = inSyncLocalCheckpoints.indexOf(allocationId);
        if (indexOfKey >= 0) {
            final long current = inSyncLocalCheckpoints.indexGet(indexOfKey);
            if (current < checkpoint) {
                inSyncLocalCheckpoints.indexReplace(indexOfKey, checkpoint);
                if (logger.isTraceEnabled()) {
                    logger.trace("updated local checkpoint of [{}] to [{}] (was [{}])", allocationId, checkpoint, current);
                }
    public synchronized void updateLocalCheckpoint(final String allocationId, final long localCheckpoint) {
        final boolean updated;
        if (updateLocalCheckpoint(allocationId, localCheckpoint, inSyncLocalCheckpoints, "in-sync")) {
            updated = true;
        } else if (updateLocalCheckpoint(allocationId, localCheckpoint, trackingLocalCheckpoints, "tracking")) {
            updated = true;
        } else {
            logger.trace("ignored local checkpoint [{}] of [{}], allocation ID is not tracked", localCheckpoint, allocationId);
            updated = false;
        }
        if (updated) {
            notifyAllWaiters();
        }
    }

    @SuppressForbidden(reason = "Object#notifyAll waiters for local checkpoint advancement")
    private synchronized void notifyAllWaiters() {
        this.notifyAll();
    }

    private boolean updateLocalCheckpoint(
        final String allocationId, final long localCheckpoint, ObjectLongMap<String> map, final String reason) {
        final int index = map.indexOf(allocationId);
        if (index >= 0) {
            final long current = map.indexGet(index);
            if (current < localCheckpoint) {
                map.indexReplace(index, localCheckpoint);
                logger.trace("updated local checkpoint of [{}] in [{}] from [{}] to [{}]", allocationId, reason, current, localCheckpoint);
            } else {
                logger.trace(
                    "skipping update of local checkpoint [{}], current checkpoint is higher (current [{}], incoming [{}], type [{}])",
                    allocationId,
                    current,
                    checkpoint,
                    allocationId);
                    "skipped updating local checkpoint of [{}] in [{}] from [{}] to [{}], current checkpoint is higher",
                    allocationId,
                    reason,
                    current,
                    localCheckpoint);
            }
            return true;
        } else {
            logger.trace("[{}] isn't marked as in sync. ignoring local checkpoint of [{}].", allocationId, checkpoint);
            return false;
        }
    }

    /**
     * Scans through the currently known local checkpoint and updates the global checkpoint accordingly.
     *
     * @return {@code true} if the checkpoint has been updated or if it can not be updated since one of the local checkpoints of one of the
     *         active allocations is not known.
     * @return {@code true} if the checkpoint has been updated or if it can not be updated since the local checkpoints of one of the active
     *         allocations is not known.
     */
    synchronized boolean updateCheckpointOnPrimary() {
        long minCheckpoint = Long.MAX_VALUE;
        if (inSyncLocalCheckpoints.isEmpty()) {
        long minLocalCheckpoint = Long.MAX_VALUE;
        if (inSyncLocalCheckpoints.isEmpty() || !pendingInSync.isEmpty()) {
            return false;
        }
        for (final ObjectLongCursor<String> cp : inSyncLocalCheckpoints) {
            if (cp.value == UNASSIGNED_SEQ_NO) {
                logger.trace("unknown local checkpoint for active allocationId [{}], requesting a sync", cp.key);
        for (final ObjectLongCursor<String> localCheckpoint : inSyncLocalCheckpoints) {
            if (localCheckpoint.value == SequenceNumbersService.UNASSIGNED_SEQ_NO) {
                logger.trace("unknown local checkpoint for active allocation ID [{}], requesting a sync", localCheckpoint.key);
                return true;
            }
            minCheckpoint = Math.min(cp.value, minCheckpoint);
            minLocalCheckpoint = Math.min(localCheckpoint.value, minLocalCheckpoint);
        }
        if (minCheckpoint < globalCheckpoint) {
        assert minLocalCheckpoint != SequenceNumbersService.UNASSIGNED_SEQ_NO : "new global checkpoint must be assigned";
        if (minLocalCheckpoint < globalCheckpoint) {
            final String message =
                String.format(Locale.ROOT, "new global checkpoint [%d] is lower than previous one [%d]", minCheckpoint, globalCheckpoint);
                String.format(
                    Locale.ROOT,
                    "new global checkpoint [%d] is lower than previous one [%d]",
                    minLocalCheckpoint,
                    globalCheckpoint);
            throw new IllegalStateException(message);
        }
        if (globalCheckpoint != minCheckpoint) {
            logger.trace("global checkpoint updated to [{}]", minCheckpoint);
            globalCheckpoint = minCheckpoint;
        if (globalCheckpoint != minLocalCheckpoint) {
            logger.trace("global checkpoint updated to [{}]", minLocalCheckpoint);
            globalCheckpoint = minLocalCheckpoint;
            return true;
        } else {
            return false;
        }
        return false;
    }

    /**
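Stripped to its arithmetic, the primary-side rule above is: the global checkpoint is the minimum of the in-sync local checkpoints, it must never regress, and an unassigned entry forces a sync instead of an update. A standalone sketch, where UNASSIGNED stands in for SequenceNumbersService.UNASSIGNED_SEQ_NO; for example, in-sync checkpoints {5, 7, 9} with a current global checkpoint of 4 yield 5, while any UNASSIGNED entry leaves it at 4.

static long computeGlobalCheckpoint(final long[] inSyncLocalCheckpoints, final long current, final long UNASSIGNED) {
    if (inSyncLocalCheckpoints.length == 0) {
        return current; // no in-sync copies known: nothing to update
    }
    long min = Long.MAX_VALUE;
    for (final long localCheckpoint : inSyncLocalCheckpoints) {
        if (localCheckpoint == UNASSIGNED) {
            return current; // unknown local checkpoint: keep the current value, request a sync
        }
        min = Math.min(min, localCheckpoint);
    }
    if (min < current) {
        throw new IllegalStateException("new global checkpoint [" + min + "] is lower than previous one [" + current + "]");
    }
    return min;
}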
@@ -153,17 +184,17 @@ public class GlobalCheckpointTracker extends AbstractIndexShardComponent {
    /**
     * Updates the global checkpoint on a replica shard after it has been updated by the primary.
     *
     * @param checkpoint the global checkpoint
     * @param globalCheckpoint the global checkpoint
     */
    synchronized void updateCheckpointOnReplica(final long checkpoint) {
    synchronized void updateGlobalCheckpointOnReplica(final long globalCheckpoint) {
        /*
         * The global checkpoint here is a local knowledge which is updated under the mandate of the primary. It can happen that the primary
         * information is lagging compared to a replica (e.g., if a replica is promoted to primary but has stale info relative to other
         * replica shards). In these cases, the local knowledge of the global checkpoint could be higher than sync from the lagging primary.
         */
        if (this.globalCheckpoint <= checkpoint) {
            this.globalCheckpoint = checkpoint;
            logger.trace("global checkpoint updated from primary to [{}]", checkpoint);
        if (this.globalCheckpoint <= globalCheckpoint) {
            this.globalCheckpoint = globalCheckpoint;
            logger.trace("global checkpoint updated from primary to [{}]", globalCheckpoint);
        }
    }

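The replica-side rule is simpler: apply the primary's value only if it does not move the local knowledge of the global checkpoint backwards. As a minimal sketch:

final class ReplicaCheckpointSketch {
    private long globalCheckpoint;

    synchronized void updateFromPrimary(final long incoming) {
        if (globalCheckpoint <= incoming) {
            globalCheckpoint = incoming; // primary knowledge is newer: accept it
        }
        // otherwise the primary is lagging (e.g. after a promotion): keep the higher local value
    }
}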
@@ -173,33 +204,98 @@ public class GlobalCheckpointTracker extends AbstractIndexShardComponent {
     * @param activeAllocationIds       the allocation IDs of the currently active shard copies
     * @param initializingAllocationIds the allocation IDs of the currently initializing shard copies
     */
    public synchronized void updateAllocationIdsFromMaster(final Set<String> activeAllocationIds,
                                                           final Set<String> initializingAllocationIds) {
        assignedAllocationIds.removeIf(
            aId -> activeAllocationIds.contains(aId) == false && initializingAllocationIds.contains(aId) == false);
        assignedAllocationIds.addAll(activeAllocationIds);
        assignedAllocationIds.addAll(initializingAllocationIds);
        for (String activeId : activeAllocationIds) {
            if (inSyncLocalCheckpoints.containsKey(activeId) == false) {
                inSyncLocalCheckpoints.put(activeId, UNASSIGNED_SEQ_NO);
    public synchronized void updateAllocationIdsFromMaster(
        final Set<String> activeAllocationIds, final Set<String> initializingAllocationIds) {
        // remove shards whose allocation ID no longer exists
        inSyncLocalCheckpoints.removeAll(a -> !activeAllocationIds.contains(a) && !initializingAllocationIds.contains(a));

        // add any new active allocation IDs
        for (final String a : activeAllocationIds) {
            if (!inSyncLocalCheckpoints.containsKey(a)) {
                final long localCheckpoint = trackingLocalCheckpoints.getOrDefault(a, SequenceNumbersService.UNASSIGNED_SEQ_NO);
                inSyncLocalCheckpoints.put(a, localCheckpoint);
                logger.trace("marked [{}] as in-sync with local checkpoint [{}] via cluster state update from master", a, localCheckpoint);
            }
        }
        inSyncLocalCheckpoints.removeAll(key -> assignedAllocationIds.contains(key) == false);

        trackingLocalCheckpoints.removeAll(a -> !initializingAllocationIds.contains(a));
        for (final String a : initializingAllocationIds) {
            if (inSyncLocalCheckpoints.containsKey(a)) {
                /*
                 * This can happen if we mark the allocation ID as in sync at the end of recovery before seeing a cluster state update from
                 * marking the shard as active.
                 */
                continue;
            }
            if (trackingLocalCheckpoints.containsKey(a)) {
                // we are already tracking this allocation ID
                continue;
            }
            // this is a new allocation ID
            trackingLocalCheckpoints.put(a, SequenceNumbersService.UNASSIGNED_SEQ_NO);
            logger.trace("tracking [{}] via cluster state update from master", a);
        }
    }

    /**
     * Marks the shard with the provided allocation ID as in-sync with the primary shard. This should be called at the end of recovery where
     * the primary knows all operations below the global checkpoint have been completed on this shard.
     * Marks the shard with the provided allocation ID as in-sync with the primary shard. This method will block until the local checkpoint
     * on the specified shard advances above the current global checkpoint.
     *
     * @param allocationId the allocation ID of the shard to mark as in-sync
     * @param allocationId    the allocation ID of the shard to mark as in-sync
     * @param localCheckpoint the current local checkpoint on the shard
     *
     * @throws InterruptedException if the thread is interrupted waiting for the local checkpoint on the shard to advance
     */
    public synchronized void markAllocationIdAsInSync(final String allocationId) {
        if (assignedAllocationIds.contains(allocationId) == false) {
            // master has removed this allocation, ignore
    public synchronized void markAllocationIdAsInSync(final String allocationId, final long localCheckpoint) throws InterruptedException {
        if (!trackingLocalCheckpoints.containsKey(allocationId)) {
            /*
             * This can happen if the recovery target has been failed and the cluster state update from the master has triggered removing
             * this allocation ID from the tracking map but this recovery thread has not yet been made aware that the recovery is
             * cancelled.
             */
            return;
        }
        logger.trace("marked [{}] as in sync", allocationId);
        inSyncLocalCheckpoints.put(allocationId, UNASSIGNED_SEQ_NO);

        updateLocalCheckpoint(allocationId, localCheckpoint, trackingLocalCheckpoints, "tracking");
        waitForAllocationIdToBeInSync(allocationId);
    }

    private synchronized void waitForAllocationIdToBeInSync(final String allocationId) throws InterruptedException {
        if (!pendingInSync.add(allocationId)) {
            throw new IllegalStateException("there is already a pending sync in progress for allocation ID [" + allocationId + "]");
        }
        try {
            while (true) {
                /*
                 * If the allocation has been cancelled and so removed from the tracking map from a cluster state update from the master it
                 * means that this recovery will be cancelled; we are here on a cancellable recovery thread and so this thread will throw
                 * an interrupted exception as soon as it tries to wait on the monitor.
                 */
                final long current = trackingLocalCheckpoints.getOrDefault(allocationId, Long.MIN_VALUE);
                if (current >= globalCheckpoint) {
                    logger.trace("marked [{}] as in-sync with local checkpoint [{}]", allocationId, current);
                    trackingLocalCheckpoints.remove(allocationId);
                    /*
                     * This is prematurely adding the allocation ID to the in-sync map as at this point recovery is not yet finished and
                     * could still abort. At this point we will end up with a shard in the in-sync map holding back the global checkpoint
                     * because the shard never recovered and we would have to wait until either the recovery retries and completes
                     * successfully, or the master fails the shard and issues a cluster state update that removes the shard from the set of
                     * active allocation IDs.
                     */
                    inSyncLocalCheckpoints.put(allocationId, current);
                    break;
                } else {
                    waitForLocalCheckpointToAdvance();
                }
            }
        } finally {
            pendingInSync.remove(allocationId);
        }
    }

    @SuppressForbidden(reason = "Object#wait for local checkpoint advancement")
    private synchronized void waitForLocalCheckpointToAdvance() throws InterruptedException {
        this.wait();
    }

    /**
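The blocking handshake between markAllocationIdAsInSync and updateLocalCheckpoint reduces to a classic monitor wait: the recovery thread parks until the tracked local checkpoint catches up with the global checkpoint, and every checkpoint update wakes the waiters so they can re-check. A minimal sketch under those assumptions (types and names below are illustrative, not the Elasticsearch classes):

import java.util.HashMap;
import java.util.Map;

final class InSyncWaitSketch {
    private final Map<String, Long> tracking = new HashMap<>();
    private long globalCheckpoint;

    synchronized void updateLocalCheckpoint(final String allocationId, final long checkpoint) {
        tracking.merge(allocationId, checkpoint, Math::max);
        notifyAll(); // wake any thread parked in waitUntilInSync
    }

    synchronized void waitUntilInSync(final String allocationId) throws InterruptedException {
        while (tracking.getOrDefault(allocationId, Long.MIN_VALUE) < globalCheckpoint) {
            wait(); // re-check after every local checkpoint update
        }
    }
}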
@ -213,7 +309,7 @@ public class GlobalCheckpointTracker extends AbstractIndexShardComponent {
|
|||
if (inSyncLocalCheckpoints.containsKey(allocationId)) {
|
||||
return inSyncLocalCheckpoints.get(allocationId);
|
||||
}
|
||||
return UNASSIGNED_SEQ_NO;
|
||||
return SequenceNumbersService.UNASSIGNED_SEQ_NO;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -127,12 +127,13 @@ public class SequenceNumbersService extends AbstractIndexShardComponent {

    /**
     * Marks the shard with the provided allocation ID as in-sync with the primary shard. See
     * {@link GlobalCheckpointTracker#markAllocationIdAsInSync(String)} for additional details.
     * {@link GlobalCheckpointTracker#markAllocationIdAsInSync(String, long)} for additional details.
     *
     * @param allocationId    the allocation ID of the shard to mark as in-sync
     * @param localCheckpoint the current local checkpoint on the shard
     */
    public void markAllocationIdAsInSync(final String allocationId) {
        globalCheckpointTracker.markAllocationIdAsInSync(allocationId);
    public void markAllocationIdAsInSync(final String allocationId, final long localCheckpoint) throws InterruptedException {
        globalCheckpointTracker.markAllocationIdAsInSync(allocationId, localCheckpoint);
    }

    /**

@@ -166,10 +167,10 @@ public class SequenceNumbersService extends AbstractIndexShardComponent {
    /**
     * Updates the global checkpoint on a replica shard after it has been updated by the primary.
     *
     * @param checkpoint the global checkpoint
     * @param globalCheckpoint the global checkpoint
     */
    public void updateGlobalCheckpointOnReplica(final long checkpoint) {
        globalCheckpointTracker.updateCheckpointOnReplica(checkpoint);
    public void updateGlobalCheckpointOnReplica(final long globalCheckpoint) {
        globalCheckpointTracker.updateGlobalCheckpointOnReplica(globalCheckpoint);
    }

    /**

@@ -1475,13 +1475,14 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl

    /**
     * Marks the shard with the provided allocation ID as in-sync with the primary shard. See
     * {@link GlobalCheckpointTracker#markAllocationIdAsInSync(String)} for additional details.
     * {@link GlobalCheckpointTracker#markAllocationIdAsInSync(String, long)} for additional details.
     *
     * @param allocationId    the allocation ID of the shard to mark as in-sync
     * @param localCheckpoint the current local checkpoint on the shard
     */
    public void markAllocationIdAsInSync(final String allocationId) {
    public void markAllocationIdAsInSync(final String allocationId, final long localCheckpoint) throws InterruptedException {
        verifyPrimary();
        getEngine().seqNoService().markAllocationIdAsInSync(allocationId);
        getEngine().seqNoService().markAllocationIdAsInSync(allocationId, localCheckpoint);
    }

    /**

@@ -1516,11 +1517,26 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
    /**
     * Updates the global checkpoint on a replica shard after it has been updated by the primary.
     *
     * @param checkpoint the global checkpoint
     * @param globalCheckpoint the global checkpoint
     */
    public void updateGlobalCheckpointOnReplica(final long checkpoint) {
    public void updateGlobalCheckpointOnReplica(final long globalCheckpoint) {
        verifyReplicationTarget();
        getEngine().seqNoService().updateGlobalCheckpointOnReplica(checkpoint);
        final SequenceNumbersService seqNoService = getEngine().seqNoService();
        final long localCheckpoint = seqNoService.getLocalCheckpoint();
        if (globalCheckpoint <= localCheckpoint) {
            seqNoService.updateGlobalCheckpointOnReplica(globalCheckpoint);
        } else {
            /*
             * This can happen during recovery when the shard has started its engine but recovery is not finalized and is receiving global
             * checkpoint updates from in-flight operations. However, since this shard is not yet contributing to calculating the global
             * checkpoint, it can be the case that the global checkpoint update from the primary is ahead of the local checkpoint on this
             * shard. In this case, we ignore the global checkpoint update. This should only happen if we are in the translog stage of
             * recovery. Prior to this, the engine is not opened and this shard will not receive global checkpoint updates, and after this
             * the shard will be contributing to calculations of the global checkpoint.
             */
            assert recoveryState().getStage() == RecoveryState.Stage.TRANSLOG
                : "expected recovery stage [" + RecoveryState.Stage.TRANSLOG + "] but was [" + recoveryState().getStage() + "]";
        }
    }

    /**

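A tiny restatement of the guard above with made-up numbers, for reviewers skimming the hunk: during the translog stage a replica at local checkpoint 7 simply ignores a primary global checkpoint of 10 until its own checkpoint catches up.

    // Illustrative only; mirrors the condition in updateGlobalCheckpointOnReplica.
    static boolean acceptsGlobalCheckpoint(long globalCheckpoint, long localCheckpoint) {
        return globalCheckpoint <= localCheckpoint; // 10 <= 7 -> false -> update ignored
    }
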
@@ -28,7 +28,7 @@ import java.nio.file.StandardOpenOption;
 * only for testing until we have a disk-full FileSystem
 */
@FunctionalInterface
interface ChannelFactory {
public interface ChannelFactory {
    default FileChannel open(Path path) throws IOException {
        return open(path, StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW);
    }

@@ -348,20 +348,24 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde
     */
    public static long getStartingSeqNo(final RecoveryTarget recoveryTarget) {
        try {
            final long globalCheckpoint = Translog.readGlobalCheckpoint(recoveryTarget.indexShard().shardPath().resolveTranslog());
            final long globalCheckpoint = Translog.readGlobalCheckpoint(recoveryTarget.translogLocation());
            final SeqNoStats seqNoStats = recoveryTarget.store().loadSeqNoStats(globalCheckpoint);
            if (seqNoStats.getMaxSeqNo() <= seqNoStats.getGlobalCheckpoint()) {
                // commit point is good for seq no based recovery as the maximum seq# including in it
                // is below the global checkpoint (i.e., it excludes any ops thay may not be on the primary)
                // Recovery will start at the first op after the local check point stored in the commit.
                /*
                 * Commit point is good for sequence-number based recovery as the maximum sequence number included in it is below the global
                 * checkpoint (i.e., it excludes any operations that may not be on the primary). Recovery will start at the first operation
                 * after the local checkpoint stored in the commit.
                 */
                return seqNoStats.getLocalCheckpoint() + 1;
            } else {
                return SequenceNumbersService.UNASSIGNED_SEQ_NO;
            }
        } catch (final IOException e) {
            // this can happen, for example, if a phase one of the recovery completed successfully, a network partition happens before the
            // translog on the recovery target is opened, the recovery enters a retry loop seeing now that the index files are on disk and
            // proceeds to attempt a sequence-number-based recovery
            /*
             * This can happen, for example, if a phase one of the recovery completed successfully, a network partition happens before the
             * translog on the recovery target is opened, the recovery enters a retry loop seeing now that the index files are on disk and
             * proceeds to attempt a sequence-number-based recovery.
             */
            return SequenceNumbersService.UNASSIGNED_SEQ_NO;
        }
    }

@@ -418,7 +422,7 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde
        final RecoveryTarget recoveryTarget = recoveryRef.target();
        try {
            recoveryTarget.indexTranslogOperations(request.operations(), request.totalTranslogOps());
            channel.sendResponse(TransportResponse.Empty.INSTANCE);
            channel.sendResponse(new RecoveryTranslogOperationsResponse(recoveryTarget.indexShard().getLocalCheckpoint()));
        } catch (TranslogRecoveryPerformer.BatchOperationException exception) {
            MapperException mapperException = (MapperException) ExceptionsHelper.unwrap(exception, MapperException.class);
            if (mapperException == null) {

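A worked example of the getStartingSeqNo decision rule above, with made-up values (this is a sketch of the logic, not the production method):

    // A commit with maxSeqNo = 12 and globalCheckpoint = 15 qualifies for a
    // sequence-number based recovery; replay starts after the local checkpoint.
    static long startingSeqNo(long maxSeqNo, long globalCheckpoint, long localCheckpoint) {
        final long UNASSIGNED_SEQ_NO = -2; // sentinel, mirrors SequenceNumbersService
        if (maxSeqNo <= globalCheckpoint) {
            return localCheckpoint + 1;    // e.g. localCheckpoint = 11 -> start at 12
        } else {
            return UNASSIGNED_SEQ_NO;      // fall back to a file-based recovery
        }
    }
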
@@ -58,6 +58,7 @@ import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.StreamSupport;

@@ -179,14 +180,16 @@ public class RecoverySourceHandler {
            }

            logger.trace("snapshot translog for recovery; current size is [{}]", translogView.totalOperations());
            final long targetLocalCheckpoint;
            try {
                phase2(isSequenceNumberBasedRecoveryPossible ? request.startingSeqNo() : SequenceNumbersService.UNASSIGNED_SEQ_NO,
                    translogView.snapshot());
                final long startingSeqNo =
                    isSequenceNumberBasedRecoveryPossible ? request.startingSeqNo() : SequenceNumbersService.UNASSIGNED_SEQ_NO;
                targetLocalCheckpoint = phase2(startingSeqNo, translogView.snapshot());
            } catch (Exception e) {
                throw new RecoveryEngineException(shard.shardId(), 2, "phase2 failed", e);
            }

            finalizeRecovery();
            finalizeRecovery(targetLocalCheckpoint);
        }
        return response;
    }

@@ -410,8 +413,10 @@
     * @param startingSeqNo the sequence number to start recovery from, or {@link SequenceNumbersService#UNASSIGNED_SEQ_NO} if all
     *                      ops should be sent
     * @param snapshot a snapshot of the translog
     *
     * @return the local checkpoint on the target
     */
    void phase2(final long startingSeqNo, final Translog.Snapshot snapshot) throws IOException {
    long phase2(final long startingSeqNo, final Translog.Snapshot snapshot) throws IOException {
        if (shard.state() == IndexShardState.CLOSED) {
            throw new IndexShardClosedException(request.shardId());
        }

@@ -422,18 +427,19 @@
        logger.trace("recovery [phase2]: sending transaction log operations");

        // send all the snapshot's translog operations to the target
        final int totalOperations = sendSnapshot(startingSeqNo, snapshot);
        final SendSnapshotResult result = sendSnapshot(startingSeqNo, snapshot);

        stopWatch.stop();
        logger.trace("recovery [phase2]: took [{}]", stopWatch.totalTime());
        response.phase2Time = stopWatch.totalTime().millis();
        response.phase2Operations = totalOperations;
        response.phase2Operations = result.totalOperations;
        return result.targetLocalCheckpoint;
    }

    /*
     * finalizes the recovery process
     */
    public void finalizeRecovery() {
    public void finalizeRecovery(final long targetLocalCheckpoint) {
        if (shard.state() == IndexShardState.CLOSED) {
            throw new IndexShardClosedException(request.shardId());
        }

@@ -441,7 +447,7 @@
        StopWatch stopWatch = new StopWatch().start();
        logger.trace("finalizing recovery");
        cancellableThreads.execute(() -> {
            shard.markAllocationIdAsInSync(request.targetAllocationId());
            shard.markAllocationIdAsInSync(request.targetAllocationId(), targetLocalCheckpoint);
            recoveryTarget.finalizeRecovery(shard.getGlobalCheckpoint());
        });

@@ -467,6 +473,18 @@
        logger.trace("finalizing recovery took [{}]", stopWatch.totalTime());
    }

    static class SendSnapshotResult {

        final long targetLocalCheckpoint;
        final int totalOperations;

        SendSnapshotResult(final long targetLocalCheckpoint, final int totalOperations) {
            this.targetLocalCheckpoint = targetLocalCheckpoint;
            this.totalOperations = totalOperations;
        }

    }

    /**
     * Send the given snapshot's operations with a sequence number greater than the specified starting sequence number to this handler's
     * target node.

@@ -475,19 +493,25 @@
     *
     * @param startingSeqNo the sequence number for which only operations with a sequence number greater than this will be sent
     * @param snapshot the translog snapshot to replay operations from
     * @return the total number of translog operations that were sent
     * @return the local checkpoint on the target and the total number of operations sent
     * @throws IOException if an I/O exception occurred reading the translog snapshot
     */
    protected int sendSnapshot(final long startingSeqNo, final Translog.Snapshot snapshot) throws IOException {
    protected SendSnapshotResult sendSnapshot(final long startingSeqNo, final Translog.Snapshot snapshot) throws IOException {
        int ops = 0;
        long size = 0;
        int totalOperations = 0;
        int skippedOps = 0;
        int totalSentOps = 0;
        final AtomicLong targetLocalCheckpoint = new AtomicLong(SequenceNumbersService.UNASSIGNED_SEQ_NO);
        final List<Translog.Operation> operations = new ArrayList<>();

        if (snapshot.totalOperations() == 0) {
        final int expectedTotalOps = snapshot.totalOperations();
        if (expectedTotalOps == 0) {
            logger.trace("no translog operations to send");
        }

        final CancellableThreads.Interruptable sendBatch =
            () -> targetLocalCheckpoint.set(recoveryTarget.indexTranslogOperations(operations, expectedTotalOps));

        // send operations in batches
        Translog.Operation operation;
        while ((operation = snapshot.next()) != null) {

@@ -495,39 +519,41 @@
                throw new IndexShardClosedException(request.shardId());
            }
            cancellableThreads.checkForCancel();
            // if we are doing a sequence-number-based recovery, we have to skip older ops for which no sequence number was assigned, and
            // any ops before the starting sequence number
            /*
             * If we are doing a sequence-number-based recovery, we have to skip older ops for which no sequence number was assigned, and
             * any ops before the starting sequence number.
             */
            final long seqNo = operation.seqNo();
            if (startingSeqNo >= 0 && (seqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO || seqNo < startingSeqNo)) continue;
            if (startingSeqNo >= 0 && (seqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO || seqNo < startingSeqNo)) {
                skippedOps++;
                continue;
            }
            operations.add(operation);
            ops++;
            size += operation.estimateSize();
            totalOperations++;
            totalSentOps++;

            // check if this request is past bytes threshold, and if so, send it off
            if (size >= chunkSizeInBytes) {
                cancellableThreads.execute(() -> recoveryTarget.indexTranslogOperations(operations, snapshot.totalOperations()));
                if (logger.isTraceEnabled()) {
                    logger.trace("sent batch of [{}][{}] (total: [{}]) translog operations", ops, new ByteSizeValue(size),
                        snapshot.totalOperations());
                }
                cancellableThreads.execute(sendBatch);
                logger.trace("sent batch of [{}][{}] (total: [{}]) translog operations", ops, new ByteSizeValue(size), expectedTotalOps);
                ops = 0;
                size = 0;
                operations.clear();
            }
        }

        // send the leftover operations
        if (!operations.isEmpty()) {
            cancellableThreads.execute(() -> recoveryTarget.indexTranslogOperations(operations, snapshot.totalOperations()));
        if (!operations.isEmpty() || totalSentOps == 0) {
            // send the leftover operations or if no operations were sent, request the target to respond with its local checkpoint
            cancellableThreads.execute(sendBatch);
        }

        if (logger.isTraceEnabled()) {
            logger.trace("sent final batch of [{}][{}] (total: [{}]) translog operations", ops, new ByteSizeValue(size),
                snapshot.totalOperations());
        }
        assert expectedTotalOps == skippedOps + totalSentOps
            : "expected total [" + expectedTotalOps + "], skipped [" + skippedOps + "], total sent [" + totalSentOps + "]";

        return totalOperations;
        logger.trace("sent final batch of [{}][{}] (total: [{}]) translog operations", ops, new ByteSizeValue(size), expectedTotalOps);

        return new SendSnapshotResult(targetLocalCheckpoint.get(), totalSentOps);
    }

    /**

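For orientation, a minimal self-contained sketch of the size-threshold batching that sendSnapshot implements (all types and names below are illustrative, not the production API): accumulate operations until the byte budget is hit, flush, and keep the checkpoint the target reported on the last round trip.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.ToLongFunction;

    public class BatchSender {
        static long sendInBatches(List<byte[]> ops, long chunkSizeInBytes, ToLongFunction<List<byte[]>> sendBatch) {
            final long UNASSIGNED = -2;        // mirrors the UNASSIGNED_SEQ_NO sentinel
            long targetLocalCheckpoint = UNASSIGNED;
            List<byte[]> batch = new ArrayList<>();
            long size = 0;
            for (byte[] op : ops) {
                batch.add(op);
                size += op.length;
                if (size >= chunkSizeInBytes) { // past the byte threshold: send it off
                    targetLocalCheckpoint = sendBatch.applyAsLong(batch);
                    batch.clear();
                    size = 0;
                }
            }
            // flush leftovers, or force one round trip so the target still reports back
            if (!batch.isEmpty() || targetLocalCheckpoint == UNASSIGNED) {
                targetLocalCheckpoint = sendBatch.applyAsLong(batch);
            }
            return targetLocalCheckpoint;
        }
    }
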
@@ -48,6 +48,7 @@ import org.elasticsearch.index.store.StoreFileMetaData;
import org.elasticsearch.index.translog.Translog;

import java.io.IOException;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;

@@ -374,12 +375,13 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
    }

    @Override
    public void indexTranslogOperations(List<Translog.Operation> operations, int totalTranslogOps) throws TranslogRecoveryPerformer
            .BatchOperationException {
    public long indexTranslogOperations(
        List<Translog.Operation> operations, int totalTranslogOps) throws TranslogRecoveryPerformer.BatchOperationException {
        final RecoveryState.Translog translog = state().getTranslog();
        translog.totalOperations(totalTranslogOps);
        assert indexShard().recoveryState() == state();
        indexShard().performBatchRecovery(operations);
        return indexShard().getLocalCheckpoint();
    }

    @Override

@@ -470,4 +472,9 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
            assert remove == null || remove == indexOutput; // remove maybe null if we got finished
        }
    }

    Path translogLocation() {
        return indexShard().shardPath().resolveTranslog();
    }

}

@@ -53,8 +53,10 @@ public interface RecoveryTargetHandler {
     * Index a set of translog operations on the target
     * @param operations operations to index
     * @param totalTranslogOps current number of total operations expected to be indexed
     *
     * @return the local checkpoint on the target shard
     */
    void indexTranslogOperations(List<Translog.Operation> operations, int totalTranslogOps);
    long indexTranslogOperations(List<Translog.Operation> operations, int totalTranslogOps);

    /**
     * Notifies the target of the files it is going to receive

@@ -0,0 +1,71 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.indices.recovery;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.seqno.SequenceNumbersService;
import org.elasticsearch.transport.FutureTransportResponseHandler;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponseHandler;

import java.io.IOException;

public class RecoveryTranslogOperationsResponse extends TransportResponse {

    long localCheckpoint;

    RecoveryTranslogOperationsResponse() {

    }

    RecoveryTranslogOperationsResponse(final long localCheckpoint) {
        this.localCheckpoint = localCheckpoint;
    }

    @Override
    public void writeTo(final StreamOutput out) throws IOException {
        // before 6.0.0 we responded with an empty response so we have to maintain that
        if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
            out.writeZLong(localCheckpoint);
        }
    }

    @Override
    public void readFrom(final StreamInput in) throws IOException {
        // before 6.0.0 we received an empty response so we have to maintain that
        if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
            localCheckpoint = in.readZLong();
        } else {
            localCheckpoint = SequenceNumbersService.UNASSIGNED_SEQ_NO;
        }
    }

    static TransportResponseHandler<RecoveryTranslogOperationsResponse> HANDLER =
        new FutureTransportResponseHandler<RecoveryTranslogOperationsResponse>() {
            @Override
            public RecoveryTranslogOperationsResponse newInstance() {
                return new RecoveryTranslogOperationsResponse();
            }
        };

}

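The new response class above is a textbook instance of the version-gated wire-compatibility idiom: write a new field only to peers that understand it, and default it when reading from older peers. A hedged restatement of the pattern (someNewField is an invented placeholder):

    // Writer side: older peers still see an empty body, exactly as before 6.0.0.
    public void writeTo(final StreamOutput out) throws IOException {
        if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
            out.writeZLong(someNewField); // newer peers get the payload
        }
    }

    // Reader side: fall back to a sensible default for pre-6.0.0 senders.
    public void readFrom(final StreamInput in) throws IOException {
        someNewField = in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)
            ? in.readZLong()
            : SequenceNumbersService.UNASSIGNED_SEQ_NO;
    }
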
@@ -28,7 +28,10 @@ import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.store.StoreFileMetaData;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.transport.EmptyTransportResponseHandler;
import org.elasticsearch.transport.FutureTransportResponseHandler;
import org.elasticsearch.transport.TransportFuture;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;

@@ -98,11 +101,16 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler {
    }

    @Override
    public void indexTranslogOperations(List<Translog.Operation> operations, int totalTranslogOps) {
        final RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest(
            recoveryId, shardId, operations, totalTranslogOps);
        transportService.submitRequest(targetNode, PeerRecoveryTargetService.Actions.TRANSLOG_OPS, translogOperationsRequest,
            translogOpsRequestOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
    public long indexTranslogOperations(List<Translog.Operation> operations, int totalTranslogOps) {
        final RecoveryTranslogOperationsRequest translogOperationsRequest =
            new RecoveryTranslogOperationsRequest(recoveryId, shardId, operations, totalTranslogOps);
        final TransportFuture<RecoveryTranslogOperationsResponse> future = transportService.submitRequest(
            targetNode,
            PeerRecoveryTargetService.Actions.TRANSLOG_OPS,
            translogOperationsRequest,
            translogOpsRequestOptions,
            RecoveryTranslogOperationsResponse.HANDLER);
        return future.txGet().localCheckpoint;
    }

    @Override

@@ -461,6 +461,8 @@ public class JvmInfo implements Writeable, ToXContent {

        builder.field(Fields.USING_COMPRESSED_OOPS, useCompressedOops);

        builder.field(Fields.INPUT_ARGUMENTS, inputArguments);

        builder.endObject();
        return builder;
    }

@@ -489,6 +491,7 @@ public class JvmInfo implements Writeable, ToXContent {
        static final String GC_COLLECTORS = "gc_collectors";
        static final String MEMORY_POOLS = "memory_pools";
        static final String USING_COMPRESSED_OOPS = "using_compressed_ordinary_object_pointers";
        static final String INPUT_ARGUMENTS = "input_arguments";
    }

    public static class Mem implements Writeable {

@@ -43,6 +43,8 @@ import org.elasticsearch.cluster.ClusterStateObserver;
import org.elasticsearch.cluster.InternalClusterInfoService;
import org.elasticsearch.cluster.NodeConnectionsService;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
import org.elasticsearch.cluster.node.DiscoveryNode;

@@ -291,6 +293,7 @@
                Constants.JVM_NAME,
                Constants.JAVA_VERSION,
                Constants.JVM_VERSION);
            logger.info("JVM arguments {}", Arrays.toString(jvmInfo.getInputArguments()));
            warnIfPreRelease(Version.CURRENT, Build.CURRENT.isSnapshot(), logger);

            if (logger.isDebugEnabled()) {

@@ -399,19 +402,25 @@
                .flatMap(p -> p.createComponents(client, clusterService, threadPool, resourceWatcherService,
                                                 scriptModule.getScriptService(), xContentRegistry).stream())
                .collect(Collectors.toList());
            Collection<UnaryOperator<Map<String, MetaData.Custom>>> customMetaDataUpgraders =
                pluginsService.filterPlugins(Plugin.class).stream()
                    .map(Plugin::getCustomMetaDataUpgrader)
                    .collect(Collectors.toList());
            final RestController restController = actionModule.getRestController();
            final NetworkModule networkModule = new NetworkModule(settings, false, pluginsService.filterPlugins(NetworkPlugin.class),
                threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, xContentRegistry, networkService, restController);
            final MetaDataUpgrader metaDataUpgrader = new MetaDataUpgrader(customMetaDataUpgraders);
            Collection<UnaryOperator<Map<String, MetaData.Custom>>> customMetaDataUpgraders =
                pluginsService.filterPlugins(Plugin.class).stream()
                    .map(Plugin::getCustomMetaDataUpgrader)
                    .collect(Collectors.toList());
            Collection<UnaryOperator<Map<String, IndexTemplateMetaData>>> indexTemplateMetaDataUpgraders =
                pluginsService.filterPlugins(Plugin.class).stream()
                    .map(Plugin::getIndexTemplateMetaDataUpgrader)
                    .collect(Collectors.toList());
            Collection<UnaryOperator<IndexMetaData>> indexMetaDataUpgraders = pluginsService.filterPlugins(Plugin.class).stream()
                .map(Plugin::getIndexMetaDataUpgrader).collect(Collectors.toList());
            final MetaDataUpgrader metaDataUpgrader = new MetaDataUpgrader(customMetaDataUpgraders, indexTemplateMetaDataUpgraders);
            final Transport transport = networkModule.getTransportSupplier().get();
            final TransportService transportService = newTransportService(settings, transport, threadPool,
                networkModule.getTransportInterceptor(), localNodeFactory, settingsModule.getClusterSettings());
            final SearchTransportService searchTransportService = new SearchTransportService(settings,
                settingsModule.getClusterSettings(), transportService);
                transportService);
            final Consumer<Binder> httpBind;
            final HttpServerTransport httpServerTransport;
            if (networkModule.isHttpEnabled()) {

@@ -461,8 +470,8 @@
                    b.bind(TransportService.class).toInstance(transportService);
                    b.bind(NetworkService.class).toInstance(networkService);
                    b.bind(UpdateHelper.class).toInstance(new UpdateHelper(settings, scriptModule.getScriptService()));
                    b.bind(MetaDataIndexUpgradeService.class).toInstance(new MetaDataIndexUpgradeService(settings,
                        xContentRegistry, indicesModule.getMapperRegistry(), settingsModule.getIndexScopedSettings()));
                    b.bind(MetaDataIndexUpgradeService.class).toInstance(new MetaDataIndexUpgradeService(settings, xContentRegistry,
                        indicesModule.getMapperRegistry(), settingsModule.getIndexScopedSettings(), indexMetaDataUpgraders));
                    b.bind(ClusterInfoService.class).toInstance(clusterInfoService);
                    b.bind(Discovery.class).toInstance(discoveryModule.getDiscovery());
                    {

@@ -736,10 +745,6 @@

        // start nodes now, after the http server, because it may take some time
        tribeService.startNodes();
        // starts connecting to remote clusters if any cluster is configured
        SearchTransportService searchTransportService = injector.getInstance(SearchTransportService.class);
        searchTransportService.start();

        logger.info("started");

        return this;

@@ -773,7 +778,6 @@
        injector.getInstance(GatewayService.class).stop();
        injector.getInstance(SearchService.class).stop();
        injector.getInstance(TransportService.class).stop();
        injector.getInstance(SearchTransportService.class).stop();

        pluginLifecycleComponents.forEach(LifecycleComponent::stop);
        // we should stop this last since it waits for resources to get released

@@ -835,8 +839,6 @@
        toClose.add(injector.getInstance(SearchService.class));
        toClose.add(() -> stopWatch.stop().start("transport"));
        toClose.add(injector.getInstance(TransportService.class));
        toClose.add(() -> stopWatch.stop().start("search_transport_service"));
        toClose.add(injector.getInstance(SearchTransportService.class));

        for (LifecycleComponent plugin : pluginLifecycleComponents) {
            toClose.add(() -> stopWatch.stop().start("plugin(" + plugin.getClass().getName() + ")"));

@@ -19,6 +19,7 @@

package org.elasticsearch.plugins;

import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
import org.elasticsearch.cluster.metadata.MetaData;

import java.util.Collection;

@@ -32,7 +33,10 @@ import java.util.function.UnaryOperator;
public class MetaDataUpgrader {
    public final UnaryOperator<Map<String, MetaData.Custom>> customMetaDataUpgraders;

    public MetaDataUpgrader(Collection<UnaryOperator<Map<String, MetaData.Custom>>> customMetaDataUpgraders) {
    public final UnaryOperator<Map<String, IndexTemplateMetaData>> indexTemplateMetaDataUpgraders;

    public MetaDataUpgrader(Collection<UnaryOperator<Map<String, MetaData.Custom>>> customMetaDataUpgraders,
                            Collection<UnaryOperator<Map<String, IndexTemplateMetaData>>> indexTemplateMetaDataUpgraders) {
        this.customMetaDataUpgraders = customs -> {
            Map<String, MetaData.Custom> upgradedCustoms = new HashMap<>(customs);
            for (UnaryOperator<Map<String, MetaData.Custom>> customMetaDataUpgrader : customMetaDataUpgraders) {

@@ -40,5 +44,13 @@
            }
            return upgradedCustoms;
        };

        this.indexTemplateMetaDataUpgraders = templates -> {
            Map<String, IndexTemplateMetaData> upgradedTemplates = new HashMap<>(templates);
            for (UnaryOperator<Map<String, IndexTemplateMetaData>> upgrader : indexTemplateMetaDataUpgraders) {
                upgradedTemplates = upgrader.apply(upgradedTemplates);
            }
            return upgradedTemplates;
        };
    }
}

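The upgrader fields above fold a collection of UnaryOperators over a defensive copy of the input map. A standalone sketch of that fold, with illustrative types (String values instead of metadata classes):

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.function.UnaryOperator;

    public class UpgraderChain {
        public static Map<String, String> upgrade(Map<String, String> input,
                                                  List<UnaryOperator<Map<String, String>>> upgraders) {
            Map<String, String> upgraded = new HashMap<>(input); // copy so callers keep their map
            for (UnaryOperator<Map<String, String>> upgrader : upgraders) {
                upgraded = upgrader.apply(upgraded); // each plugin sees the previous result
            }
            return upgraded;
        }
    }
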
@@ -23,6 +23,8 @@ import org.elasticsearch.action.ActionModule;
import org.elasticsearch.bootstrap.BootstrapCheck;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.component.LifecycleComponent;

@@ -153,14 +155,49 @@ public abstract class Plugin implements Closeable {
     * Provides a function to modify global custom meta data on startup.
     * <p>
     * Plugins should return the input custom map via {@link UnaryOperator#identity()} if no upgrade is required.
     * <p>
     * The order of custom meta data upgrader calls is undefined and can change between runs, so it is expected that
     * plugins will modify only data owned by them to avoid conflicts.
     * <p>
     * @return Never {@code null}. The same or upgraded {@code MetaData.Custom} map.
     * @throws IllegalStateException if the node should not start because at least one {@code MetaData.Custom}
     *                               is unsupported
     */
    public UnaryOperator<Map<String, MetaData.Custom>> getCustomMetaDataUpgrader() {
        return UnaryOperator.identity();
    }

    /**
     * Provides a function to modify index template meta data on startup.
     * <p>
     * Plugins should return the input template map via {@link UnaryOperator#identity()} if no upgrade is required.
     * <p>
     * The order of the template upgrader calls is undefined and can change between runs, so it is expected that
     * plugins will modify only templates owned by them to avoid conflicts.
     * <p>
     * @return Never {@code null}. The same or upgraded {@code IndexTemplateMetaData} map.
     * @throws IllegalStateException if the node should not start because at least one {@code IndexTemplateMetaData}
     *                               cannot be upgraded
     */
    public UnaryOperator<Map<String, IndexTemplateMetaData>> getIndexTemplateMetaDataUpgrader() {
        return UnaryOperator.identity();
    }

    /**
     * Provides a function to modify index meta data when an index is introduced into the cluster state for the first time.
     * <p>
     * Plugins should return the input index metadata via {@link UnaryOperator#identity()} if no upgrade is required.
     * <p>
     * The order of the index upgrader calls for the same index is undefined and can change between runs, so it is expected that
     * plugins will modify only indices owned by them to avoid conflicts.
     * <p>
     * @return Never {@code null}. The same or upgraded {@code IndexMetaData}.
     * @throws IllegalStateException if the node should not start because the index is unsupported
     */
    public UnaryOperator<IndexMetaData> getIndexMetaDataUpgrader() {
        return UnaryOperator.identity();
    }

    /**
     * Provides the list of this plugin's custom thread pools, empty if
     * none.

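A hypothetical plugin showing how one of the new extension points might be used (the plugin class and template name are invented for illustration; the map handed in is the mutable copy made by MetaDataUpgrader):

    import java.util.Map;
    import java.util.function.UnaryOperator;
    import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
    import org.elasticsearch.plugins.Plugin;

    public class MyPlugin extends Plugin {
        @Override
        public UnaryOperator<Map<String, IndexTemplateMetaData>> getIndexTemplateMetaDataUpgrader() {
            return templates -> {
                // modify only templates owned by this plugin, as the javadoc requires;
                // here we drop a legacy template and pass everything else through
                templates.remove("my-plugin-legacy-template");
                return templates;
            };
        }
    }
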
@@ -728,6 +728,9 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
            }
            bytes = bStream.bytes();
        }
        if (snapshotsBlobContainer.blobExists(INCOMPATIBLE_SNAPSHOTS_BLOB)) {
            snapshotsBlobContainer.deleteBlob(INCOMPATIBLE_SNAPSHOTS_BLOB);
        }
        // write the incompatible snapshots blob
        writeAtomic(INCOMPATIBLE_SNAPSHOTS_BLOB, bytes);
    }

@@ -545,7 +545,7 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable<S
        ShardId shardId = get(Fields._SHARD, values, null);
        String nodeId = get(Fields._NODE, values, null);
        if (shardId != null && nodeId != null) {
            searchHit.shard(new SearchShardTarget(nodeId, shardId, OriginalIndices.NONE));
            searchHit.shard(new SearchShardTarget(nodeId, shardId, null, OriginalIndices.NONE));
        }
        searchHit.fields(fields);
        return searchHit;

@@ -500,7 +500,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
        IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
        IndexShard indexShard = indexService.getShard(request.shardId().getId());
        SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().getId(),
            indexShard.shardId(), OriginalIndices.NONE);
            indexShard.shardId(), null, OriginalIndices.NONE);
        Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher;

        final DefaultSearchContext searchContext = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget,

@@ -33,13 +33,14 @@ import java.io.IOException;
/**
 * The target that the search request was executed on.
 */
public class SearchShardTarget implements Writeable, Comparable<SearchShardTarget> {
public final class SearchShardTarget implements Writeable, Comparable<SearchShardTarget> {

    private final Text nodeId;
    private final ShardId shardId;
    //original indices are only needed in the coordinating node throughout the search request execution.
    //original indices and cluster alias are only needed in the coordinating node throughout the search request execution.
    //no need to serialize them as part of SearchShardTarget.
    private final transient OriginalIndices originalIndices;
    private final transient String clusterAlias;

    public SearchShardTarget(StreamInput in) throws IOException {
        if (in.readBoolean()) {

@@ -49,17 +50,19 @@ public class SearchShardTarget implements Writeable, Comparable<SearchShardTarge
        }
        shardId = ShardId.readShardId(in);
        this.originalIndices = null;
        this.clusterAlias = null;
    }

    public SearchShardTarget(String nodeId, ShardId shardId, OriginalIndices originalIndices) {
    public SearchShardTarget(String nodeId, ShardId shardId, String clusterAlias, OriginalIndices originalIndices) {
        this.nodeId = nodeId == null ? null : new Text(nodeId);
        this.shardId = shardId;
        this.originalIndices = originalIndices;
        this.clusterAlias = clusterAlias;
    }

    //this constructor is only used in tests
    public SearchShardTarget(String nodeId, Index index, int shardId) {
        this(nodeId, new ShardId(index, shardId), OriginalIndices.NONE);
        this(nodeId, new ShardId(index, shardId), null, OriginalIndices.NONE);
    }

    @Nullable

@@ -83,6 +86,10 @@ public class SearchShardTarget implements Writeable, Comparable<SearchShardTarge
        return originalIndices;
    }

    public String getClusterAlias() {
        return clusterAlias;
    }

    @Override
    public int compareTo(SearchShardTarget o) {
        int i = shardId.getIndexName().compareTo(o.getIndex());

@@ -63,7 +63,7 @@ public class ScriptedMetricAggregator extends MetricsAggregator {
    public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
            final LeafBucketCollector sub) throws IOException {
        final LeafSearchScript leafMapScript = mapScript.getLeafSearchScript(ctx);
        return new LeafBucketCollectorBase(sub, mapScript) {
        return new LeafBucketCollectorBase(sub, leafMapScript) {
            @Override
            public void collect(int doc, long bucket) throws IOException {
                assert bucket == 0 : bucket;

@@ -0,0 +1,162 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.transport;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.ClusterNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;

import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;

/**
 * Base class for all services and components that need up-to-date information about the registered remote clusters
 */
public abstract class RemoteClusterAware extends AbstractComponent {

    /**
     * A list of initial seed nodes to discover eligible nodes from the remote cluster
     */
    public static final Setting.AffixSetting<List<InetSocketAddress>> REMOTE_CLUSTERS_SEEDS = Setting.affixKeySetting("search.remote.",
        "seeds", (key) -> Setting.listSetting(key, Collections.emptyList(), RemoteClusterAware::parseSeedAddress,
            Setting.Property.NodeScope, Setting.Property.Dynamic));
    public static final char REMOTE_CLUSTER_INDEX_SEPARATOR = ':';
    public static final String LOCAL_CLUSTER_GROUP_KEY = "";
    protected final ClusterNameExpressionResolver clusterNameResolver;

    /**
     * Creates a new {@link RemoteClusterAware} instance
     * @param settings the nodes level settings
     */
    protected RemoteClusterAware(Settings settings) {
        super(settings);
        this.clusterNameResolver = new ClusterNameExpressionResolver(settings);
    }

    protected static Map<String, List<DiscoveryNode>> buildRemoteClustersSeeds(Settings settings) {
        Stream<Setting<List<InetSocketAddress>>> allConcreteSettings = REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(settings);
        return allConcreteSettings.collect(
            Collectors.toMap(REMOTE_CLUSTERS_SEEDS::getNamespace, concreteSetting -> {
                String clusterName = REMOTE_CLUSTERS_SEEDS.getNamespace(concreteSetting);
                List<DiscoveryNode> nodes = new ArrayList<>();
                for (InetSocketAddress address : concreteSetting.get(settings)) {
                    TransportAddress transportAddress = new TransportAddress(address);
                    DiscoveryNode node = new DiscoveryNode(clusterName + "#" + transportAddress.toString(),
                        transportAddress,
                        Version.CURRENT.minimumCompatibilityVersion());
                    nodes.add(node);
                }
                return nodes;
            }));
    }

    /**
     * Groups indices per cluster by splitting remote cluster-alias, index-name pairs on {@link #REMOTE_CLUSTER_INDEX_SEPARATOR}. All
     * indices per cluster are collected as a list in the returned map keyed by the cluster alias. Local indices are grouped under
     * {@link #LOCAL_CLUSTER_GROUP_KEY}. The returned map is mutable.
     *
     * @param requestIndices the indices in the search request to filter
     * @param indexExists a predicate that can test if a certain index or alias exists in the local cluster
     *
     * @return a map of grouped remote and local indices
     */
    public Map<String, List<String>> groupClusterIndices(String[] requestIndices, Predicate<String> indexExists) {
        Map<String, List<String>> perClusterIndices = new HashMap<>();
        Set<String> remoteClusterNames = getRemoteClusterNames();
        for (String index : requestIndices) {
            int i = index.indexOf(RemoteClusterService.REMOTE_CLUSTER_INDEX_SEPARATOR);
            if (i >= 0) {
                String remoteClusterName = index.substring(0, i);
                List<String> clusters = clusterNameResolver.resolveClusterNames(remoteClusterNames, remoteClusterName);
                if (clusters.isEmpty() == false) {
                    if (indexExists.test(index)) {
                        // we use : as a separator for remote clusters. might conflict if there is an index that is actually named
                        // remote_cluster_alias:index_name - for this case we fail the request. the user can easily change the cluster alias
                        // if that happens
                        throw new IllegalArgumentException("Can not filter indices; index " + index +
                            " exists but there is also a remote cluster named: " + remoteClusterName);
                    }
                    String indexName = index.substring(i + 1);
                    for (String clusterName : clusters) {
                        perClusterIndices.computeIfAbsent(clusterName, k -> new ArrayList<>()).add(indexName);
                    }
                } else {
                    perClusterIndices.computeIfAbsent(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, k -> new ArrayList<>()).add(index);
                }
            } else {
                perClusterIndices.computeIfAbsent(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, k -> new ArrayList<>()).add(index);
            }
        }
        return perClusterIndices;
    }

    protected abstract Set<String> getRemoteClusterNames();

    /**
     * Subclasses must implement this to receive information about updated cluster aliases. If the given address list is
     * empty the cluster alias is unregistered and should be removed.
     */
    protected abstract void updateRemoteCluster(String clusterAlias, List<InetSocketAddress> addresses);

    /**
     * Registers this instance to listen to updates on the cluster settings.
     */
    public void listenForUpdates(ClusterSettings clusterSettings) {
        clusterSettings.addAffixUpdateConsumer(RemoteClusterAware.REMOTE_CLUSTERS_SEEDS, this::updateRemoteCluster,
            (namespace, value) -> {});
    }

    private static InetSocketAddress parseSeedAddress(String remoteHost) {
        int portSeparator = remoteHost.lastIndexOf(':'); // in case we have an IPv6 address, e.g. [::1]:9300
        if (portSeparator == -1 || portSeparator == remoteHost.length()) {
            throw new IllegalArgumentException("remote hosts need to be configured as [host:port], found [" + remoteHost + "] instead");
        }
        String host = remoteHost.substring(0, portSeparator);
        InetAddress hostAddress;
        try {
            hostAddress = InetAddress.getByName(host);
        } catch (UnknownHostException e) {
            throw new IllegalArgumentException("unknown host [" + host + "]", e);
        }
        try {
            int port = Integer.valueOf(remoteHost.substring(portSeparator + 1));
            if (port <= 0) {
                throw new IllegalArgumentException("port number must be > 0 but was: [" + port + "]");
            }
            return new InetSocketAddress(hostAddress, port);
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("port must be a number", e);
        }
    }
}

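To make the grouping contract of groupClusterIndices concrete, here is a simplified, self-contained re-implementation of the rule for illustration only (it resolves aliases by exact match, unlike ClusterNameExpressionResolver, which also handles wildcard expressions):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    // e.g. {"cluster_one:foo", "bar"} with registered alias "cluster_one"
    //   -> {"cluster_one"=[foo], ""=[bar]}   ("" is LOCAL_CLUSTER_GROUP_KEY)
    static Map<String, List<String>> group(String[] indices, Set<String> remoteAliases) {
        Map<String, List<String>> perCluster = new HashMap<>();
        for (String index : indices) {
            int i = index.indexOf(':');
            String key = "";       // local cluster by default
            String name = index;
            if (i >= 0 && remoteAliases.contains(index.substring(0, i))) {
                key = index.substring(0, i);
                name = index.substring(i + 1);
            }
            perCluster.computeIfAbsent(key, k -> new ArrayList<>()).add(name);
        }
        return perCluster;
    }
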
@@ -16,7 +16,7 @@
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.action.search;
package org.elasticsearch.transport;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

@@ -33,6 +33,7 @@ import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse
import org.elasticsearch.action.admin.cluster.state.ClusterStateAction;
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.component.AbstractComponent;

@@ -42,17 +43,6 @@ import org.elasticsearch.common.util.CancellableThreads;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.ConnectionProfile;
import org.elasticsearch.transport.TcpTransport;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportActionProxy;
import org.elasticsearch.transport.TransportConnectionListener;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;

import java.io.Closeable;
import java.io.IOException;

@ -16,7 +16,7 @@
|
|||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.action.search;
|
||||
package org.elasticsearch.transport;
|
||||
|
||||
import org.apache.logging.log4j.util.Supplier;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
|
@ -25,13 +25,13 @@ import org.elasticsearch.action.ActionListener;
|
|||
import org.elasticsearch.action.OriginalIndices;
|
||||
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup;
|
||||
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
|
||||
import org.elasticsearch.action.search.SearchRequest;
|
||||
import org.elasticsearch.action.search.SearchShardIterator;
|
||||
import org.elasticsearch.action.support.GroupedActionListener;
|
||||
import org.elasticsearch.action.support.PlainActionFuture;
|
||||
import org.elasticsearch.cluster.metadata.ClusterNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.Booleans;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
|
@ -40,16 +40,10 @@ import org.elasticsearch.common.util.concurrent.CountDown;
|
|||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.search.internal.AliasFilter;
|
||||
import org.elasticsearch.transport.Transport;
|
||||
import org.elasticsearch.transport.TransportException;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
import java.net.InetAddress;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.UnknownHostException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
|
@ -64,21 +58,12 @@ import java.util.concurrent.atomic.AtomicReference;
|
|||
import java.util.function.Function;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
/**
|
||||
* Basic service for accessing remote clusters via gateway nodes
|
||||
*/
|
||||
public final class RemoteClusterService extends AbstractComponent implements Closeable {
|
||||
public final class RemoteClusterService extends RemoteClusterAware implements Closeable {
|
||||
|
||||
static final String LOCAL_CLUSTER_GROUP_KEY = "";
|
||||
|
||||
/**
|
||||
* A list of initial seed nodes to discover eligible nodes from the remote cluster
|
||||
*/
|
||||
public static final Setting.AffixSetting<List<InetSocketAddress>> REMOTE_CLUSTERS_SEEDS = Setting.affixKeySetting("search.remote.",
|
||||
"seeds", (key) -> Setting.listSetting(key, Collections.emptyList(), RemoteClusterService::parseSeedAddress,
|
||||
Setting.Property.NodeScope, Setting.Property.Dynamic));
|
||||
/**
|
||||
* The maximum number of connections that will be established to a remote cluster. For instance if there is only a single
|
||||
* seed node, other nodes will be discovered up to the given number of nodes in this setting. The default is 3.
|
||||
|
@ -109,17 +94,13 @@ public final class RemoteClusterService extends AbstractComponent implements Clo
|
|||
public static final Setting<Boolean> ENABLE_REMOTE_CLUSTERS = Setting.boolSetting("search.remote.connect", true,
|
||||
Setting.Property.NodeScope);
|
||||
|
||||
private static final char REMOTE_CLUSTER_INDEX_SEPARATOR = ':';
|
||||
|
||||
private final TransportService transportService;
|
||||
private final int numRemoteConnections;
|
||||
private final ClusterNameExpressionResolver clusterNameResolver;
|
||||
private volatile Map<String, RemoteClusterConnection> remoteClusters = Collections.emptyMap();
|
||||
|
||||
RemoteClusterService(Settings settings, TransportService transportService) {
|
||||
super(settings);
|
||||
this.transportService = transportService;
|
||||
this.clusterNameResolver = new ClusterNameExpressionResolver(settings);
|
||||
numRemoteConnections = REMOTE_CONNECTIONS_PER_CLUSTER.get(settings);
|
||||
}
|
||||
|
||||
|
@ -187,7 +168,7 @@ public final class RemoteClusterService extends AbstractComponent implements Clo
|
|||
/**
|
||||
* Returns <code>true</code> if at least one remote cluster is configured
|
||||
*/
|
||||
boolean isCrossClusterSearchEnabled() {
|
||||
public boolean isCrossClusterSearchEnabled() {
|
||||
return remoteClusters.isEmpty() == false;
|
||||
}
|
||||
|
||||
|
@ -195,46 +176,6 @@ public final class RemoteClusterService extends AbstractComponent implements Clo
|
|||
return remoteClusters.get(remoteCluster).isNodeConnected(node);
|
||||
}
|
||||
|
||||
/**
|
||||
* Groups indices per cluster by splitting remote cluster-alias, index-name pairs on {@link #REMOTE_CLUSTER_INDEX_SEPARATOR}. All
|
||||
* indices per cluster are collected as a list in the returned map keyed by the cluster alias. Local indices are grouped under
|
||||
* {@link #LOCAL_CLUSTER_GROUP_KEY}. The returned map is mutable.
|
||||
*
|
||||
* @param requestIndices the indices in the search request to filter
|
||||
* @param indexExists a predicate that can test if a certain index or alias exists
|
||||
*
|
||||
* @return a map of grouped remote and local indices
|
||||
*/
|
||||
Map<String, List<String>> groupClusterIndices(String[] requestIndices, Predicate<String> indexExists) {
|
||||
Map<String, List<String>> perClusterIndices = new HashMap<>();
|
||||
Set<String> remoteClusterNames = this.remoteClusters.keySet();
|
||||
for (String index : requestIndices) {
|
||||
int i = index.indexOf(REMOTE_CLUSTER_INDEX_SEPARATOR);
|
||||
if (i >= 0) {
|
||||
String remoteClusterName = index.substring(0, i);
|
||||
List<String> clusters = clusterNameResolver.resolveClusterNames(remoteClusterNames, remoteClusterName);
|
||||
if (clusters.isEmpty() == false) {
|
||||
if (indexExists.test(index)) {
|
||||
// we use : as a separator for remote clusters. might conflict if there is an index that is actually named
|
||||
// remote_cluster_alias:index_name - for this case we fail the request. the user can easily change the cluster alias
|
||||
// if that happens
|
||||
throw new IllegalArgumentException("Can not filter indices; index " + index +
|
||||
" exists but there is also a remote cluster named: " + remoteClusterName);
|
||||
}
|
||||
String indexName = index.substring(i + 1);
|
||||
for (String clusterName : clusters) {
|
||||
perClusterIndices.computeIfAbsent(clusterName, k -> new ArrayList<>()).add(indexName);
|
||||
}
|
||||
} else {
|
||||
perClusterIndices.computeIfAbsent(LOCAL_CLUSTER_GROUP_KEY, k -> new ArrayList<>()).add(index);
|
||||
}
|
||||
} else {
|
||||
perClusterIndices.computeIfAbsent(LOCAL_CLUSTER_GROUP_KEY, k -> new ArrayList<>()).add(index);
|
||||
}
|
||||
}
|
||||
return perClusterIndices;
|
||||
}

    /**
     * Returns <code>true</code> iff the given cluster is configured as a remote cluster. Otherwise <code>false</code>
     */

@@ -242,7 +183,7 @@ public final class RemoteClusterService extends AbstractComponent implements Clo
        return remoteClusters.containsKey(clusterName);
    }

    void collectSearchShards(SearchRequest searchRequest, Map<String, OriginalIndices> remoteIndicesByCluster,
    public void collectSearchShards(SearchRequest searchRequest, Map<String, OriginalIndices> remoteIndicesByCluster,
                                    ActionListener<Map<String, ClusterSearchShardsResponse>> listener) {
        final CountDown responsesCountDown = new CountDown(remoteIndicesByCluster.size());
        final Map<String, ClusterSearchShardsResponse> searchShardsResponses = new ConcurrentHashMap<>();

@@ -287,54 +228,11 @@ public final class RemoteClusterService extends AbstractComponent implements Clo
        }
    }

    Function<String, Transport.Connection> processRemoteShards(Map<String, ClusterSearchShardsResponse> searchShardsResponses,
                                                               Map<String, OriginalIndices> remoteIndicesByCluster,
                                                               List<SearchShardIterator> remoteShardIterators,
                                                               Map<String, AliasFilter> aliasFilterMap) {
        Map<String, Supplier<Transport.Connection>> nodeToCluster = new HashMap<>();
        for (Map.Entry<String, ClusterSearchShardsResponse> entry : searchShardsResponses.entrySet()) {
            String clusterAlias = entry.getKey();
            ClusterSearchShardsResponse searchShardsResponse = entry.getValue();
            for (DiscoveryNode remoteNode : searchShardsResponse.getNodes()) {
                nodeToCluster.put(remoteNode.getId(), () -> getConnection(remoteNode, clusterAlias));
            }
            Map<String, AliasFilter> indicesAndFilters = searchShardsResponse.getIndicesAndFilters();
            for (ClusterSearchShardsGroup clusterSearchShardsGroup : searchShardsResponse.getGroups()) {
                //add the cluster name to the remote index names for indices disambiguation
                //this ends up in the hits returned with the search response
                ShardId shardId = clusterSearchShardsGroup.getShardId();
                Index remoteIndex = shardId.getIndex();
                Index index = new Index(clusterAlias + REMOTE_CLUSTER_INDEX_SEPARATOR + remoteIndex.getName(), remoteIndex.getUUID());
                OriginalIndices originalIndices = remoteIndicesByCluster.get(clusterAlias);
                assert originalIndices != null;
                SearchShardIterator shardIterator = new SearchShardIterator(new ShardId(index, shardId.getId()),
                    Arrays.asList(clusterSearchShardsGroup.getShards()), originalIndices);
                remoteShardIterators.add(shardIterator);
                AliasFilter aliasFilter;
                if (indicesAndFilters == null) {
                    aliasFilter = new AliasFilter(null, Strings.EMPTY_ARRAY);
                } else {
                    aliasFilter = indicesAndFilters.get(shardId.getIndexName());
                    assert aliasFilter != null;
                }
                // here we have to map the filters to the UUID since from now on we use the uuid for the lookup
                aliasFilterMap.put(remoteIndex.getUUID(), aliasFilter);
            }
        }
        return (nodeId) -> {
            Supplier<Transport.Connection> supplier = nodeToCluster.get(nodeId);
            if (supplier == null) {
                throw new IllegalArgumentException("unknown remote node: " + nodeId);
            }
            return supplier.get();
        };
    }
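
The `Function` returned above is a node-id-to-connection resolver handed to the search phases; unknown node ids fail fast. A toy stand-in with plain JDK types shows the contract (the `String` standing in for `Transport.Connection` is purely illustrative):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.Function;
    import java.util.function.Supplier;

    public class NodeResolverSketch {
        public static void main(String[] args) {
            Map<String, Supplier<String>> nodeToCluster = new HashMap<>();
            nodeToCluster.put("node1", () -> "connection-to-node1@cluster_one");

            // Same shape as the lambda returned by processRemoteShards.
            Function<String, String> resolver = nodeId -> {
                Supplier<String> supplier = nodeToCluster.get(nodeId);
                if (supplier == null) {
                    throw new IllegalArgumentException("unknown remote node: " + nodeId);
                }
                return supplier.get();
            };

            System.out.println(resolver.apply("node1")); // connection-to-node1@cluster_one
            // resolver.apply("node2") would throw IllegalArgumentException
        }
    }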

    /**
     * Returns a connection to the given node on the given remote cluster
     * @throws IllegalArgumentException if the remote cluster is unknown
     */
    private Transport.Connection getConnection(DiscoveryNode node, String cluster) {
    public Transport.Connection getConnection(DiscoveryNode node, String cluster) {
        RemoteClusterConnection connection = remoteClusters.get(cluster);
        if (connection == null) {
            throw new IllegalArgumentException("no such remote cluster: " + cluster);

@@ -342,7 +240,12 @@ public final class RemoteClusterService extends AbstractComponent implements Clo
        return connection.getConnection(node);
    }

    void updateRemoteCluster(String clusterAlias, List<InetSocketAddress> addresses) {
    @Override
    protected Set<String> getRemoteClusterNames() {
        return this.remoteClusters.keySet();
    }

    protected void updateRemoteCluster(String clusterAlias, List<InetSocketAddress> addresses) {
        updateRemoteCluster(clusterAlias, addresses, ActionListener.wrap((x) -> {}, (x) -> {}));
    }

@@ -359,47 +262,6 @@ public final class RemoteClusterService extends AbstractComponent implements Clo
        updateRemoteClusters(Collections.singletonMap(clusterAlias, nodes), connectionListener);
    }

    static Map<String, List<DiscoveryNode>> buildRemoteClustersSeeds(Settings settings) {
        Stream<Setting<List<InetSocketAddress>>> allConcreteSettings = REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(settings);
        return allConcreteSettings.collect(
            Collectors.toMap(REMOTE_CLUSTERS_SEEDS::getNamespace, concreteSetting -> {
                String clusterName = REMOTE_CLUSTERS_SEEDS.getNamespace(concreteSetting);
                List<DiscoveryNode> nodes = new ArrayList<>();
                for (InetSocketAddress address : concreteSetting.get(settings)) {
                    TransportAddress transportAddress = new TransportAddress(address);
                    DiscoveryNode node = new DiscoveryNode(clusterName + "#" + transportAddress.toString(),
                        transportAddress,
                        Version.CURRENT.minimumCompatibilityVersion());
                    nodes.add(node);
                }
                return nodes;
            }));
    }

    private static InetSocketAddress parseSeedAddress(String remoteHost) {
        int portSeparator = remoteHost.lastIndexOf(':'); // in case we have a IPv6 address ie. [::1]:9300
        if (portSeparator == -1 || portSeparator == remoteHost.length()) {
            throw new IllegalArgumentException("remote hosts need to be configured as [host:port], found [" + remoteHost + "] instead");
        }
        String host = remoteHost.substring(0, portSeparator);
        InetAddress hostAddress;
        try {
            hostAddress = InetAddress.getByName(host);
        } catch (UnknownHostException e) {
            throw new IllegalArgumentException("unknown host [" + host + "]", e);
        }
        try {
            int port = Integer.valueOf(remoteHost.substring(portSeparator + 1));
            if (port <= 0) {
                throw new IllegalArgumentException("port number must be > 0 but was: [" + port + "]");
            }
            return new InetSocketAddress(hostAddress, port);
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("port must be a number", e);
        }
    }
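
`lastIndexOf(':')` rather than `indexOf(':')` is what keeps bracketed IPv6 seed addresses such as `[::1]:9300` intact. A reduced sketch of just the split logic, with host resolution and port validation elided:

    public class SeedAddressSketch {
        static String[] splitHostPort(String remoteHost) {
            int portSeparator = remoteHost.lastIndexOf(':');
            if (portSeparator == -1 || portSeparator == remoteHost.length()) {
                throw new IllegalArgumentException(
                    "remote hosts need to be configured as [host:port], found [" + remoteHost + "] instead");
            }
            return new String[] { remoteHost.substring(0, portSeparator), remoteHost.substring(portSeparator + 1) };
        }

        public static void main(String[] args) {
            // [host, port] for both IPv4 and bracketed IPv6 literals:
            System.out.println(java.util.Arrays.toString(splitHostPort("127.0.0.1:9300"))); // [127.0.0.1, 9300]
            System.out.println(java.util.Arrays.toString(splitHostPort("[::1]:9300")));     // [[::1], 9300]
        }
    }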

    /**
     * Connects to all remote clusters in a blocking fashion. This should be called on node startup to establish an initial connection
     * to all configured seed nodes.

@@ -407,7 +269,7 @@ public final class RemoteClusterService extends AbstractComponent implements Clo
    void initializeRemoteClusters() {
        final TimeValue timeValue = REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings);
        final PlainActionFuture<Void> future = new PlainActionFuture<>();
        Map<String, List<DiscoveryNode>> seeds = buildRemoteClustersSeeds(settings);
        Map<String, List<DiscoveryNode>> seeds = RemoteClusterAware.buildRemoteClustersSeeds(settings);
        updateRemoteClusters(seeds, future);
        try {
            future.get(timeValue.millis(), TimeUnit.MILLISECONDS);
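
The blocking startup above works because `PlainActionFuture` is both an `ActionListener` and a `Future`: it is handed to the asynchronous connect as the listener, and then `get(timeout)` blocks the caller. The same adapter shape can be sketched with JDK types alone (the `Listener` interface and `FutureListener` name are invented for the sketch):

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.TimeUnit;

    public class BlockingListenerSketch {
        interface Listener<T> { void onResponse(T value); void onFailure(Exception e); }

        // A CompletableFuture that can be handed out as a Listener, like PlainActionFuture.
        static class FutureListener<T> extends CompletableFuture<T> implements Listener<T> {
            @Override public void onResponse(T value) { complete(value); }
            @Override public void onFailure(Exception e) { completeExceptionally(e); }
        }

        public static void main(String[] args) throws Exception {
            FutureListener<Void> future = new FutureListener<>();
            // Asynchronous work reports back through the listener...
            new Thread(() -> future.onResponse(null)).start();
            // ...while the caller blocks with a timeout, as initializeRemoteClusters does.
            future.get(10, TimeUnit.SECONDS);
        }
    }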

@@ -16,7 +16,7 @@
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.action.search;
package org.elasticsearch.transport;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

@@ -21,6 +21,7 @@ package org.elasticsearch.transport;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction;
import org.elasticsearch.cluster.ClusterName;

@@ -82,6 +83,7 @@ public class TransportService extends AbstractLifecycleComponent {
    protected final TaskManager taskManager;
    private final TransportInterceptor.AsyncSender asyncSender;
    private final Function<BoundTransportAddress, DiscoveryNode> localNodeFactory;
    private final boolean connectToRemoteCluster;

    volatile Map<String, RequestHandlerRegistry> requestHandlers = Collections.emptyMap();
    final Object requestHandlerMutex = new Object();

@@ -119,6 +121,8 @@ public class TransportService extends AbstractLifecycleComponent {
    volatile String[] tracerLogInclude;
    volatile String[] tracerLogExclude;

    private final RemoteClusterService remoteClusterService;

    /** if set will call requests sent to this id to shortcut and executed locally */
    volatile DiscoveryNode localNode = null;
    private final Transport.Connection localNodeConnection = new Transport.Connection() {

@@ -158,12 +162,21 @@ public class TransportService extends AbstractLifecycleComponent {
        taskManager = createTaskManager();
        this.interceptor = transportInterceptor;
        this.asyncSender = interceptor.interceptSender(this::sendRequestInternal);
        this.connectToRemoteCluster = RemoteClusterService.ENABLE_REMOTE_CLUSTERS.get(settings);
        remoteClusterService = new RemoteClusterService(settings, this);
        if (clusterSettings != null) {
            clusterSettings.addSettingsUpdateConsumer(TRACE_LOG_INCLUDE_SETTING, this::setTracerLogInclude);
            clusterSettings.addSettingsUpdateConsumer(TRACE_LOG_EXCLUDE_SETTING, this::setTracerLogExclude);
            if (connectToRemoteCluster) {
                remoteClusterService.listenForUpdates(clusterSettings);
            }
        }
    }

    public RemoteClusterService getRemoteClusterService() {
        return remoteClusterService;
    }

    public DiscoveryNode getLocalNode() {
        return localNode;
    }
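
A reduced sketch of the construction-time gating introduced here: the remote-cluster flag is read once from settings and afterwards guards both the settings listener and startup. The `search.remote.connect` key is taken from the tests further down in this diff; the default of `true` is an assumption of the sketch:

    // Construction-time gate, in the spirit of the TransportService constructor above.
    class RemoteConnectGateSketch {
        private final boolean connectToRemoteCluster;

        RemoteConnectGateSketch(java.util.Map<String, String> settings) {
            // Read once; assumed default is "true" for this sketch only.
            this.connectToRemoteCluster =
                Boolean.parseBoolean(settings.getOrDefault("search.remote.connect", "true"));
            if (connectToRemoteCluster) {
                // register for seed-list updates / connect on start, as the real service does
            }
        }
    }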

@@ -209,6 +222,10 @@ public class TransportService extends AbstractLifecycleComponent {
            false, false,
            (request, channel) -> channel.sendResponse(
                new HandshakeResponse(localNode, clusterName, localNode.getVersion())));
        if (connectToRemoteCluster) {
            // here we start to connect to the remote clusters
            remoteClusterService.initializeRemoteClusters();
        }
    }

    @Override

@@ -253,8 +270,8 @@ public class TransportService extends AbstractLifecycleComponent {
    }

    @Override
    protected void doClose() {
        transport.close();
    protected void doClose() throws IOException {
        IOUtils.close(remoteClusterService, transport);
    }
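
Switching `doClose` to Lucene's `IOUtils.close` means every resource gets a close attempt even if an earlier one throws, with the first failure rethrown. That behavior is easy to confirm in isolation:

    import java.io.Closeable;
    import java.io.IOException;
    import org.apache.lucene.util.IOUtils;

    public class CloseAllSketch {
        public static void main(String[] args) {
            Closeable failing = () -> { throw new IOException("first"); };
            final boolean[] secondClosed = { false };
            Closeable second = () -> secondClosed[0] = true;
            try {
                IOUtils.close(failing, second);
            } catch (IOException e) {
                // "first" propagates, but the second resource was still closed.
                System.out.println(e.getMessage() + ", secondClosed=" + secondClosed[0]);
            }
        }
    }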

    /**

@@ -759,10 +759,10 @@ public class ElasticsearchExceptionTests extends ESTestCase {
        failureCause = new NoShardAvailableActionException(new ShardId("_index_g", "_uuid_g", 6), "node_g", failureCause);
        ShardSearchFailure[] shardFailures = new ShardSearchFailure[]{
            new ShardSearchFailure(new ParsingException(0, 0, "Parsing g", null),
                new SearchShardTarget("node_g", new ShardId(new Index("_index_g", "_uuid_g"), 61), OriginalIndices.NONE)),
            new ShardSearchFailure(new RepositoryException("repository_g", "Repo"),
                new SearchShardTarget("node_g", new ShardId(new Index("_index_g", "_uuid_g"), 62), OriginalIndices.NONE)),
            new ShardSearchFailure(new SearchContextMissingException(0L), null)
                new SearchShardTarget("node_g", new ShardId(new Index("_index_g", "_uuid_g"), 61), null,
                    OriginalIndices.NONE)), new ShardSearchFailure(new RepositoryException("repository_g", "Repo"),
                new SearchShardTarget("node_g", new ShardId(new Index("_index_g", "_uuid_g"), 62), null,
                    OriginalIndices.NONE)), new ShardSearchFailure(new SearchContextMissingException(0L), null)
        };
        failure = new SearchPhaseExecutionException("phase_g", "G", failureCause, shardFailures);

@@ -21,9 +21,9 @@ package org.elasticsearch.action.admin.cluster.node.tasks;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.ListenableActionFuture;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;

@@ -90,7 +90,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFa
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.emptyCollectionOf;

@@ -466,8 +465,7 @@ public class TasksIT extends ESIntegTestCase {
    public void testTasksCancellation() throws Exception {
        // Start blocking test task
        // Get real client (the plugin is not registered on transport nodes)
        ListenableActionFuture<TestTaskPlugin.NodesResponse> future = TestTaskPlugin.TestTaskAction.INSTANCE.newRequestBuilder(client())
            .execute();
        ActionFuture<TestTaskPlugin.NodesResponse> future = TestTaskPlugin.TestTaskAction.INSTANCE.newRequestBuilder(client()).execute();
        logger.info("--> started test tasks");

        // Wait for the task to start on all nodes

@@ -488,8 +486,7 @@ public class TasksIT extends ESIntegTestCase {

    public void testTasksUnblocking() throws Exception {
        // Start blocking test task
        ListenableActionFuture<TestTaskPlugin.NodesResponse> future = TestTaskPlugin.TestTaskAction.INSTANCE.newRequestBuilder(client())
            .execute();
        ActionFuture<TestTaskPlugin.NodesResponse> future = TestTaskPlugin.TestTaskAction.INSTANCE.newRequestBuilder(client()).execute();
        // Wait for the task to start on all nodes
        assertBusy(() -> assertEquals(internalCluster().size(),
            client().admin().cluster().prepareListTasks().setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]").get().getTasks().size()));

@@ -502,42 +499,45 @@ public class TasksIT extends ESIntegTestCase {
    }

    public void testListTasksWaitForCompletion() throws Exception {
        waitForCompletionTestCase(randomBoolean(), id -> {
            return client().admin().cluster().prepareListTasks().setActions(TestTaskPlugin.TestTaskAction.NAME)
                .setWaitForCompletion(true).execute();
        }, response -> {
            assertThat(response.getNodeFailures(), empty());
            assertThat(response.getTaskFailures(), empty());
            assertThat(response.getTasks(), hasSize(1));
            TaskInfo task = response.getTasks().get(0);
            assertEquals(TestTaskPlugin.TestTaskAction.NAME, task.getAction());
        });
        waitForCompletionTestCase(randomBoolean(),
            id -> client().admin().cluster().prepareListTasks().setActions(TestTaskPlugin.TestTaskAction.NAME)
                .setWaitForCompletion(true).execute(),
            response -> {
                assertThat(response.getNodeFailures(), empty());
                assertThat(response.getTaskFailures(), empty());
                assertThat(response.getTasks(), hasSize(1));
                TaskInfo task = response.getTasks().get(0);
                assertEquals(TestTaskPlugin.TestTaskAction.NAME, task.getAction());
            }
        );
    }

    public void testGetTaskWaitForCompletionWithoutStoringResult() throws Exception {
        waitForCompletionTestCase(false, id -> {
            return client().admin().cluster().prepareGetTask(id).setWaitForCompletion(true).execute();
        }, response -> {
            assertTrue(response.getTask().isCompleted());
            // We didn't store the result so it won't come back when we wait
            assertNull(response.getTask().getResponse());
            // But the task's details should still be there because we grabbed a reference to the task before waiting for it to complete.
            assertNotNull(response.getTask().getTask());
            assertEquals(TestTaskPlugin.TestTaskAction.NAME, response.getTask().getTask().getAction());
        });
        waitForCompletionTestCase(false,
            id -> client().admin().cluster().prepareGetTask(id).setWaitForCompletion(true).execute(),
            response -> {
                assertTrue(response.getTask().isCompleted());
                //We didn't store the result so it won't come back when we wait
                assertNull(response.getTask().getResponse());
                //But the task's details should still be there because we grabbed a reference to the task before waiting for it to complete
                assertNotNull(response.getTask().getTask());
                assertEquals(TestTaskPlugin.TestTaskAction.NAME, response.getTask().getTask().getAction());
            }
        );
    }

    public void testGetTaskWaitForCompletionWithStoringResult() throws Exception {
        waitForCompletionTestCase(true, id -> {
            return client().admin().cluster().prepareGetTask(id).setWaitForCompletion(true).execute();
        }, response -> {
            assertTrue(response.getTask().isCompleted());
            // We stored the task so we should get its results
            assertEquals(0, response.getTask().getResponseAsMap().get("failure_count"));
            // The task's details should also be there
            assertNotNull(response.getTask().getTask());
            assertEquals(TestTaskPlugin.TestTaskAction.NAME, response.getTask().getTask().getAction());
        });
        waitForCompletionTestCase(true,
            id -> client().admin().cluster().prepareGetTask(id).setWaitForCompletion(true).execute(),
            response -> {
                assertTrue(response.getTask().isCompleted());
                // We stored the task so we should get its results
                assertEquals(0, response.getTask().getResponseAsMap().get("failure_count"));
                // The task's details should also be there
                assertNotNull(response.getTask().getTask());
                assertEquals(TestTaskPlugin.TestTaskAction.NAME, response.getTask().getTask().getAction());
            }
        );
    }

    /**

@@ -546,13 +546,13 @@ public class TasksIT extends ESIntegTestCase {
     * @param wait start waiting for a task. Accepts that id of the task to wait for and returns a future waiting for it.
     * @param validator validate the response and return the task ids that were found
     */
    private <T> void waitForCompletionTestCase(boolean storeResult, Function<TaskId, ListenableActionFuture<T>> wait, Consumer<T> validator)
    private <T> void waitForCompletionTestCase(boolean storeResult, Function<TaskId, ActionFuture<T>> wait, Consumer<T> validator)
            throws Exception {
        // Start blocking test task
        ListenableActionFuture<TestTaskPlugin.NodesResponse> future = TestTaskPlugin.TestTaskAction.INSTANCE.newRequestBuilder(client())
        ActionFuture<TestTaskPlugin.NodesResponse> future = TestTaskPlugin.TestTaskAction.INSTANCE.newRequestBuilder(client())
            .setShouldStoreResult(storeResult).execute();

        ListenableActionFuture<T> waitResponseFuture;
        ActionFuture<T> waitResponseFuture;
        TaskId taskId;
        try {
            taskId = waitForTestTaskStartOnAllNodes();
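
The migration from `ListenableActionFuture` to `ActionFuture` in these tests leans only on the blocking `Future` contract. The wait-then-validate shape of the helper can be shown with JDK types (all names in this sketch are invented):

    import java.util.concurrent.CompletableFuture;
    import java.util.function.Consumer;
    import java.util.function.Function;

    public class WaitForCompletionSketch {
        // Same shape as waitForCompletionTestCase: start a wait keyed by an id,
        // then hand the unwrapped result to a validator.
        static <T> void waitThenValidate(String id, Function<String, CompletableFuture<T>> wait,
                                         Consumer<T> validator) throws Exception {
            CompletableFuture<T> future = wait.apply(id);
            validator.accept(future.get());
        }

        public static void main(String[] args) throws Exception {
            waitThenValidate("task-1",
                id -> CompletableFuture.completedFuture("completed:" + id),
                result -> System.out.println(result)); // completed:task-1
        }
    }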

@@ -623,8 +623,7 @@ public class TasksIT extends ESIntegTestCase {
     */
    private void waitForTimeoutTestCase(Function<TaskId, ? extends Iterable<? extends Throwable>> wait) throws Exception {
        // Start blocking test task
        ListenableActionFuture<TestTaskPlugin.NodesResponse> future = TestTaskPlugin.TestTaskAction.INSTANCE.newRequestBuilder(client())
            .execute();
        ActionFuture<TestTaskPlugin.NodesResponse> future = TestTaskPlugin.TestTaskAction.INSTANCE.newRequestBuilder(client()).execute();
        try {
            TaskId taskId = waitForTestTaskStartOnAllNodes();

@@ -662,7 +661,7 @@ public class TasksIT extends ESIntegTestCase {

    public void testTasksListWaitForNoTask() throws Exception {
        // Spin up a request to wait for no matching tasks
        ListenableActionFuture<ListTasksResponse> waitResponseFuture = client().admin().cluster().prepareListTasks()
        ActionFuture<ListTasksResponse> waitResponseFuture = client().admin().cluster().prepareListTasks()
            .setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]").setWaitForCompletion(true).setTimeout(timeValueMillis(10))
            .execute();

@@ -672,12 +671,12 @@ public class TasksIT extends ESIntegTestCase {

    public void testTasksGetWaitForNoTask() throws Exception {
        // Spin up a request to wait for no matching tasks
        ListenableActionFuture<GetTaskResponse> waitResponseFuture = client().admin().cluster().prepareGetTask("notfound:1")
        ActionFuture<GetTaskResponse> waitResponseFuture = client().admin().cluster().prepareGetTask("notfound:1")
            .setWaitForCompletion(true).setTimeout(timeValueMillis(10))
            .execute();

        // It should finish quickly and without complaint
        expectNotFound(() -> waitResponseFuture.get());
        expectNotFound(waitResponseFuture::get);
    }

    public void testTasksWaitForAllTask() throws Exception {

@@ -19,7 +19,7 @@

package org.elasticsearch.action.admin.indices.stats;

import org.elasticsearch.action.ListenableActionFuture;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;

@@ -124,7 +124,7 @@ public class IndicesStatsTests extends ESSingleNodeTestCase {
        createIndex("test", Settings.builder().put("refresh_interval", -1).build());

        // Index a document asynchronously so the request will only return when document is refreshed
        ListenableActionFuture<IndexResponse> index = client().prepareIndex("test", "test", "test").setSource("test", "test")
        ActionFuture<IndexResponse> index = client().prepareIndex("test", "test", "test").setSource("test", "test")
            .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL).execute();

        // Wait for the refresh listener to appear in the stats

@@ -19,9 +19,16 @@

package org.elasticsearch.action.search;

import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.test.ESTestCase;

import java.util.Collections;

@@ -31,7 +38,7 @@ import java.util.concurrent.atomic.AtomicLong;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;

public class AbstractSearchAsyncActionTookTests extends ESTestCase {
public class AbstractSearchAsyncActionTests extends ESTestCase {

    private AbstractSearchAsyncAction<SearchPhaseResult> createAction(
        final boolean controlled,

@@ -53,35 +60,19 @@ public class AbstractSearchAsyncActionTookTests extends ESTestCase {
                System::nanoTime);
        }

        return new AbstractSearchAsyncAction<SearchPhaseResult>(
            "test",
            null,
            null,
            null,
            null,
            null,
            null,
            null,
            null,
            new GroupShardsIterator<>(Collections.singletonList(new SearchShardIterator(null, Collections.emptyList(), null))),
            timeProvider,
            0,
            null,
            null
        ) {
        return new AbstractSearchAsyncAction<SearchPhaseResult>("test", null, null, null,
            Collections.singletonMap("foo", new AliasFilter(new MatchAllQueryBuilder())), Collections.singletonMap("foo", 2.0f), null,
            new SearchRequest(), null, new GroupShardsIterator<>(Collections.singletonList(
            new SearchShardIterator(null, null, Collections.emptyList(), null))), timeProvider, 0, null,
            new InitialSearchPhase.SearchPhaseResults<>(10)) {
            @Override
            protected SearchPhase getNextPhase(
                final SearchPhaseResults<SearchPhaseResult> results,
                final SearchPhaseContext context) {
            protected SearchPhase getNextPhase(final SearchPhaseResults<SearchPhaseResult> results, final SearchPhaseContext context) {
                return null;
            }

            @Override
            protected void executePhaseOnShard(
                final SearchShardIterator shardIt,
                final ShardRouting shard,
                final SearchActionListener<SearchPhaseResult> listener) {

            protected void executePhaseOnShard(final SearchShardIterator shardIt, final ShardRouting shard,
                                               final SearchActionListener<SearchPhaseResult> listener) {
            }

            @Override

@@ -112,4 +103,16 @@ public class AbstractSearchAsyncActionTookTests extends ESTestCase {
        assertThat(actual, greaterThanOrEqualTo(TimeUnit.NANOSECONDS.toMillis(expected.get())));
    }
}

    public void testBuildShardSearchTransportRequest() {
        final AtomicLong expected = new AtomicLong();
        AbstractSearchAsyncAction<SearchPhaseResult> action = createAction(false, expected);
        SearchShardIterator iterator = new SearchShardIterator("test-cluster", new ShardId(new Index("name", "foo"), 1),
            Collections.emptyList(), new OriginalIndices(new String[] {"name", "name1"}, IndicesOptions.strictExpand()));
        ShardSearchTransportRequest shardSearchTransportRequest = action.buildShardSearchRequest(iterator);
        assertEquals(IndicesOptions.strictExpand(), shardSearchTransportRequest.indicesOptions());
        assertArrayEquals(new String[] {"name", "name1"}, shardSearchTransportRequest.indices());
        assertEquals(new MatchAllQueryBuilder(), shardSearchTransportRequest.filteringAliases());
        assertEquals(2.0f, shardSearchTransportRequest.indexBoost(), 0.0f);
    }
}
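
The new test checks that the per-index alias filter and boost maps supplied to the action surface on the shard-level request. Stripped of the search machinery, the lookup being exercised is two map reads keyed by index name (a sketch, not the real `buildShardSearchRequest`):

    import java.util.Map;

    public class ShardRequestLookupSketch {
        public static void main(String[] args) {
            // The maps play the role of the aliasFilter and per-index boost
            // arguments passed to the action above.
            Map<String, String> aliasFilters = Map.of("foo", "match_all");
            Map<String, Float> boosts = Map.of("foo", 2.0f);

            String index = "foo";
            System.out.println("filter=" + aliasFilters.get(index)
                + " boost=" + boosts.getOrDefault(index, 1.0f)); // filter=match_all boost=2.0
        }
    }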

@@ -23,7 +23,6 @@ import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermStatistics;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.concurrent.AtomicArray;

@@ -59,7 +58,7 @@ public class DfsQueryPhaseTests extends ESTestCase {

        SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY, BigArrays.NON_RECYCLING_INSTANCE, null);
        SearchTransportService searchTransportService = new SearchTransportService(
            Settings.builder().put("search.remote.connect", false).build(), null, null) {
            Settings.builder().put("search.remote.connect", false).build(), null) {

            @Override
            public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest request, SearchTask task,

@@ -114,7 +113,7 @@ public class DfsQueryPhaseTests extends ESTestCase {

        SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY, BigArrays.NON_RECYCLING_INSTANCE, null);
        SearchTransportService searchTransportService = new SearchTransportService(
            Settings.builder().put("search.remote.connect", false).build(), null, null) {
            Settings.builder().put("search.remote.connect", false).build(), null) {

            @Override
            public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest request, SearchTask task,

@@ -169,7 +168,7 @@ public class DfsQueryPhaseTests extends ESTestCase {

        SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY, BigArrays.NON_RECYCLING_INSTANCE, null);
        SearchTransportService searchTransportService = new SearchTransportService(
            Settings.builder().put("search.remote.connect", false).build(), null, null) {
            Settings.builder().put("search.remote.connect", false).build(), null) {

            @Override
            public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest request, SearchTask task,

@@ -57,7 +57,7 @@ public class ExpandSearchPhaseTests extends ESTestCase {
            .collapse(new CollapseBuilder("someField").setInnerHits(new InnerHitBuilder().setName("foobarbaz"))));
        mockSearchPhaseContext.getRequest().source().query(originalQuery);
        mockSearchPhaseContext.searchTransport = new SearchTransportService(
            Settings.builder().put("search.remote.connect", false).build(), null, null) {
            Settings.builder().put("search.remote.connect", false).build(), null) {

            @Override
            void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener<MultiSearchResponse> listener) {

@@ -126,7 +126,7 @@ public class ExpandSearchPhaseTests extends ESTestCase {
        mockSearchPhaseContext.getRequest().source(new SearchSourceBuilder()
            .collapse(new CollapseBuilder("someField").setInnerHits(new InnerHitBuilder().setName("foobarbaz"))));
        mockSearchPhaseContext.searchTransport = new SearchTransportService(
            Settings.builder().put("search.remote.connect", false).build(), null, null) {
            Settings.builder().put("search.remote.connect", false).build(), null) {

            @Override
            void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener<MultiSearchResponse> listener) {

@@ -168,7 +168,7 @@ public class ExpandSearchPhaseTests extends ESTestCase {
    public void testSkipPhase() throws IOException {
        MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1);
        mockSearchPhaseContext.searchTransport = new SearchTransportService(
            Settings.builder().put("search.remote.connect", false).build(), null, null) {
            Settings.builder().put("search.remote.connect", false).build(), null) {

            @Override
            void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener<MultiSearchResponse> listener) {

@@ -103,7 +103,7 @@ public class FetchSearchPhaseTests extends ESTestCase {
        results.consumeResult(queryResult);

        SearchTransportService searchTransportService = new SearchTransportService(
            Settings.builder().put("search.remote.connect", false).build(), null, null) {
            Settings.builder().put("search.remote.connect", false).build(), null) {
            @Override
            public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task,
                                         SearchActionListener<FetchSearchResult> listener) {

@@ -157,7 +157,7 @@ public class FetchSearchPhaseTests extends ESTestCase {
        results.consumeResult(queryResult);

        SearchTransportService searchTransportService = new SearchTransportService(
            Settings.builder().put("search.remote.connect", false).build(), null, null) {
            Settings.builder().put("search.remote.connect", false).build(), null) {
            @Override
            public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task,
                                         SearchActionListener<FetchSearchResult> listener) {

@@ -210,7 +210,7 @@ public class FetchSearchPhaseTests extends ESTestCase {
            results.consumeResult(queryResult);
        }
        SearchTransportService searchTransportService = new SearchTransportService(
            Settings.builder().put("search.remote.connect", false).build(), null, null) {
            Settings.builder().put("search.remote.connect", false).build(), null) {
            @Override
            public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task,
                                         SearchActionListener<FetchSearchResult> listener) {

@@ -271,7 +271,7 @@ public class FetchSearchPhaseTests extends ESTestCase {
        results.consumeResult(queryResult);
        AtomicInteger numFetches = new AtomicInteger(0);
        SearchTransportService searchTransportService = new SearchTransportService(
            Settings.builder().put("search.remote.connect", false).build(), null, null) {
            Settings.builder().put("search.remote.connect", false).build(), null) {
            @Override
            public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task,
                                         SearchActionListener<FetchSearchResult> listener) {

@@ -324,7 +324,7 @@ public class FetchSearchPhaseTests extends ESTestCase {
        results.consumeResult(queryResult);

        SearchTransportService searchTransportService = new SearchTransportService(
            Settings.builder().put("search.remote.connect", false).build(), null, null) {
            Settings.builder().put("search.remote.connect", false).build(), null) {
            @Override
            public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task,
                                         SearchActionListener<FetchSearchResult> listener) {

@@ -20,7 +20,6 @@ package org.elasticsearch.action.search;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.search.SearchShardTarget;

@@ -100,7 +99,7 @@ public final class MockSearchPhaseContext implements SearchPhaseContext {
    }

    @Override
    public Transport.Connection getConnection(String nodeId) {
    public Transport.Connection getConnection(String clusterAlias, String nodeId) {
        return null; // null is ok here for this test
    }

@@ -111,7 +110,7 @@ public final class MockSearchPhaseContext implements SearchPhaseContext {
    }

    @Override
    public ShardSearchTransportRequest buildShardSearchRequest(SearchShardIterator shardIt, ShardRouting shard) {
    public ShardSearchTransportRequest buildShardSearchRequest(SearchShardIterator shardIt) {
        Assert.fail("should not be called");
        return null;
    }

@@ -30,7 +30,6 @@ import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;

@@ -80,8 +79,7 @@ public class SearchAsyncActionTests extends ESTestCase {
            new OriginalIndices(new String[]{"idx"}, IndicesOptions.strictExpandOpenAndForbidClosed()),
            randomIntBetween(1, 10), randomBoolean(), primaryNode, replicaNode);
        AtomicInteger numFreedContext = new AtomicInteger();
        SearchTransportService transportService = new SearchTransportService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY,
            Collections.singleton(RemoteClusterService.REMOTE_CLUSTERS_SEEDS)), null) {
        SearchTransportService transportService = new SearchTransportService(Settings.EMPTY, null) {
            @Override
            public void sendFreeContext(Transport.Connection connection, long contextId, OriginalIndices originalIndices) {
                numFreedContext.incrementAndGet();

@@ -98,7 +96,9 @@ public class SearchAsyncActionTests extends ESTestCase {
                "test",
                logger,
                transportService,
                lookup::get,
                (cluster, node) -> {
                    assert cluster == null : "cluster was not null: " + cluster;
                    return lookup.get(node); },
                aliasFilters,
                Collections.emptyMap(),
                null,

@@ -115,7 +115,7 @@ public class SearchAsyncActionTests extends ESTestCase {
            protected void executePhaseOnShard(SearchShardIterator shardIt, ShardRouting shard, SearchActionListener<TestSearchPhaseResult>
                    listener) {
                assertTrue("shard: " + shard.shardId() + " has been queried twice", response.queried.add(shard.shardId()));
                Transport.Connection connection = getConnection(shard.currentNodeId());
                Transport.Connection connection = getConnection(null, shard.currentNodeId());
                TestSearchPhaseResult testSearchPhaseResult = new TestSearchPhaseResult(contextIdGenerator.incrementAndGet(),
                    connection.getNode());
                Set<Long> ids = nodeToContextMap.computeIfAbsent(connection.getNode(), (n) -> new HashSet<>());

@@ -186,7 +186,7 @@ public class SearchAsyncActionTests extends ESTestCase {
            }
            Collections.shuffle(started, random());
            started.addAll(initializing);
            list.add(new SearchShardIterator(new ShardId(new Index(index, "_na_"), i), started, originalIndices));
            list.add(new SearchShardIterator(null, new ShardId(new Index(index, "_na_"), i), started, originalIndices));
        }
        return new GroupShardsIterator<>(list);
    }

@@ -43,7 +43,7 @@ public class ShardSearchFailureTests extends ESTestCase {
        String indexUuid = randomAlphaOfLengthBetween(5, 10);
        int shardId = randomInt();
        return new ShardSearchFailure(ex,
            new SearchShardTarget(nodeId, new ShardId(new Index(indexName, indexUuid), shardId), null));
            new SearchShardTarget(nodeId, new ShardId(new Index(indexName, indexUuid), shardId), null, null));
    }

    public void testFromXContent() throws IOException {

@@ -74,7 +74,7 @@ public class ShardSearchFailureTests extends ESTestCase {

    public void testToXContent() throws IOException {
        ShardSearchFailure failure = new ShardSearchFailure(new ParsingException(0, 0, "some message", null),
            new SearchShardTarget("nodeId", new ShardId(new Index("indexName", "indexUuid"), 123), OriginalIndices.NONE));
            new SearchShardTarget("nodeId", new ShardId(new Index("indexName", "indexUuid"), 123), null, OriginalIndices.NONE));
        BytesReference xContent = toXContent(failure, XContentType.JSON, randomBoolean());
        assertEquals(
            "{\"shard\":123,"

@@ -19,25 +19,52 @@

package org.elasticsearch.action.search;

import org.elasticsearch.Version;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup;
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.PlainShardIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.index.query.TermsQueryBuilder;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.RemoteClusterService;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;

public class TransportSearchActionTests extends ESTestCase {

    private final ThreadPool threadPool = new TestThreadPool(getClass().getName());

    @Override
    public void tearDown() throws Exception {
        super.tearDown();
        ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
    }

    public void testMergeShardsIterators() throws IOException {
        List<ShardIterator> localShardIterators = new ArrayList<>();
        {

@@ -63,14 +90,14 @@ public class TransportSearchActionTests extends ESTestCase {
        {
            ShardId remoteShardId = new ShardId("remote_index", "remote_index_uuid", 2);
            ShardRouting remoteShardRouting = TestShardRouting.newShardRouting(remoteShardId, "remote_node", true, STARTED);
            SearchShardIterator remoteShardIterator = new SearchShardIterator(remoteShardId,
            SearchShardIterator remoteShardIterator = new SearchShardIterator("remote", remoteShardId,
                Collections.singletonList(remoteShardRouting), remoteIndices);
            remoteShardIterators.add(remoteShardIterator);
        }
        {
            ShardId remoteShardId2 = new ShardId("remote_index_2", "remote_index_2_uuid", 3);
            ShardRouting remoteShardRouting2 = TestShardRouting.newShardRouting(remoteShardId2, "remote_node", true, STARTED);
            SearchShardIterator remoteShardIterator2 = new SearchShardIterator(remoteShardId2,
            SearchShardIterator remoteShardIterator2 = new SearchShardIterator("remote", remoteShardId2,
                Collections.singletonList(remoteShardRouting2), remoteIndices);
            remoteShardIterators.add(remoteShardIterator2);
        }

@@ -79,7 +106,7 @@ public class TransportSearchActionTests extends ESTestCase {
        {
            ShardId remoteShardId3 = new ShardId("remote_index_3", "remote_index_3_uuid", 4);
            ShardRouting remoteShardRouting3 = TestShardRouting.newShardRouting(remoteShardId3, "remote_node", true, STARTED);
            SearchShardIterator remoteShardIterator3 = new SearchShardIterator(remoteShardId3,
            SearchShardIterator remoteShardIterator3 = new SearchShardIterator("remote", remoteShardId3,
                Collections.singletonList(remoteShardRouting3), remoteIndices2);
            remoteShardIterators.add(remoteShardIterator3);
        }

@@ -119,4 +146,93 @@ public class TransportSearchActionTests extends ESTestCase {
        }
    }
}

    public void testProcessRemoteShards() throws IOException {
        try (TransportService transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool,
            null)) {
            RemoteClusterService service = transportService.getRemoteClusterService();
            assertFalse(service.isCrossClusterSearchEnabled());
            List<SearchShardIterator> iteratorList = new ArrayList<>();
            Map<String, ClusterSearchShardsResponse> searchShardsResponseMap = new HashMap<>();
            DiscoveryNode[] nodes = new DiscoveryNode[] {
                new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT),
                new DiscoveryNode("node2", buildNewFakeTransportAddress(), Version.CURRENT)
            };
            Map<String, AliasFilter> indicesAndAliases = new HashMap<>();
            indicesAndAliases.put("foo", new AliasFilter(new TermsQueryBuilder("foo", "bar"), Strings.EMPTY_ARRAY));
            indicesAndAliases.put("bar", new AliasFilter(new MatchAllQueryBuilder(), Strings.EMPTY_ARRAY));
            ClusterSearchShardsGroup[] groups = new ClusterSearchShardsGroup[] {
                new ClusterSearchShardsGroup(new ShardId("foo", "foo_id", 0),
                    new ShardRouting[] {TestShardRouting.newShardRouting("foo", 0, "node1", true, ShardRoutingState.STARTED),
                        TestShardRouting.newShardRouting("foo", 0, "node2", false, ShardRoutingState.STARTED)}),
                new ClusterSearchShardsGroup(new ShardId("foo", "foo_id", 1),
                    new ShardRouting[] {TestShardRouting.newShardRouting("foo", 0, "node1", true, ShardRoutingState.STARTED),
                        TestShardRouting.newShardRouting("foo", 1, "node2", false, ShardRoutingState.STARTED)}),
                new ClusterSearchShardsGroup(new ShardId("bar", "bar_id", 0),
                    new ShardRouting[] {TestShardRouting.newShardRouting("bar", 0, "node2", true, ShardRoutingState.STARTED),
                        TestShardRouting.newShardRouting("bar", 0, "node1", false, ShardRoutingState.STARTED)})
            };
            searchShardsResponseMap.put("test_cluster_1", new ClusterSearchShardsResponse(groups, nodes, indicesAndAliases));
            DiscoveryNode[] nodes2 = new DiscoveryNode[] {
                new DiscoveryNode("node3", buildNewFakeTransportAddress(), Version.CURRENT)
            };
            ClusterSearchShardsGroup[] groups2 = new ClusterSearchShardsGroup[] {
                new ClusterSearchShardsGroup(new ShardId("xyz", "xyz_id", 0),
                    new ShardRouting[] {TestShardRouting.newShardRouting("xyz", 0, "node3", true, ShardRoutingState.STARTED)})
            };
            searchShardsResponseMap.put("test_cluster_2", new ClusterSearchShardsResponse(groups2, nodes2, null));

            Map<String, OriginalIndices> remoteIndicesByCluster = new HashMap<>();
            remoteIndicesByCluster.put("test_cluster_1",
                new OriginalIndices(new String[]{"fo*", "ba*"}, IndicesOptions.strictExpandOpenAndForbidClosed()));
            remoteIndicesByCluster.put("test_cluster_2",
                new OriginalIndices(new String[]{"x*"}, IndicesOptions.strictExpandOpenAndForbidClosed()));
            Map<String, AliasFilter> remoteAliases = new HashMap<>();
            TransportSearchAction.processRemoteShards(searchShardsResponseMap, remoteIndicesByCluster, iteratorList,
                remoteAliases);
            assertEquals(4, iteratorList.size());
            for (SearchShardIterator iterator : iteratorList) {
                if (iterator.shardId().getIndexName().endsWith("foo")) {
                    assertArrayEquals(new String[]{"fo*", "ba*"}, iterator.getOriginalIndices().indices());
                    assertTrue(iterator.shardId().getId() == 0 || iterator.shardId().getId() == 1);
                    assertEquals("test_cluster_1:foo", iterator.shardId().getIndexName());
                    ShardRouting shardRouting = iterator.nextOrNull();
                    assertNotNull(shardRouting);
                    assertEquals(shardRouting.getIndexName(), "foo");
                    shardRouting = iterator.nextOrNull();
                    assertNotNull(shardRouting);
                    assertEquals(shardRouting.getIndexName(), "foo");
                    assertNull(iterator.nextOrNull());
                } else if (iterator.shardId().getIndexName().endsWith("bar")) {
                    assertArrayEquals(new String[]{"fo*", "ba*"}, iterator.getOriginalIndices().indices());
                    assertEquals(0, iterator.shardId().getId());
                    assertEquals("test_cluster_1:bar", iterator.shardId().getIndexName());
                    ShardRouting shardRouting = iterator.nextOrNull();
                    assertNotNull(shardRouting);
                    assertEquals(shardRouting.getIndexName(), "bar");
                    shardRouting = iterator.nextOrNull();
                    assertNotNull(shardRouting);
                    assertEquals(shardRouting.getIndexName(), "bar");
                    assertNull(iterator.nextOrNull());
                } else if (iterator.shardId().getIndexName().endsWith("xyz")) {
                    assertArrayEquals(new String[]{"x*"}, iterator.getOriginalIndices().indices());
                    assertEquals(0, iterator.shardId().getId());
                    assertEquals("test_cluster_2:xyz", iterator.shardId().getIndexName());
                    ShardRouting shardRouting = iterator.nextOrNull();
                    assertNotNull(shardRouting);
                    assertEquals(shardRouting.getIndexName(), "xyz");
                    assertNull(iterator.nextOrNull());
                }
            }
            assertEquals(3, remoteAliases.size());
            assertTrue(remoteAliases.toString(), remoteAliases.containsKey("foo_id"));
            assertTrue(remoteAliases.toString(), remoteAliases.containsKey("bar_id"));
            assertTrue(remoteAliases.toString(), remoteAliases.containsKey("xyz_id"));
            assertEquals(new TermsQueryBuilder("foo", "bar"), remoteAliases.get("foo_id").getQueryBuilder());
            assertEquals(new MatchAllQueryBuilder(), remoteAliases.get("bar_id").getQueryBuilder());
            assertNull(remoteAliases.get("xyz_id").getQueryBuilder());
        }

    }

}
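
Two conventions this test pins down are worth calling out: remote shard ids are renamed to `clusterAlias:indexName` so hits stay unambiguous across clusters, and alias filters are re-keyed by index UUID for later lookups. A sketch of that re-keying, with strings standing in for `AliasFilter`:

    import java.util.HashMap;
    import java.util.Map;

    public class RemoteAliasKeySketch {
        public static void main(String[] args) {
            // Filters arrive keyed by index name, as in ClusterSearchShardsResponse...
            Map<String, String> indicesAndFilters = Map.of("foo", "terms(foo=bar)");

            // ...but are stored keyed by index UUID, since later lookups use the UUID.
            Map<String, String> aliasFilterMap = new HashMap<>();
            String indexName = "foo";
            String indexUuid = "foo_id";
            aliasFilterMap.put(indexUuid, indicesAndFilters.get(indexName));

            // The shard id itself is disambiguated with the cluster alias prefix:
            System.out.println("test_cluster_1:" + indexName + " -> " + aliasFilterMap.get(indexUuid));
        }
    }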

@@ -19,7 +19,7 @@

package org.elasticsearch.action.support;

import org.elasticsearch.action.ListenableActionFuture;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.settings.Settings;

@@ -140,7 +140,7 @@ public class ActiveShardsObserverIT extends ESIntegTestCase {
            .build();

        logger.info("--> start the index creation process");
        ListenableActionFuture<CreateIndexResponse> responseListener =
        ActionFuture<CreateIndexResponse> responseListener =
            prepareCreate(indexName)
                .setSettings(settings)
                .setWaitForActiveShards(ActiveShardCount.ALL)

@@ -34,7 +34,12 @@ public class ListenableActionFutureTests extends ESTestCase {
    public void testListenerIsCallableFromNetworkThreads() throws Throwable {
        ThreadPool threadPool = new TestThreadPool("testListenerIsCallableFromNetworkThreads");
        try {
            final PlainListenableActionFuture<Object> future = new PlainListenableActionFuture<>(threadPool);
            final PlainListenableActionFuture<Object> future;
            if (randomBoolean()) {
                future = PlainListenableActionFuture.newDispatchingListenableFuture(threadPool);
            } else {
                future = PlainListenableActionFuture.newListenableFuture();
            }
            final CountDownLatch listenerCalled = new CountDownLatch(1);
            final AtomicReference<Throwable> error = new AtomicReference<>();
            final Object response = new Object();
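
The test now randomly exercises both factory methods; the distinction being probed is whether listeners run on the completing thread or are dispatched to a pool. A JDK analogue of that difference:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class ListenerDispatchSketch {
        public static void main(String[] args) throws Exception {
            ExecutorService pool = Executors.newSingleThreadExecutor(r -> new Thread(r, "dispatch-pool"));
            CompletableFuture<Object> future = new CompletableFuture<>();

            // Direct: the listener may run on whichever thread completes the future.
            future.thenAccept(v -> System.out.println("direct on " + Thread.currentThread().getName()));
            // Dispatching: the listener is handed off to the pool instead.
            future.thenAcceptAsync(v -> System.out.println("dispatched on " + Thread.currentThread().getName()), pool);

            future.complete(new Object());
            pool.shutdown();
        }
    }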

@@ -31,7 +31,6 @@ import org.elasticsearch.test.ESTestCase;
import org.junit.Before;

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;

@@ -77,12 +76,7 @@ public class TransportActionFilterChainTests extends ESTestCase {
        };

        ArrayList<ActionFilter> actionFiltersByOrder = new ArrayList<>(filters);
        Collections.sort(actionFiltersByOrder, new Comparator<ActionFilter>() {
            @Override
            public int compare(ActionFilter o1, ActionFilter o2) {
                return Integer.compare(o1.order(), o2.order());
            }
        });
        actionFiltersByOrder.sort(Comparator.comparingInt(ActionFilter::order));

        List<ActionFilter> expectedActionFilters = new ArrayList<>();
        boolean errorExpected = false;

@@ -97,7 +91,8 @@ public class TransportActionFilterChainTests extends ESTestCase {
            }
        }

        PlainListenableActionFuture<TestResponse> future = new PlainListenableActionFuture<>(null);
        PlainActionFuture<TestResponse> future = PlainActionFuture.newFuture();

        transportAction.execute(new TestRequest(), future);
        try {
            assertThat(future.get(), notNullValue());

@@ -110,12 +105,8 @@ public class TransportActionFilterChainTests extends ESTestCase {
        for (ActionFilter actionFilter : actionFilters.filters()) {
            testFiltersByLastExecution.add((RequestTestFilter) actionFilter);
        }
        Collections.sort(testFiltersByLastExecution, new Comparator<RequestTestFilter>() {
            @Override
            public int compare(RequestTestFilter o1, RequestTestFilter o2) {
                return Integer.compare(o1.executionToken, o2.executionToken);
            }
        });

        testFiltersByLastExecution.sort(Comparator.comparingInt(o -> o.executionToken));

        ArrayList<RequestTestFilter> finalTestFilters = new ArrayList<>();
        for (ActionFilter filter : testFiltersByLastExecution) {

@@ -25,6 +25,7 @@ import org.elasticsearch.action.GenericAction;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteAction;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotAction;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction;
import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptAction;
import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheAction;
import org.elasticsearch.action.admin.indices.create.CreateIndexAction;
import org.elasticsearch.action.admin.indices.flush.FlushAction;

@@ -32,7 +33,6 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction;
import org.elasticsearch.action.delete.DeleteAction;
import org.elasticsearch.action.get.GetAction;
import org.elasticsearch.action.index.IndexAction;
import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptAction;
import org.elasticsearch.action.search.SearchAction;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;

@@ -101,22 +101,22 @@ public abstract class AbstractClientHeadersTestCase extends ESTestCase {
        // validation in the settings??? - ugly and conceptually wrong)

        // choosing arbitrary top level actions to test
        client.prepareGet("idx", "type", "id").execute().addListener(new AssertingActionListener<>(GetAction.NAME, client.threadPool()));
        client.prepareSearch().execute().addListener(new AssertingActionListener<>(SearchAction.NAME, client.threadPool()));
        client.prepareDelete("idx", "type", "id").execute().addListener(new AssertingActionListener<>(DeleteAction.NAME, client.threadPool()));
        client.admin().cluster().prepareDeleteStoredScript("lang", "id").execute().addListener(new AssertingActionListener<>(DeleteStoredScriptAction.NAME, client.threadPool()));
        client.prepareIndex("idx", "type", "id").setSource("source", XContentType.JSON).execute().addListener(new AssertingActionListener<>(IndexAction.NAME, client.threadPool()));
        client.prepareGet("idx", "type", "id").execute(new AssertingActionListener<>(GetAction.NAME, client.threadPool()));
        client.prepareSearch().execute(new AssertingActionListener<>(SearchAction.NAME, client.threadPool()));
        client.prepareDelete("idx", "type", "id").execute(new AssertingActionListener<>(DeleteAction.NAME, client.threadPool()));
        client.admin().cluster().prepareDeleteStoredScript("lang", "id").execute(new AssertingActionListener<>(DeleteStoredScriptAction.NAME, client.threadPool()));
        client.prepareIndex("idx", "type", "id").setSource("source", XContentType.JSON).execute(new AssertingActionListener<>(IndexAction.NAME, client.threadPool()));

        // choosing arbitrary cluster admin actions to test
        client.admin().cluster().prepareClusterStats().execute().addListener(new AssertingActionListener<>(ClusterStatsAction.NAME, client.threadPool()));
        client.admin().cluster().prepareCreateSnapshot("repo", "bck").execute().addListener(new AssertingActionListener<>(CreateSnapshotAction.NAME, client.threadPool()));
        client.admin().cluster().prepareReroute().execute().addListener(new AssertingActionListener<>(ClusterRerouteAction.NAME, client.threadPool()));
        client.admin().cluster().prepareClusterStats().execute(new AssertingActionListener<>(ClusterStatsAction.NAME, client.threadPool()));
        client.admin().cluster().prepareCreateSnapshot("repo", "bck").execute(new AssertingActionListener<>(CreateSnapshotAction.NAME, client.threadPool()));
        client.admin().cluster().prepareReroute().execute(new AssertingActionListener<>(ClusterRerouteAction.NAME, client.threadPool()));

        // choosing arbitrary indices admin actions to test
        client.admin().indices().prepareCreate("idx").execute().addListener(new AssertingActionListener<>(CreateIndexAction.NAME, client.threadPool()));
        client.admin().indices().prepareStats().execute().addListener(new AssertingActionListener<>(IndicesStatsAction.NAME, client.threadPool()));
        client.admin().indices().prepareClearCache("idx1", "idx2").execute().addListener(new AssertingActionListener<>(ClearIndicesCacheAction.NAME, client.threadPool()));
        client.admin().indices().prepareFlush().execute().addListener(new AssertingActionListener<>(FlushAction.NAME, client.threadPool()));
        client.admin().indices().prepareCreate("idx").execute(new AssertingActionListener<>(CreateIndexAction.NAME, client.threadPool()));
        client.admin().indices().prepareStats().execute(new AssertingActionListener<>(IndicesStatsAction.NAME, client.threadPool()));
        client.admin().indices().prepareClearCache("idx1", "idx2").execute(new AssertingActionListener<>(ClearIndicesCacheAction.NAME, client.threadPool()));
        client.admin().indices().prepareFlush().execute(new AssertingActionListener<>(FlushAction.NAME, client.threadPool()));
    }
|
||||
|
||||
public void testOverrideHeader() throws Exception {
|
||||
|
@ -126,13 +126,13 @@ public abstract class AbstractClientHeadersTestCase extends ESTestCase {
|
|||
expected.put("key2", "val 2");
|
||||
client.threadPool().getThreadContext().putHeader("key1", key1Val);
|
||||
client.prepareGet("idx", "type", "id")
|
||||
.execute().addListener(new AssertingActionListener<>(GetAction.NAME, expected, client.threadPool()));
|
||||
.execute(new AssertingActionListener<>(GetAction.NAME, expected, client.threadPool()));
|
||||
|
||||
client.admin().cluster().prepareClusterStats()
|
||||
.execute().addListener(new AssertingActionListener<>(ClusterStatsAction.NAME, expected, client.threadPool()));
|
||||
.execute(new AssertingActionListener<>(ClusterStatsAction.NAME, expected, client.threadPool()));
|
||||
|
||||
client.admin().indices().prepareCreate("idx")
|
||||
.execute().addListener(new AssertingActionListener<>(CreateIndexAction.NAME, expected, client.threadPool()));
|
||||
.execute(new AssertingActionListener<>(CreateIndexAction.NAME, expected, client.threadPool()));
|
||||
}
|
||||
|
||||
protected static void assertHeaders(Map<String, String> headers, Map<String, String> expected) {
|
||||
|
@ -205,7 +205,5 @@ public abstract class AbstractClientHeadersTestCase extends ESTestCase {
|
|||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
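
The hunks above replace the two-step execute().addListener(listener) pattern with the single-argument execute(listener) overload, which hands the listener to the action at submission time instead of attaching it to an already-running future. A minimal sketch of the new calling convention (the trivial listener below is a placeholder for illustration, not part of this diff):

    ActionListener<GetResponse> listener = new ActionListener<GetResponse>() {
        @Override
        public void onResponse(GetResponse response) {
            // the expected headers were applied by the client when the request was sent
        }

        @Override
        public void onFailure(Exception e) {
            // handle the failure
        }
    };
    // old: client.prepareGet("idx", "type", "id").execute().addListener(listener);
    client.prepareGet("idx", "type", "id").execute(listener);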
@@ -21,7 +21,7 @@ package org.elasticsearch.client.transport;

import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.action.support.PlainListenableActionFuture;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.client.Requests;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
@@ -69,7 +69,7 @@ public class TransportClientRetryIT extends ESIntegTestCase {
if (randomBoolean()) {
clusterState = client.admin().cluster().state(clusterStateRequest).get().getState();
} else {
PlainListenableActionFuture<ClusterStateResponse> future = new PlainListenableActionFuture<>(client.threadPool());
PlainActionFuture<ClusterStateResponse> future = PlainActionFuture.newFuture();
client.admin().cluster().state(clusterStateRequest, future);
clusterState = future.get().getState();
}
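
The retry test now uses PlainActionFuture, obtained from its static newFuture() factory, in place of PlainListenableActionFuture, which had to be constructed with a thread pool. The replacement pattern, condensed from the hunk above:

    PlainActionFuture<ClusterStateResponse> future = PlainActionFuture.newFuture();
    client.admin().cluster().state(clusterStateRequest, future);
    // the future doubles as the ActionListener; get() blocks until the response arrives
    ClusterState clusterState = future.get().getState();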
@@ -31,7 +31,8 @@ public class MetaDataIndexUpgradeServiceTests extends ESTestCase {

public void testArchiveBrokenIndexSettings() {
MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, xContentRegistry(),
new MapperRegistry(Collections.emptyMap(), Collections.emptyMap()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
new MapperRegistry(Collections.emptyMap(), Collections.emptyMap()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS,
Collections.emptyList());
IndexMetaData src = newIndexMeta("foo", Settings.EMPTY);
IndexMetaData indexMetaData = service.archiveBrokenIndexSettings(src);
assertSame(indexMetaData, src);
@@ -58,7 +59,8 @@ public class MetaDataIndexUpgradeServiceTests extends ESTestCase {

public void testUpgrade() {
MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, xContentRegistry(),
new MapperRegistry(Collections.emptyMap(), Collections.emptyMap()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
new MapperRegistry(Collections.emptyMap(), Collections.emptyMap()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS,
Collections.emptyList());
IndexMetaData src = newIndexMeta("foo", Settings.builder().put("index.refresh_interval", "-200").build());
assertFalse(service.isUpgraded(src));
src = service.upgradeIndexMetaData(src, Version.CURRENT.minimumIndexCompatibilityVersion());
@@ -70,7 +72,8 @@ public class MetaDataIndexUpgradeServiceTests extends ESTestCase {

public void testIsUpgraded() {
MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, xContentRegistry(),
new MapperRegistry(Collections.emptyMap(), Collections.emptyMap()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
new MapperRegistry(Collections.emptyMap(), Collections.emptyMap()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS,
Collections.emptyList());
IndexMetaData src = newIndexMeta("foo", Settings.builder().put("index.refresh_interval", "-200").build());
assertFalse(service.isUpgraded(src));
Version version = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), VersionUtils.getPreviousVersion());
@@ -82,7 +85,8 @@ public class MetaDataIndexUpgradeServiceTests extends ESTestCase {

public void testFailUpgrade() {
MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, xContentRegistry(),
new MapperRegistry(Collections.emptyMap(), Collections.emptyMap()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
new MapperRegistry(Collections.emptyMap(), Collections.emptyMap()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS,
Collections.emptyList());
final IndexMetaData metaData = newIndexMeta("foo", Settings.builder()
.put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_5_0_0_beta1)
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.fromString("2.4.0"))
@@ -99,6 +103,38 @@ public class MetaDataIndexUpgradeServiceTests extends ESTestCase {
service.upgradeIndexMetaData(goodMeta, Version.V_5_0_0.minimumIndexCompatibilityVersion());
}

public void testPluginUpgrade() {
MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, xContentRegistry(),
new MapperRegistry(Collections.emptyMap(), Collections.emptyMap()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS,
Collections.singletonList(
indexMetaData -> IndexMetaData.builder(indexMetaData)
.settings(
Settings.builder()
.put(indexMetaData.getSettings())
.put("index.refresh_interval", "10s")
).build()));
IndexMetaData src = newIndexMeta("foo", Settings.builder().put("index.refresh_interval", "200s").build());
assertFalse(service.isUpgraded(src));
src = service.upgradeIndexMetaData(src, Version.CURRENT.minimumIndexCompatibilityVersion());
assertTrue(service.isUpgraded(src));
assertEquals("10s", src.getSettings().get("index.refresh_interval"));
assertSame(src, service.upgradeIndexMetaData(src, Version.CURRENT.minimumIndexCompatibilityVersion())); // no double upgrade
}

public void testPluginUpgradeFailure() {
MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, xContentRegistry(),
new MapperRegistry(Collections.emptyMap(), Collections.emptyMap()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS,
Collections.singletonList(
indexMetaData -> {
throw new IllegalStateException("Cannot upgrade index " + indexMetaData.getIndex().getName());
}
));
IndexMetaData src = newIndexMeta("foo", Settings.EMPTY);
String message = expectThrows(IllegalStateException.class, () -> service.upgradeIndexMetaData(src,
Version.CURRENT.minimumIndexCompatibilityVersion())).getMessage();
assertEquals(message, "Cannot upgrade index foo");
}

public static IndexMetaData newIndexMeta(String name, Settings indexSettings) {
Settings build = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
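
MetaDataIndexUpgradeService gains a trailing constructor argument in every hunk above: a list of plugin-supplied index metadata upgraders (lambdas from IndexMetaData to IndexMetaData; UnaryOperator<IndexMetaData> is assumed as the element type here) that the service applies when upgrading an index. A sketch of one such upgrader, modeled on testPluginUpgrade:

    UnaryOperator<IndexMetaData> pluginUpgrader = indexMetaData ->
        IndexMetaData.builder(indexMetaData)
            .settings(
                Settings.builder()
                    .put(indexMetaData.getSettings())
                    .put("index.refresh_interval", "10s"))
            .build();
    // passed as the new trailing argument, e.g. Collections.singletonList(pluginUpgrader);
    // an upgrader that throws makes upgradeIndexMetaData fail, as testPluginUpgradeFailure shows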
@@ -93,26 +93,14 @@ public class SingleNodeDiscoveryIT extends ESIntegTestCase {
super.finishPingingRound(pingingRound);
}
};
final DiscoveryNodes nodes =
DiscoveryNodes.builder().add(pingTransport.getLocalNode()).build();
final DiscoveryNodes nodes = DiscoveryNodes.builder()
.add(nodeTransport.getLocalNode())
.add(pingTransport.getLocalNode())
.localNodeId(pingTransport.getLocalNode().getId())
.build();
final ClusterName clusterName = new ClusterName(internalCluster().getClusterName());
final ClusterState state = ClusterState.builder(clusterName).nodes(nodes).build();
unicastZenPing.start(new PingContextProvider() {
@Override
public ClusterState clusterState() {
return state;
}

@Override
public DiscoveryNodes nodes() {
return DiscoveryNodes
.builder()
.add(nodeTransport.getLocalNode())
.add(pingTransport.getLocalNode())
.localNodeId(pingTransport.getLocalNode().getId())
.build();
}
});
unicastZenPing.start(() -> state);
closeables.push(unicastZenPing);
final CompletableFuture<ZenPing.PingCollection> responses = new CompletableFuture<>();
unicastZenPing.ping(responses::complete, TimeValue.timeValueSeconds(3));
@@ -90,7 +90,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
protected ThreadPool threadPool;
protected Map<String, MockNode> nodes = new HashMap<>();

public static class MockNode implements PublishClusterStateAction.NewPendingClusterStateListener, DiscoveryNodesProvider {
public static class MockNode implements PublishClusterStateAction.NewPendingClusterStateListener {
public final DiscoveryNode discoveryNode;
public final MockTransportService service;
public MockPublishAction action;
@@ -142,7 +142,6 @@ public class PublishClusterStateActionTests extends ESTestCase {
action.pendingStatesQueue().markAsProcessed(newClusterState);
}

@Override
public DiscoveryNodes nodes() {
return clusterState.nodes();
}
@@ -189,31 +189,18 @@ public class UnicastZenPingTests extends ESTestCase {

Settings hostsSettingsMismatch = Settings.builder().put(hostsSettings).put(settingsMismatch).build();
TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, EMPTY_HOSTS_PROVIDER);
zenPingA.start(new PingContextProvider() {
@Override
public DiscoveryNodes nodes() {
return DiscoveryNodes.builder().add(handleA.node).localNodeId("UZP_A").build();
}

@Override
public ClusterState clusterState() {
return ClusterState.builder(state).blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)).build();
}
});
ClusterState stateA = ClusterState.builder(state)
.blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK))
.nodes(DiscoveryNodes.builder().add(handleA.node).localNodeId("UZP_A"))
.build();
zenPingA.start(() -> stateA);
closeables.push(zenPingA);

TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, EMPTY_HOSTS_PROVIDER);
zenPingB.start(new PingContextProvider() {
@Override
public DiscoveryNodes nodes() {
return DiscoveryNodes.builder().add(handleB.node).localNodeId("UZP_B").build();
}

@Override
public ClusterState clusterState() {
return state;
}
});
ClusterState stateB = ClusterState.builder(state)
.nodes(DiscoveryNodes.builder().add(handleB.node).localNodeId("UZP_B"))
.build();
zenPingB.start(() -> stateB);
closeables.push(zenPingB);

TestUnicastZenPing zenPingC = new TestUnicastZenPing(hostsSettingsMismatch, threadPool, handleC,
@@ -223,32 +210,18 @@ public class UnicastZenPingTests extends ESTestCase {
return versionD;
}
};
zenPingC.start(new PingContextProvider() {
@Override
public DiscoveryNodes nodes() {
return DiscoveryNodes.builder().add(handleC.node).localNodeId("UZP_C").build();
}

@Override
public ClusterState clusterState() {
return stateMismatch;
}
});
ClusterState stateC = ClusterState.builder(stateMismatch)
.nodes(DiscoveryNodes.builder().add(handleC.node).localNodeId("UZP_C"))
.build();
zenPingC.start(() -> stateC);
closeables.push(zenPingC);

TestUnicastZenPing zenPingD = new TestUnicastZenPing(hostsSettingsMismatch, threadPool, handleD,
EMPTY_HOSTS_PROVIDER);
zenPingD.start(new PingContextProvider() {
@Override
public DiscoveryNodes nodes() {
return DiscoveryNodes.builder().add(handleD.node).localNodeId("UZP_D").build();
}

@Override
public ClusterState clusterState() {
return stateMismatch;
}
});
ClusterState stateD = ClusterState.builder(stateMismatch)
.nodes(DiscoveryNodes.builder().add(handleD.node).localNodeId("UZP_D"))
.build();
zenPingD.start(() -> stateD);
closeables.push(zenPingD);

logger.info("ping from UZP_A");
@@ -339,45 +312,25 @@ public class UnicastZenPingTests extends ESTestCase {
final ClusterState state = ClusterState.builder(new ClusterName("test")).version(randomNonNegativeLong()).build();

final TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, EMPTY_HOSTS_PROVIDER);
zenPingA.start(new PingContextProvider() {
@Override
public DiscoveryNodes nodes() {
return DiscoveryNodes.builder().add(handleA.node).localNodeId("UZP_A").build();
}

@Override
public ClusterState clusterState() {
return ClusterState.builder(state).blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)).build();
}
});
ClusterState stateA = ClusterState.builder(state)
.blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK))
.nodes(DiscoveryNodes.builder().add(handleA.node).localNodeId("UZP_A"))
.build();
zenPingA.start(() -> stateA);
closeables.push(zenPingA);

TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, EMPTY_HOSTS_PROVIDER);
zenPingB.start(new PingContextProvider() {
@Override
public DiscoveryNodes nodes() {
return DiscoveryNodes.builder().add(handleB.node).localNodeId("UZP_B").build();
}

@Override
public ClusterState clusterState() {
return state;
}
});
ClusterState stateB = ClusterState.builder(state)
.nodes(DiscoveryNodes.builder().add(handleB.node).localNodeId("UZP_B"))
.build();
zenPingB.start(() -> stateB);
closeables.push(zenPingB);

TestUnicastZenPing zenPingC = new TestUnicastZenPing(hostsSettings, threadPool, handleC, EMPTY_HOSTS_PROVIDER);
zenPingC.start(new PingContextProvider() {
@Override
public DiscoveryNodes nodes() {
return DiscoveryNodes.builder().add(handleC.node).localNodeId("UZP_C").build();
}

@Override
public ClusterState clusterState() {
return state;
}
});
ClusterState stateC = ClusterState.builder(state)
.nodes(DiscoveryNodes.builder().add(handleC.node).localNodeId("UZP_C"))
.build();
zenPingC.start(() -> stateC);
closeables.push(zenPingC);

// the presence of an unresolvable host should not prevent resolvable hosts from being pinged
@@ -657,31 +610,18 @@ public class UnicastZenPingTests extends ESTestCase {
});

final TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, EMPTY_HOSTS_PROVIDER);
zenPingA.start(new PingContextProvider() {
@Override
public DiscoveryNodes nodes() {
return DiscoveryNodes.builder().add(handleA.node).add(handleB.node).localNodeId("UZP_A").build();
}

@Override
public ClusterState clusterState() {
return ClusterState.builder(state).blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)).build();
}
});
final ClusterState stateA = ClusterState.builder(state)
.blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK))
.nodes(DiscoveryNodes.builder().add(handleA.node).add(handleB.node).localNodeId("UZP_A"))
.build();
zenPingA.start(() -> stateA);
closeables.push(zenPingA);

TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, EMPTY_HOSTS_PROVIDER);
zenPingB.start(new PingContextProvider() {
@Override
public DiscoveryNodes nodes() {
return DiscoveryNodes.builder().add(handleB.node).localNodeId("UZP_B").build();
}

@Override
public ClusterState clusterState() {
return state;
}
});
final ClusterState stateB = ClusterState.builder(state)
.nodes(DiscoveryNodes.builder().add(handleB.node).localNodeId("UZP_B"))
.build();
zenPingB.start(() -> stateB);
closeables.push(zenPingB);

Collection<ZenPing.PingResponse> pingResponses = zenPingA.pingAndWait().toList();
@@ -716,34 +656,19 @@ public class UnicastZenPingTests extends ESTestCase {
.put("discovery.zen.ping.unicast.hosts", (String) null) // use nodes for simplicity
.build();
final ClusterState state = ClusterState.builder(new ClusterName("test")).version(randomNonNegativeLong()).build();
final ClusterState stateA = ClusterState.builder(state)
.blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK))
.nodes(DiscoveryNodes.builder().add(handleA.node).add(handleB.node).localNodeId("UZP_A")).build();

final TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, EMPTY_HOSTS_PROVIDER);
zenPingA.start(new PingContextProvider() {
@Override
public DiscoveryNodes nodes() {
return DiscoveryNodes.builder().add(handleA.node).add(handleB.node).localNodeId("UZP_A").build();
}

@Override
public ClusterState clusterState() {
return ClusterState.builder(state).blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)).build();
}
});
zenPingA.start(() -> stateA);
closeables.push(zenPingA);

// Node B doesn't know about A!
final ClusterState stateB = ClusterState.builder(state).nodes(
DiscoveryNodes.builder().add(handleB.node).localNodeId("UZP_B")).build();
TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, EMPTY_HOSTS_PROVIDER);
zenPingB.start(new PingContextProvider() {
@Override
public DiscoveryNodes nodes() {
return DiscoveryNodes.builder().add(handleB.node).localNodeId("UZP_B").build();
}

@Override
public ClusterState clusterState() {
return state;
}
});
zenPingB.start(() -> stateB);
closeables.push(zenPingB);

{
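
Every anonymous PingContextProvider in these hunks collapses to a lambda; the provider is reduced to a single method supplying the current ClusterState, and the node set now travels inside that state instead of a separate nodes() accessor. The pattern the rewritten tests share, condensed from the hunks above:

    ClusterState stateA = ClusterState.builder(state)
        .nodes(DiscoveryNodes.builder().add(handleA.node).localNodeId("UZP_A"))
        .build();
    // PingContextProvider is now effectively a ClusterState supplier
    zenPingA.start(() -> stateA);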
@@ -140,6 +140,55 @@ public class AsyncShardFetchTests extends ESTestCase {
assertThat(fetchData.getData().get(node1), sameInstance(response1));
}

public void testIgnoreResponseFromDifferentRound() throws Exception {
DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).build();
test.addSimulation(node1.getId(), response1);

// first fetch, no data, still on going
AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, emptySet());
assertThat(fetchData.hasData(), equalTo(false));
assertThat(test.reroute.get(), equalTo(0));

// handle a response with incorrect round id, wait on reroute incrementing
test.processAsyncFetch(Collections.singletonList(response1), Collections.emptyList(), 0);
assertThat(fetchData.hasData(), equalTo(false));
assertThat(test.reroute.get(), equalTo(1));

// fire a response (with correct round id), wait on reroute incrementing
test.fireSimulationAndWait(node1.getId());
// verify we get back the data node
assertThat(test.reroute.get(), equalTo(2));
fetchData = test.fetchData(nodes, emptySet());
assertThat(fetchData.hasData(), equalTo(true));
assertThat(fetchData.getData().size(), equalTo(1));
assertThat(fetchData.getData().get(node1), sameInstance(response1));
}

public void testIgnoreFailureFromDifferentRound() throws Exception {
DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).build();
// add a failed response for node1
test.addSimulation(node1.getId(), failure1);

// first fetch, no data, still on going
AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, emptySet());
assertThat(fetchData.hasData(), equalTo(false));
assertThat(test.reroute.get(), equalTo(0));

// handle a failure with incorrect round id, wait on reroute incrementing
test.processAsyncFetch(Collections.emptyList(), Collections.singletonList(
new FailedNodeException(node1.getId(), "dummy failure", failure1)), 0);
assertThat(fetchData.hasData(), equalTo(false));
assertThat(test.reroute.get(), equalTo(1));

// fire a response, wait on reroute incrementing
test.fireSimulationAndWait(node1.getId());
// failure, fetched data exists, but has no data
assertThat(test.reroute.get(), equalTo(2));
fetchData = test.fetchData(nodes, emptySet());
assertThat(fetchData.hasData(), equalTo(true));
assertThat(fetchData.getData().size(), equalTo(0));
}

public void testTwoNodesOnSetup() throws Exception {
DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).build();
test.addSimulation(node1.getId(), response1);
@@ -267,7 +316,7 @@ public class AsyncShardFetchTests extends ESTestCase {
}

@Override
protected void asyncFetch(final ShardId shardId, DiscoveryNode[] nodes) {
protected void asyncFetch(DiscoveryNode[] nodes, long fetchingRound) {
for (final DiscoveryNode node : nodes) {
final String nodeId = node.getId();
threadPool.generic().execute(new Runnable() {
@@ -283,11 +332,10 @@ public class AsyncShardFetchTests extends ESTestCase {
assert entry != null;
entry.executeLatch.await();
if (entry.failure != null) {
processAsyncFetch(shardId, null, Collections.singletonList(new FailedNodeException(nodeId,
"unexpected",
entry.failure)));
processAsyncFetch(null,
Collections.singletonList(new FailedNodeException(nodeId, "unexpected", entry.failure)), fetchingRound);
} else {
processAsyncFetch(shardId, Collections.singletonList(entry.response), null);
processAsyncFetch(Collections.singletonList(entry.response), null, fetchingRound);
}
} catch (Exception e) {
logger.error("unexpected failure", e);
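
asyncFetch and processAsyncFetch now carry a fetchingRound number, which is how the two new tests make stale results visible: a response or failure tagged with an old round (0 above) bumps the reroute counter but never becomes fetch data. A stripped-down sketch of the round check this enables (the field and method names below are illustrative, not the actual AsyncShardFetch internals):

    // inside a hypothetical fetcher: drop results that belong to a stale round
    synchronized void onAsyncFetchResult(long fetchingRound, Response response) {
        if (fetchingRound != currentFetchingRound) {
            // a newer fetch round has already started; this result is stale, ignore it
            return;
        }
        cache.put(response.getNode(), response);
    }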
@@ -22,7 +22,9 @@ package org.elasticsearch.gateway;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -33,7 +35,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.plugins.MetaDataUpgrader;
import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.test.TestCustomMetaData;

import java.util.Arrays;
@@ -258,7 +259,8 @@ public class GatewayMetaStateTests extends ESAllocationTestCase {
Collections.singletonList(customs -> {
customs.put(CustomMetaData1.TYPE, new CustomMetaData1("modified_data1"));
return customs;
})
}),
Collections.emptyList()
);
MetaData upgrade = GatewayMetaState.upgradeMetaData(metaData, new MockMetaDataIndexUpgradeService(false), metaDataUpgrader);
assertTrue(upgrade != metaData);
@@ -273,7 +275,8 @@ public class GatewayMetaStateTests extends ESAllocationTestCase {
Collections.singletonList(customs -> {
customs.remove(CustomMetaData1.TYPE);
return customs;
})
}),
Collections.emptyList()
);
MetaData upgrade = GatewayMetaState.upgradeMetaData(metaData, new MockMetaDataIndexUpgradeService(false), metaDataUpgrader);
assertTrue(upgrade != metaData);
@@ -287,7 +290,8 @@ public class GatewayMetaStateTests extends ESAllocationTestCase {
Collections.singletonList(customs -> {
customs.put(CustomMetaData1.TYPE, new CustomMetaData1("modified_data1"));
return customs;
})
}),
Collections.emptyList()
);

MetaData upgrade = GatewayMetaState.upgradeMetaData(metaData, new MockMetaDataIndexUpgradeService(false), metaDataUpgrader);
@@ -297,9 +301,27 @@ public class GatewayMetaStateTests extends ESAllocationTestCase {
assertThat(((TestCustomMetaData) upgrade.custom(CustomMetaData1.TYPE)).getData(), equalTo("modified_data1"));
}

public void testUpdateTemplateMetaDataOnUpgrade() throws Exception {
MetaData metaData = randomMetaData();
MetaDataUpgrader metaDataUpgrader = new MetaDataUpgrader(
Collections.emptyList(),
Collections.singletonList(
templates -> {
templates.put("added_test_template", IndexTemplateMetaData.builder("added_test_template").build());
return templates;
}
));

MetaData upgrade = GatewayMetaState.upgradeMetaData(metaData, new MockMetaDataIndexUpgradeService(false), metaDataUpgrader);
assertTrue(upgrade != metaData);
assertFalse(MetaData.isGlobalStateEquals(upgrade, metaData));
assertTrue(upgrade.templates().containsKey("added_test_template"));
}

public void testNoMetaDataUpgrade() throws Exception {
MetaData metaData = randomMetaData(new CustomMetaData1("data"));
MetaDataUpgrader metaDataUpgrader = new MetaDataUpgrader(Collections.emptyList());
MetaDataUpgrader metaDataUpgrader = new MetaDataUpgrader(Collections.emptyList(), Collections.emptyList());
MetaData upgrade = GatewayMetaState.upgradeMetaData(metaData, new MockMetaDataIndexUpgradeService(false), metaDataUpgrader);
assertTrue(upgrade == metaData);
assertTrue(MetaData.isGlobalStateEquals(upgrade, metaData));
@@ -314,7 +336,7 @@ public class GatewayMetaStateTests extends ESAllocationTestCase {
customs -> {
throw new IllegalStateException("custom meta data too old");
}
));
), Collections.emptyList());
try {
GatewayMetaState.upgradeMetaData(metaData, new MockMetaDataIndexUpgradeService(false), metaDataUpgrader);
} catch (IllegalStateException e) {
@@ -334,7 +356,8 @@ public class GatewayMetaStateTests extends ESAllocationTestCase {
case 2:
metaData = randomMetaData();
break;
default: throw new IllegalStateException("should never happen");
default:
throw new IllegalStateException("should never happen");
}
MetaDataUpgrader metaDataUpgrader = new MetaDataUpgrader(
Arrays.asList(
@@ -345,8 +368,8 @@ public class GatewayMetaStateTests extends ESAllocationTestCase {
customs -> {
customs.put(CustomMetaData2.TYPE, new CustomMetaData1("modified_data2"));
return customs;
})
);
}
), Collections.emptyList());
MetaData upgrade = GatewayMetaState.upgradeMetaData(metaData, new MockMetaDataIndexUpgradeService(false), metaDataUpgrader);
assertTrue(upgrade != metaData);
assertFalse(MetaData.isGlobalStateEquals(upgrade, metaData));
@@ -361,7 +384,7 @@ public class GatewayMetaStateTests extends ESAllocationTestCase {

public void testIndexMetaDataUpgrade() throws Exception {
MetaData metaData = randomMetaData();
MetaDataUpgrader metaDataUpgrader = new MetaDataUpgrader(Collections.emptyList());
MetaDataUpgrader metaDataUpgrader = new MetaDataUpgrader(Collections.emptyList(), Collections.emptyList());
MetaData upgrade = GatewayMetaState.upgradeMetaData(metaData, new MockMetaDataIndexUpgradeService(true), metaDataUpgrader);
assertTrue(upgrade != metaData);
assertTrue(MetaData.isGlobalStateEquals(upgrade, metaData));
@@ -372,7 +395,8 @@ public class GatewayMetaStateTests extends ESAllocationTestCase {

public void testCustomMetaDataNoChange() throws Exception {
MetaData metaData = randomMetaData(new CustomMetaData1("data"));
MetaDataUpgrader metaDataUpgrader = new MetaDataUpgrader(Collections.singletonList(HashMap::new));
MetaDataUpgrader metaDataUpgrader = new MetaDataUpgrader(Collections.singletonList(HashMap::new),
Collections.singletonList(HashMap::new));
MetaData upgrade = GatewayMetaState.upgradeMetaData(metaData, new MockMetaDataIndexUpgradeService(false), metaDataUpgrader);
assertTrue(upgrade == metaData);
assertTrue(MetaData.isGlobalStateEquals(upgrade, metaData));
@@ -381,13 +405,71 @@ public class GatewayMetaStateTests extends ESAllocationTestCase {
}
}

public void testIndexTemplateValidation() throws Exception {
MetaData metaData = randomMetaData();
MetaDataUpgrader metaDataUpgrader = new MetaDataUpgrader(
Collections.emptyList(),
Collections.singletonList(
customs -> {
throw new IllegalStateException("template is incompatible");
}));
String message = expectThrows(IllegalStateException.class,
() -> GatewayMetaState.upgradeMetaData(metaData, new MockMetaDataIndexUpgradeService(false), metaDataUpgrader)).getMessage();
assertThat(message, equalTo("template is incompatible"));
}

public void testMultipleIndexTemplateUpgrade() throws Exception {
final MetaData metaData;
switch (randomIntBetween(0, 2)) {
case 0:
metaData = randomMetaDataWithIndexTemplates("template1", "template2");
break;
case 1:
metaData = randomMetaDataWithIndexTemplates(randomBoolean() ? "template1" : "template2");
break;
case 2:
metaData = randomMetaData();
break;
default:
throw new IllegalStateException("should never happen");
}
MetaDataUpgrader metaDataUpgrader = new MetaDataUpgrader(
Collections.emptyList(),
Arrays.asList(
indexTemplateMetaDatas -> {
indexTemplateMetaDatas.put("template1", IndexTemplateMetaData.builder("template1").settings(
Settings.builder().put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 20).build()).build());
return indexTemplateMetaDatas;

},
indexTemplateMetaDatas -> {
indexTemplateMetaDatas.put("template2", IndexTemplateMetaData.builder("template2").settings(
Settings.builder().put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 10).build()).build());
return indexTemplateMetaDatas;

}
));
MetaData upgrade = GatewayMetaState.upgradeMetaData(metaData, new MockMetaDataIndexUpgradeService(false), metaDataUpgrader);
assertTrue(upgrade != metaData);
assertFalse(MetaData.isGlobalStateEquals(upgrade, metaData));
assertNotNull(upgrade.templates().get("template1"));
assertThat(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(upgrade.templates().get("template1").settings()), equalTo(20));
assertNotNull(upgrade.templates().get("template2"));
assertThat(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.get(upgrade.templates().get("template2").settings()), equalTo(10));
for (IndexMetaData indexMetaData : upgrade) {
assertTrue(metaData.hasIndexMetaData(indexMetaData));
}
}

private static class MockMetaDataIndexUpgradeService extends MetaDataIndexUpgradeService {
private final boolean upgrade;

MockMetaDataIndexUpgradeService(boolean upgrade) {
super(Settings.EMPTY, null, null, null);
super(Settings.EMPTY, null, null, null, null);
this.upgrade = upgrade;
}

@Override
public IndexMetaData upgradeIndexMetaData(IndexMetaData indexMetaData, Version minimumIndexCompatibilityVersion) {
return upgrade ? IndexMetaData.builder(indexMetaData).build() : indexMetaData;
@@ -445,4 +527,25 @@ public class GatewayMetaStateTests extends ESAllocationTestCase {
}
return builder.build();
}

private static MetaData randomMetaDataWithIndexTemplates(String... templates) {
MetaData.Builder builder = MetaData.builder();
for (String template : templates) {
IndexTemplateMetaData templateMetaData = IndexTemplateMetaData.builder(template)
.settings(settings(Version.CURRENT)
.put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), randomIntBetween(0, 3))
.put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), randomIntBetween(1, 5)))
.build();
builder.put(templateMetaData);
}
for (int i = 0; i < randomIntBetween(1, 5); i++) {
builder.put(
IndexMetaData.builder(randomAlphaOfLength(10))
.settings(settings(Version.CURRENT))
.numberOfReplicas(randomIntBetween(0, 3))
.numberOfShards(randomIntBetween(1, 5))
);
}
return builder.build();
}
}
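
MetaDataUpgrader grows a second constructor argument throughout this file: the first list transforms custom cluster metadata, the new second list transforms the index template map. A sketch of constructing one with a single template upgrader, following testUpdateTemplateMetaDataOnUpgrade above:

    MetaDataUpgrader upgrader = new MetaDataUpgrader(
        Collections.emptyList(),                 // custom metadata upgraders
        Collections.singletonList(templates -> { // index template upgraders
            templates.put("added_test_template",
                IndexTemplateMetaData.builder("added_test_template").build());
            return templates;
        }));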
@@ -481,7 +481,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {

@Override
protected AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> fetchData(ShardRouting shard, RoutingAllocation allocation) {
return new AsyncShardFetch.FetchResult<>(shardId, data, Collections.<String>emptySet(), Collections.<String>emptySet());
return new AsyncShardFetch.FetchResult<>(shardId, data, Collections.<String>emptySet());
}
}
}
@@ -389,7 +389,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
tData.put(entry.getKey(), new TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData(entry.getKey(), entry.getValue()));
}
}
return new AsyncShardFetch.FetchResult<>(shardId, tData, Collections.<String>emptySet(), Collections.<String>emptySet());
return new AsyncShardFetch.FetchResult<>(shardId, tData, Collections.emptySet());
}

@Override
@@ -19,8 +19,8 @@

package org.elasticsearch.index;

import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.ListenableActionFuture;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
@@ -149,7 +149,7 @@ public class WaitUntilRefreshIT extends ESIntegTestCase {
*/
public void testNoRefreshInterval() throws InterruptedException, ExecutionException {
client().admin().indices().prepareUpdateSettings("test").setSettings(singletonMap("index.refresh_interval", -1)).get();
ListenableActionFuture<IndexResponse> index = client().prepareIndex("test", "index", "1").setSource("foo", "bar")
ActionFuture<IndexResponse> index = client().prepareIndex("test", "index", "1").setSource("foo", "bar")
.setRefreshPolicy(RefreshPolicy.WAIT_UNTIL).execute();
while (false == index.isDone()) {
client().admin().indices().prepareRefresh("test").get();
@@ -1,341 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.index.query;

import org.apache.lucene.search.join.ScoreMode;
import org.elasticsearch.common.geo.GeoDistance;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.ShapeRelation;
import org.elasticsearch.common.geo.builders.CoordinatesBuilder;
import org.elasticsearch.common.geo.builders.ShapeBuilders;
import org.elasticsearch.common.unit.DistanceUnit;
import org.elasticsearch.index.query.MoreLikeThisQueryBuilder.Item;
import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;
import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder.FilterFunctionBuilder;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
import static org.elasticsearch.index.query.QueryBuilders.boostingQuery;
import static org.elasticsearch.index.query.QueryBuilders.commonTermsQuery;
import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery;
import static org.elasticsearch.index.query.QueryBuilders.disMaxQuery;
import static org.elasticsearch.index.query.QueryBuilders.existsQuery;
import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
import static org.elasticsearch.index.query.QueryBuilders.fuzzyQuery;
import static org.elasticsearch.index.query.QueryBuilders.geoBoundingBoxQuery;
import static org.elasticsearch.index.query.QueryBuilders.geoDistanceQuery;
import static org.elasticsearch.index.query.QueryBuilders.geoPolygonQuery;
import static org.elasticsearch.index.query.QueryBuilders.geoShapeQuery;
import static org.elasticsearch.index.query.QueryBuilders.hasChildQuery;
import static org.elasticsearch.index.query.QueryBuilders.hasParentQuery;
import static org.elasticsearch.index.query.QueryBuilders.idsQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
import static org.elasticsearch.index.query.QueryBuilders.moreLikeThisQuery;
import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery;
import static org.elasticsearch.index.query.QueryBuilders.nestedQuery;
import static org.elasticsearch.index.query.QueryBuilders.prefixQuery;
import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery;
import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;
import static org.elasticsearch.index.query.QueryBuilders.regexpQuery;
import static org.elasticsearch.index.query.QueryBuilders.scriptQuery;
import static org.elasticsearch.index.query.QueryBuilders.simpleQueryStringQuery;
import static org.elasticsearch.index.query.QueryBuilders.spanContainingQuery;
import static org.elasticsearch.index.query.QueryBuilders.spanFirstQuery;
import static org.elasticsearch.index.query.QueryBuilders.spanMultiTermQueryBuilder;
import static org.elasticsearch.index.query.QueryBuilders.spanNearQuery;
import static org.elasticsearch.index.query.QueryBuilders.spanNotQuery;
import static org.elasticsearch.index.query.QueryBuilders.spanOrQuery;
import static org.elasticsearch.index.query.QueryBuilders.spanTermQuery;
import static org.elasticsearch.index.query.QueryBuilders.spanWithinQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.index.query.QueryBuilders.termsQuery;
import static org.elasticsearch.index.query.QueryBuilders.typeQuery;
import static org.elasticsearch.index.query.QueryBuilders.wildcardQuery;
import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.exponentialDecayFunction;
import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.randomFunction;

/**
* If one of the following tests doesn't compile make sure to not only fix the compilation error here
* but also the documentation under ./docs/java-api/query-dsl/bool-query.asciidoc
*
* There are no assertions here on purpose - all of these tests ((ideally) should) equal to what is
* documented in the java api query dsl part of our reference guide.
* */
public class QueryDSLDocumentationTests extends ESTestCase {
public void testBool() {
boolQuery()
.must(termQuery("content", "test1"))
.must(termQuery("content", "test4"))
.mustNot(termQuery("content", "test2"))
.should(termQuery("content", "test3"))
.filter(termQuery("content", "test5"));
}

public void testBoosting() {
boostingQuery(termQuery("name","kimchy"), termQuery("name","dadoonet"))
.negativeBoost(0.2f);
}

public void testCommonTerms() {
commonTermsQuery("name", "kimchy");
}

public void testConstantScore() {
constantScoreQuery(termQuery("name","kimchy"))
.boost(2.0f);
}

public void testDisMax() {
disMaxQuery()
.add(termQuery("name", "kimchy"))
.add(termQuery("name", "elasticsearch"))
.boost(1.2f)
.tieBreaker(0.7f);
}

public void testExists() {
existsQuery("name");
}

public void testFunctionScore() {
FilterFunctionBuilder[] functions = {
new FunctionScoreQueryBuilder.FilterFunctionBuilder(
matchQuery("name", "kimchy"),
randomFunction("ABCDEF")),
new FunctionScoreQueryBuilder.FilterFunctionBuilder(
exponentialDecayFunction("age", 0L, 1L))
};
functionScoreQuery(functions);
}

public void testFuzzy() {
fuzzyQuery("name", "kimchy");
}

public void testGeoBoundingBox() {
geoBoundingBoxQuery("pin.location").setCorners(40.73, -74.1, 40.717, -73.99);
}

public void testGeoDistance() {
geoDistanceQuery("pin.location")
.point(40, -70)
.distance(200, DistanceUnit.KILOMETERS)
.geoDistance(GeoDistance.ARC);
}

public void testGeoPolygon() {
List<GeoPoint> points = new ArrayList<GeoPoint>();
points.add(new GeoPoint(40, -70));
points.add(new GeoPoint(30, -80));
points.add(new GeoPoint(20, -90));
geoPolygonQuery("pin.location", points);
}

public void testGeoShape() throws IOException {
GeoShapeQueryBuilder qb = geoShapeQuery(
"pin.location",
ShapeBuilders.newMultiPoint(
new CoordinatesBuilder()
.coordinate(0, 0)
.coordinate(0, 10)
.coordinate(10, 10)
.coordinate(10, 0)
.coordinate(0, 0)
.build()));
qb.relation(ShapeRelation.WITHIN);

qb = geoShapeQuery(
"pin.location",
"DEU",
"countries");
qb.relation(ShapeRelation.WITHIN)
.indexedShapeIndex("shapes")
.indexedShapePath("location");
}

public void testHasChild() {
hasChildQuery(
"blog_tag",
termQuery("tag","something"),
ScoreMode.None);
}

public void testHasParent() {
hasParentQuery(
"blog",
termQuery("tag","something"),
false);
}

public void testIds() {
idsQuery("my_type", "type2")
.addIds("1", "4", "100");

idsQuery().addIds("1", "4", "100");
}

public void testMatchAll() {
matchAllQuery();
}

public void testMatch() {
matchQuery("name", "kimchy elasticsearch");
}

public void testMLT() {
String[] fields = {"name.first", "name.last"};
String[] texts = {"text like this one"};
Item[] items = null;

moreLikeThisQuery(fields, texts, items)
.minTermFreq(1)
.maxQueryTerms(12);
}

public void testMultiMatch() {
multiMatchQuery("kimchy elasticsearch", "user", "message");
}

public void testNested() {
nestedQuery(
"obj1",
boolQuery()
.must(matchQuery("obj1.name", "blue"))
.must(rangeQuery("obj1.count").gt(5)),
ScoreMode.Avg);
}

public void testPrefix() {
prefixQuery("brand", "heine");
}

public void testQueryString() {
queryStringQuery("+kimchy -elasticsearch");
}

public void testRange() {
rangeQuery("price")
.from(5)
.to(10)
.includeLower(true)
.includeUpper(false);

rangeQuery("age")
.gte("10")
.lt("20");
}

public void testRegExp() {
regexpQuery("name.first", "s.*y");
}

public void testScript() {
scriptQuery(
new Script("doc['num1'].value > 1")
);

Map<String, Object> parameters = new HashMap<>();
parameters.put("param1", 5);
scriptQuery(
new Script(
ScriptType.FILE, "coollang", "myscript",
parameters)
);

}

public void testSimpleQueryString() {
simpleQueryStringQuery("+kimchy -elasticsearch");
}

public void testSpanContaining() {
spanContainingQuery(
spanNearQuery(spanTermQuery("field1","bar"), 5)
.addClause(spanTermQuery("field1","baz"))
.inOrder(true),
spanTermQuery("field1","foo"));
}

public void testSpanFirst() {
spanFirstQuery(
spanTermQuery("user", "kimchy"),
3
);
}

public void testSpanMultiTerm() {
spanMultiTermQueryBuilder(prefixQuery("user", "ki"));
}

public void testSpanNear() {
spanNearQuery(spanTermQuery("field","value1"), 12)
.addClause(spanTermQuery("field","value2"))
.addClause(spanTermQuery("field","value3"))
.inOrder(false);
}

public void testSpanNot() {
spanNotQuery(spanTermQuery("field","value1"),
spanTermQuery("field","value2"));
}

public void testSpanOr() {
spanOrQuery(spanTermQuery("field","value1"))
.addClause(spanTermQuery("field","value2"))
.addClause(spanTermQuery("field","value3"));
}

public void testSpanTerm() {
spanTermQuery("user", "kimchy");
}

public void testSpanWithin() {
spanWithinQuery(
spanNearQuery(spanTermQuery("field1", "bar"), 5)
.addClause(spanTermQuery("field1", "baz"))
.inOrder(true),
spanTermQuery("field1", "foo"));
}

public void testTerm() {
termQuery("name", "kimchy");
}

public void testTerms() {
termsQuery("tags", "blue", "pill");
}

public void testType() {
typeQuery("my_type");
}

public void testWildcard() {
wildcardQuery("user", "k?mch*");
}
}
@@ -296,9 +296,9 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
return getDiscoveryNode(primary.routingEntry().currentNodeId());
}

public Future<Void> asyncRecoverReplica(IndexShard replica, BiFunction<IndexShard, DiscoveryNode, RecoveryTarget> targetSupplier)
throws IOException {
FutureTask<Void> task = new FutureTask<>(() -> {
public Future<Void> asyncRecoverReplica(
final IndexShard replica, final BiFunction<IndexShard, DiscoveryNode, RecoveryTarget> targetSupplier) throws IOException {
final FutureTask<Void> task = new FutureTask<>(() -> {
recoverReplica(replica, targetSupplier);
return null;
});
@ -43,16 +43,17 @@ import org.elasticsearch.indices.recovery.RecoveryTarget;
|
|||
import org.elasticsearch.test.junit.annotations.TestLogging;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.EnumSet;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.Semaphore;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
import static org.hamcrest.Matchers.empty;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.lessThan;
|
||||
import static org.hamcrest.Matchers.not;
|
||||
|
||||
public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestCase {
|
||||
|
@ -205,60 +206,40 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC
|
|||
}
|
||||
}
|
||||
|
||||
@TestLogging("_root:DEBUG,org.elasticsearch.action.bulk:TRACE,org.elasticsearch.action.get:TRACE," +
|
||||
"org.elasticsearch.discovery:TRACE," +
|
||||
"org.elasticsearch.cluster.service:TRACE,org.elasticsearch.indices.recovery:TRACE," +
|
||||
"org.elasticsearch.indices.cluster:TRACE,org.elasticsearch.index.shard:TRACE," +
|
||||
"org.elasticsearch.index.seqno:TRACE"
|
||||
)
|
||||
@TestLogging(
|
||||
"_root:DEBUG,"
|
||||
+ "org.elasticsearch.action.bulk:TRACE,"
|
||||
+ "org.elasticsearch.action.get:TRACE,"
|
||||
+ "org.elasticsearch.cluster.service:TRACE,"
|
||||
+ "org.elasticsearch.discovery:TRACE,"
|
||||
+ "org.elasticsearch.indices.cluster:TRACE,"
|
||||
+ "org.elasticsearch.indices.recovery:TRACE,"
|
||||
+ "org.elasticsearch.index.seqno:TRACE,"
|
||||
+ "org.elasticsearch.index.shard:TRACE")
|
||||
public void testWaitForPendingSeqNo() throws Exception {
|
||||
IndexMetaData metaData = buildIndexMetaData(1);
|
||||
|
||||
final int pendingDocs = randomIntBetween(1, 5);
|
||||
final AtomicReference<Semaphore> blockIndexingOnPrimary = new AtomicReference<>();
|
||||
final CountDownLatch blockedIndexers = new CountDownLatch(pendingDocs);
|
||||
final BlockingEngineFactory primaryEngineFactory = new BlockingEngineFactory();
|
||||
|
||||
try (ReplicationGroup shards = new ReplicationGroup(metaData) {
|
||||
@Override
|
||||
protected EngineFactory getEngineFactory(ShardRouting routing) {
|
||||
if (routing.primary()) {
|
||||
return new EngineFactory() {
|
||||
@Override
|
||||
public Engine newReadWriteEngine(EngineConfig config) {
|
||||
return InternalEngineTests.createInternalEngine((directory, writerConfig) ->
|
||||
new IndexWriter(directory, writerConfig) {
|
||||
@Override
|
||||
public long addDocument(Iterable<? extends IndexableField> doc) throws IOException {
|
||||
Semaphore block = blockIndexingOnPrimary.get();
|
||||
if (block != null) {
|
||||
blockedIndexers.countDown();
|
||||
try {
|
||||
block.acquire();
|
||||
} catch (InterruptedException e) {
|
||||
throw new AssertionError("unexpectedly interrupted", e);
                        }
                    }
                    return super.addDocument(doc);
                }

            }, null, config);
        }
    };
    return primaryEngineFactory;
} else {
    return null;
}
}
}) {
    shards.startAll();
    int docs = shards.indexDocs(randomIntBetween(1,10));
    int docs = shards.indexDocs(randomIntBetween(1, 10));
    IndexShard replica = shards.getReplicas().get(0);
    shards.removeReplica(replica);
    closeShards(replica);

    docs += pendingDocs;
    final Semaphore pendingDocsSemaphore = new Semaphore(pendingDocs);
    blockIndexingOnPrimary.set(pendingDocsSemaphore);
    blockIndexingOnPrimary.get().acquire(pendingDocs);
    primaryEngineFactory.latchIndexers();
    CountDownLatch pendingDocsDone = new CountDownLatch(pendingDocs);
    for (int i = 0; i < pendingDocs; i++) {
        final String id = "pending_" + i;

@@ -274,9 +255,9 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC
    }

    // wait for the pending ops to "hang"
    blockedIndexers.await();
    primaryEngineFactory.awaitIndexersLatch();

    blockIndexingOnPrimary.set(null);
    primaryEngineFactory.allowIndexing();
    // index some more
    docs += shards.indexDocs(randomInt(5));

@@ -298,11 +279,12 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC

    recoveryStart.await();

    for (int i = 0; i < pendingDocs; i++) {
        assertFalse((pendingDocs - i) + " pending operations, recovery should wait", preparedForTranslog.get());
        pendingDocsSemaphore.release();
    }
    // index some more
    docs += shards.indexDocs(randomInt(5));

    assertFalse("recovery should wait on pending docs", preparedForTranslog.get());

    primaryEngineFactory.releaseLatchedIndexers();
    pendingDocsDone.await();

    // now recovery can finish

@@ -312,6 +294,114 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC
        assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(), equalTo(docs));

        shards.assertAllEqual(docs);
    } finally {
        primaryEngineFactory.close();
    }
}

@TestLogging(
        "_root:DEBUG,"
                + "org.elasticsearch.action.bulk:TRACE,"
                + "org.elasticsearch.action.get:TRACE,"
                + "org.elasticsearch.cluster.service:TRACE,"
                + "org.elasticsearch.discovery:TRACE,"
                + "org.elasticsearch.indices.cluster:TRACE,"
                + "org.elasticsearch.indices.recovery:TRACE,"
                + "org.elasticsearch.index.seqno:TRACE,"
                + "org.elasticsearch.index.shard:TRACE")
public void testCheckpointsAndMarkingInSync() throws Exception {
    final IndexMetaData metaData = buildIndexMetaData(0);
    final BlockingEngineFactory replicaEngineFactory = new BlockingEngineFactory();
    try (
        ReplicationGroup shards = new ReplicationGroup(metaData) {
            @Override
            protected EngineFactory getEngineFactory(final ShardRouting routing) {
                if (routing.primary()) {
                    return null;
                } else {
                    return replicaEngineFactory;
                }
            }
        };
        AutoCloseable ignored = replicaEngineFactory // make sure we release indexers before closing
    ) {
        shards.startPrimary();
        final int docs = shards.indexDocs(randomIntBetween(1, 10));
        logger.info("indexed [{}] docs", docs);
        final CountDownLatch pendingDocDone = new CountDownLatch(1);
        final CountDownLatch pendingDocActiveWithExtraDocIndexed = new CountDownLatch(1);
        final IndexShard replica = shards.addReplica();
        final Future<Void> recoveryFuture = shards.asyncRecoverReplica(
            replica,
            (indexShard, node) -> new RecoveryTarget(indexShard, node, recoveryListener, l -> {}) {
                @Override
                public long indexTranslogOperations(final List<Translog.Operation> operations, final int totalTranslogOps) {
                    // index a doc which is not part of the snapshot, but also does not complete on replica
                    replicaEngineFactory.latchIndexers();
                    threadPool.generic().submit(() -> {
                        try {
                            shards.index(new IndexRequest(index.getName(), "type", "pending").source("{}", XContentType.JSON));
                        } catch (final Exception e) {
                            throw new RuntimeException(e);
                        } finally {
                            pendingDocDone.countDown();
                        }
                    });
                    try {
                        // the pending doc is latched in the engine
                        replicaEngineFactory.awaitIndexersLatch();
                        // unblock indexing for the next doc
                        replicaEngineFactory.allowIndexing();
                        shards.index(new IndexRequest(index.getName(), "type", "completed").source("{}", XContentType.JSON));
                        /*
                         * We want to test that the global checkpoint is blocked from advancing on the primary when a replica shard
                         * is pending being marked in-sync. We also want to test that the global checkpoint does not advance on the
                         * replica when its local checkpoint is behind the global checkpoint on the primary. Finally, advancing the
                         * global checkpoint here forces recovery to block until the pending doc is indexed on the replica.
                         */
                        shards.getPrimary().updateGlobalCheckpointOnPrimary();
                        pendingDocActiveWithExtraDocIndexed.countDown();
                    } catch (final Exception e) {
                        throw new AssertionError(e);
                    }
                    return super.indexTranslogOperations(operations, totalTranslogOps);
                }
            });
        pendingDocActiveWithExtraDocIndexed.await();
        assertThat(pendingDocDone.getCount(), equalTo(1L));
        {
            final long expectedDocs = docs + 2L;
            assertThat(shards.getPrimary().getLocalCheckpoint(), equalTo(expectedDocs - 1));
            // recovery has not completed, therefore the global checkpoint can have advanced on the primary
            assertThat(shards.getPrimary().getGlobalCheckpoint(), equalTo(expectedDocs - 1));
            // the pending document is not done, the checkpoints can not have advanced on the replica
            assertThat(replica.getLocalCheckpoint(), lessThan(expectedDocs - 1));
            assertThat(replica.getGlobalCheckpoint(), lessThan(expectedDocs - 1));
        }

        shards.getPrimary().updateGlobalCheckpointOnPrimary();
        {
            final long expectedDocs = docs + 3L;
            shards.index(new IndexRequest(index.getName(), "type", "last").source("{}", XContentType.JSON));
            assertThat(shards.getPrimary().getLocalCheckpoint(), equalTo(expectedDocs - 1));
            assertThat(shards.getPrimary().getGlobalCheckpoint(), equalTo(expectedDocs - 2));
            assertThat(replica.getLocalCheckpoint(), lessThan(expectedDocs - 2));
            assertThat(replica.getGlobalCheckpoint(), lessThan(expectedDocs - 2));
        }

        replicaEngineFactory.releaseLatchedIndexers();
        pendingDocDone.await();
        recoveryFuture.get();
        shards.getPrimary().updateGlobalCheckpointOnPrimary();
        {
            final long expectedDocs = docs + 3L;
            assertBusy(() -> {
                assertThat(shards.getPrimary().getLocalCheckpoint(), equalTo(expectedDocs - 1));
                assertThat(shards.getPrimary().getGlobalCheckpoint(), equalTo(expectedDocs - 1));
                assertThat(replica.getLocalCheckpoint(), equalTo(expectedDocs - 1));
                assertThat(replica.getGlobalCheckpoint(), equalTo(expectedDocs - 1));
            });
        }
    }
}

@@ -354,11 +444,11 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC
}

@Override
public void indexTranslogOperations(List<Translog.Operation> operations, int totalTranslogOps) {
public long indexTranslogOperations(List<Translog.Operation> operations, int totalTranslogOps) {
    if (hasBlocked() == false) {
        blockIfNeeded(RecoveryState.Stage.TRANSLOG);
    }
    super.indexTranslogOperations(operations, totalTranslogOps);
    return super.indexTranslogOperations(operations, totalTranslogOps);
}

@Override

@@ -379,4 +469,66 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC

}

static class BlockingEngineFactory implements EngineFactory, AutoCloseable {

    private final List<CountDownLatch> blocks = new ArrayList<>();

    private final AtomicReference<CountDownLatch> blockReference = new AtomicReference<>();
    private final AtomicReference<CountDownLatch> blockedIndexers = new AtomicReference<>();

    public synchronized void latchIndexers() {
        final CountDownLatch block = new CountDownLatch(1);
        blocks.add(block);
        blockedIndexers.set(new CountDownLatch(1));
        assert blockReference.compareAndSet(null, block);
    }

    public void awaitIndexersLatch() throws InterruptedException {
        blockedIndexers.get().await();
    }

    public synchronized void allowIndexing() {
        final CountDownLatch previous = blockReference.getAndSet(null);
        assert previous == null || blocks.contains(previous);
    }

    public synchronized void releaseLatchedIndexers() {
        allowIndexing();
        blocks.forEach(CountDownLatch::countDown);
        blocks.clear();
    }

    @Override
    public Engine newReadWriteEngine(final EngineConfig config) {
        return InternalEngineTests.createInternalEngine(
            (directory, writerConfig) ->
                new IndexWriter(directory, writerConfig) {
                    @Override
                    public long addDocument(final Iterable<? extends IndexableField> doc) throws IOException {
                        final CountDownLatch block = blockReference.get();
                        if (block != null) {
                            final CountDownLatch latch = blockedIndexers.get();
                            if (latch != null) {
                                latch.countDown();
                            }
                            try {
                                block.await();
                            } catch (InterruptedException e) {
                                throw new AssertionError(e);
                            }
                        }
                        return super.addDocument(doc);
                    }
                },
            null,
            config);
    }

    @Override
    public void close() throws Exception {
        releaseLatchedIndexers();
    }

}

}
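
BlockingEngineFactory gates every IndexWriter#addDocument call on a shared latch so the test can freeze indexing at a precise point. The gate on its own, as a minimal illustrative sketch (class and method names here are invented, not part of this commit):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;

// Writers block while a latch is installed; the controller can observe that a
// writer has arrived at the gate, then release everyone at once.
class WriterGate {
    private final AtomicReference<CountDownLatch> gate = new AtomicReference<>();
    private final AtomicReference<CountDownLatch> arrived = new AtomicReference<>();

    void latch() {
        arrived.set(new CountDownLatch(1));
        gate.set(new CountDownLatch(1));
    }

    void awaitArrival() throws InterruptedException {
        arrived.get().await(); // at least one writer has reached the gate
    }

    void release() {
        final CountDownLatch g = gate.getAndSet(null);
        if (g != null) {
            g.countDown(); // unblock all latched writers
        }
    }

    void pass() throws InterruptedException { // called on the write path
        final CountDownLatch g = gate.get();
        if (g != null) {
            arrived.get().countDown();
            g.await();
        }
    }
}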

@@ -1,247 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.index.seqno;

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.IndexSettingsModule;
import org.junit.Before;

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Stream;

import static org.elasticsearch.index.seqno.SequenceNumbersService.UNASSIGNED_SEQ_NO;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.not;

public class GlobalCheckpointTests extends ESTestCase {

    GlobalCheckpointTracker tracker;

    @Override
    @Before
    public void setUp() throws Exception {
        super.setUp();
        tracker =
            new GlobalCheckpointTracker(
                new ShardId("test", "_na_", 0),
                IndexSettingsModule.newIndexSettings("test", Settings.EMPTY),
                UNASSIGNED_SEQ_NO);
    }

    public void testEmptyShards() {
        assertFalse("checkpoint shouldn't be updated when there are no active shards", tracker.updateCheckpointOnPrimary());
        assertThat(tracker.getCheckpoint(), equalTo(UNASSIGNED_SEQ_NO));
    }

    private final AtomicInteger aIdGenerator = new AtomicInteger();

    private Map<String, Long> randomAllocationsWithLocalCheckpoints(int min, int max) {
        Map<String, Long> allocations = new HashMap<>();
        for (int i = randomIntBetween(min, max); i > 0; i--) {
            allocations.put("id_" + aIdGenerator.incrementAndGet(), (long) randomInt(1000));
        }
        return allocations;
    }

    public void testGlobalCheckpointUpdate() {
        Map<String, Long> allocations = new HashMap<>();
        Map<String, Long> activeWithCheckpoints = randomAllocationsWithLocalCheckpoints(0, 5);
        Set<String> active = new HashSet<>(activeWithCheckpoints.keySet());
        allocations.putAll(activeWithCheckpoints);
        Map<String, Long> initializingWithCheckpoints = randomAllocationsWithLocalCheckpoints(0, 5);
        Set<String> initializing = new HashSet<>(initializingWithCheckpoints.keySet());
        allocations.putAll(initializingWithCheckpoints);
        assertThat(allocations.size(), equalTo(active.size() + initializing.size()));

        // note: allocations can never be empty in practice as we always have at least one primary shard active/in sync
        // it is however nice not to assume this on this level and check we do the right thing.
        final long maxLocalCheckpoint = allocations.values().stream().min(Long::compare).orElse(UNASSIGNED_SEQ_NO);

        assertThat(tracker.getCheckpoint(), equalTo(UNASSIGNED_SEQ_NO));

        logger.info("--> using allocations");
        allocations.keySet().forEach(aId -> {
            final String type;
            if (active.contains(aId)) {
                type = "active";
            } else if (initializing.contains(aId)) {
                type = "init";
            } else {
                throw new IllegalStateException(aId + " not found in any map");
            }
            logger.info(" - [{}], local checkpoint [{}], [{}]", aId, allocations.get(aId), type);
        });

        tracker.updateAllocationIdsFromMaster(active, initializing);
        initializing.forEach(aId -> tracker.markAllocationIdAsInSync(aId));
        allocations.keySet().forEach(aId -> tracker.updateLocalCheckpoint(aId, allocations.get(aId)));

        assertThat(tracker.getCheckpoint(), equalTo(UNASSIGNED_SEQ_NO));

        assertThat(tracker.updateCheckpointOnPrimary(), equalTo(maxLocalCheckpoint != UNASSIGNED_SEQ_NO));
        assertThat(tracker.getCheckpoint(), equalTo(maxLocalCheckpoint));

        // increment checkpoints
        active.forEach(aId -> allocations.put(aId, allocations.get(aId) + 1 + randomInt(4)));
        initializing.forEach(aId -> allocations.put(aId, allocations.get(aId) + 1 + randomInt(4)));
        allocations.keySet().forEach(aId -> tracker.updateLocalCheckpoint(aId, allocations.get(aId)));

        // now insert an unknown active/insync id, the checkpoint shouldn't change but a refresh should be requested.
        final String extraId = "extra_" + randomAlphaOfLength(5);

        // first check that adding it without the master blessing doesn't change anything.
        tracker.updateLocalCheckpoint(extraId, maxLocalCheckpoint + 1 + randomInt(4));
        assertThat(tracker.getLocalCheckpointForAllocationId(extraId), equalTo(UNASSIGNED_SEQ_NO));

        Set<String> newActive = new HashSet<>(active);
        newActive.add(extraId);
        tracker.updateAllocationIdsFromMaster(newActive, initializing);

        // we should ask for a refresh, but not update the checkpoint
        assertTrue(tracker.updateCheckpointOnPrimary());
        assertThat(tracker.getCheckpoint(), equalTo(maxLocalCheckpoint));

        // now notify for the new id
        tracker.updateLocalCheckpoint(extraId, maxLocalCheckpoint + 1 + randomInt(4));

        // now it should be incremented
        assertTrue(tracker.updateCheckpointOnPrimary());
        assertThat(tracker.getCheckpoint(), greaterThan(maxLocalCheckpoint));
    }

    public void testMissingActiveIdsPreventAdvance() {
        final Map<String, Long> active = randomAllocationsWithLocalCheckpoints(1, 5);
        final Map<String, Long> initializing = randomAllocationsWithLocalCheckpoints(0, 5);
        final Map<String, Long> assigned = new HashMap<>();
        assigned.putAll(active);
        assigned.putAll(initializing);
        tracker.updateAllocationIdsFromMaster(
            new HashSet<>(randomSubsetOf(randomInt(active.size() - 1), active.keySet())),
            initializing.keySet());
        randomSubsetOf(initializing.keySet()).forEach(tracker::markAllocationIdAsInSync);
        assigned.forEach(tracker::updateLocalCheckpoint);

        // now mark all active shards
        tracker.updateAllocationIdsFromMaster(active.keySet(), initializing.keySet());

        // global checkpoint can't be advanced, but we need a sync
        assertTrue(tracker.updateCheckpointOnPrimary());
        assertThat(tracker.getCheckpoint(), equalTo(UNASSIGNED_SEQ_NO));

        // update again
        assigned.forEach(tracker::updateLocalCheckpoint);
        assertTrue(tracker.updateCheckpointOnPrimary());
        assertThat(tracker.getCheckpoint(), not(equalTo(UNASSIGNED_SEQ_NO)));
    }

    public void testMissingInSyncIdsPreventAdvance() {
        final Map<String, Long> active = randomAllocationsWithLocalCheckpoints(0, 5);
        final Map<String, Long> initializing = randomAllocationsWithLocalCheckpoints(1, 5);
        tracker.updateAllocationIdsFromMaster(active.keySet(), initializing.keySet());
        initializing.keySet().forEach(tracker::markAllocationIdAsInSync);
        randomSubsetOf(randomInt(initializing.size() - 1),
            initializing.keySet()).forEach(aId -> tracker.updateLocalCheckpoint(aId, initializing.get(aId)));

        active.forEach(tracker::updateLocalCheckpoint);

        // global checkpoint can't be advanced, but we need a sync
        assertTrue(tracker.updateCheckpointOnPrimary());
        assertThat(tracker.getCheckpoint(), equalTo(UNASSIGNED_SEQ_NO));

        // update again
        initializing.forEach(tracker::updateLocalCheckpoint);
        assertTrue(tracker.updateCheckpointOnPrimary());
        assertThat(tracker.getCheckpoint(), not(equalTo(UNASSIGNED_SEQ_NO)));
    }

    public void testInSyncIdsAreIgnoredIfNotValidatedByMaster() {
        final Map<String, Long> active = randomAllocationsWithLocalCheckpoints(1, 5);
        final Map<String, Long> initializing = randomAllocationsWithLocalCheckpoints(1, 5);
        final Map<String, Long> nonApproved = randomAllocationsWithLocalCheckpoints(1, 5);
        tracker.updateAllocationIdsFromMaster(active.keySet(), initializing.keySet());
        initializing.keySet().forEach(tracker::markAllocationIdAsInSync);
        nonApproved.keySet().forEach(tracker::markAllocationIdAsInSync);

        List<Map<String, Long>> allocations = Arrays.asList(active, initializing, nonApproved);
        Collections.shuffle(allocations, random());
        allocations.forEach(a -> a.forEach(tracker::updateLocalCheckpoint));

        // global checkpoint can be advanced, but we need a sync
        assertTrue(tracker.updateCheckpointOnPrimary());
        assertThat(tracker.getCheckpoint(), not(equalTo(UNASSIGNED_SEQ_NO)));
    }

    public void testInSyncIdsAreRemovedIfNotValidatedByMaster() {
        final Map<String, Long> activeToStay = randomAllocationsWithLocalCheckpoints(1, 5);
        final Map<String, Long> initializingToStay = randomAllocationsWithLocalCheckpoints(1, 5);
        final Map<String, Long> activeToBeRemoved = randomAllocationsWithLocalCheckpoints(1, 5);
        final Map<String, Long> initializingToBeRemoved = randomAllocationsWithLocalCheckpoints(1, 5);
        final Set<String> active = Sets.union(activeToStay.keySet(), activeToBeRemoved.keySet());
        final Set<String> initializing = Sets.union(initializingToStay.keySet(), initializingToBeRemoved.keySet());
        final Map<String, Long> allocations = new HashMap<>();
        allocations.putAll(activeToStay);
        if (randomBoolean()) {
            allocations.putAll(activeToBeRemoved);
        }
        allocations.putAll(initializingToStay);
        if (randomBoolean()) {
            allocations.putAll(initializingToBeRemoved);
        }
        tracker.updateAllocationIdsFromMaster(active, initializing);
        if (randomBoolean()) {
            initializingToStay.keySet().forEach(tracker::markAllocationIdAsInSync);
        } else {
            initializing.forEach(tracker::markAllocationIdAsInSync);
        }
        if (randomBoolean()) {
            allocations.forEach(tracker::updateLocalCheckpoint);
        }

        // global checkpoint may be advanced, but we need a sync in any case
        assertTrue(tracker.updateCheckpointOnPrimary());

        // now remove shards
        if (randomBoolean()) {
            tracker.updateAllocationIdsFromMaster(activeToStay.keySet(), initializingToStay.keySet());
            allocations.forEach((aid, ckp) -> tracker.updateLocalCheckpoint(aid, ckp + 10L));
        } else {
            allocations.forEach((aid, ckp) -> tracker.updateLocalCheckpoint(aid, ckp + 10L));
            tracker.updateAllocationIdsFromMaster(activeToStay.keySet(), initializingToStay.keySet());
        }

        final long checkpoint = Stream.concat(activeToStay.values().stream(), initializingToStay.values().stream())
            .min(Long::compare).get() + 10; // we added 10 to make sure it's advanced the second time

        // global checkpoint is advanced and we need a sync
        assertTrue(tracker.updateCheckpointOnPrimary());
        assertThat(tracker.getCheckpoint(), equalTo(checkpoint));
    }
}
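
All of these assertions revolve around a single invariant: the global checkpoint is the minimum of the local checkpoints of the in-sync shard copies. As a standalone sketch (illustrative only; the real logic lives in GlobalCheckpointTracker):

import java.util.Collection;

class GlobalCheckpointMath {
    // mirrors SequenceNumbersService.UNASSIGNED_SEQ_NO
    static final long UNASSIGNED_SEQ_NO = -2L;

    // The global checkpoint can only be as far along as the slowest in-sync copy;
    // with no in-sync copies it stays unassigned.
    static long compute(final Collection<Long> inSyncLocalCheckpoints) {
        return inSyncLocalCheckpoints.stream().min(Long::compare).orElse(UNASSIGNED_SEQ_NO);
    }
}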

@@ -0,0 +1,490 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.seqno;

import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.IndexSettingsModule;
import org.junit.Before;

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;

import static org.elasticsearch.index.seqno.SequenceNumbersService.UNASSIGNED_SEQ_NO;
import static org.hamcrest.Matchers.comparesEqualTo;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.not;

public class GlobalCheckpointTrackerTests extends ESTestCase {

    GlobalCheckpointTracker tracker;

    @Override
    @Before
    public void setUp() throws Exception {
        super.setUp();
        tracker =
            new GlobalCheckpointTracker(
                new ShardId("test", "_na_", 0),
                IndexSettingsModule.newIndexSettings("test", Settings.EMPTY),
                UNASSIGNED_SEQ_NO);
    }

    public void testEmptyShards() {
        assertFalse("checkpoint shouldn't be updated when there are no active shards", tracker.updateCheckpointOnPrimary());
        assertThat(tracker.getCheckpoint(), equalTo(UNASSIGNED_SEQ_NO));
    }

    private final AtomicInteger aIdGenerator = new AtomicInteger();

    private Map<String, Long> randomAllocationsWithLocalCheckpoints(int min, int max) {
        Map<String, Long> allocations = new HashMap<>();
        for (int i = randomIntBetween(min, max); i > 0; i--) {
            allocations.put("id_" + aIdGenerator.incrementAndGet(), (long) randomInt(1000));
        }
        return allocations;
    }

    public void testGlobalCheckpointUpdate() {
        Map<String, Long> allocations = new HashMap<>();
        Map<String, Long> activeWithCheckpoints = randomAllocationsWithLocalCheckpoints(0, 5);
        Set<String> active = new HashSet<>(activeWithCheckpoints.keySet());
        allocations.putAll(activeWithCheckpoints);
        Map<String, Long> initializingWithCheckpoints = randomAllocationsWithLocalCheckpoints(0, 5);
        Set<String> initializing = new HashSet<>(initializingWithCheckpoints.keySet());
        allocations.putAll(initializingWithCheckpoints);
        assertThat(allocations.size(), equalTo(active.size() + initializing.size()));

        // note: allocations can never be empty in practice as we always have at least one primary shard active/in sync
        // it is however nice not to assume this on this level and check we do the right thing.
        final long maxLocalCheckpoint = allocations.values().stream().min(Long::compare).orElse(UNASSIGNED_SEQ_NO);

        assertThat(tracker.getCheckpoint(), equalTo(UNASSIGNED_SEQ_NO));

        logger.info("--> using allocations");
        allocations.keySet().forEach(aId -> {
            final String type;
            if (active.contains(aId)) {
                type = "active";
            } else if (initializing.contains(aId)) {
                type = "init";
            } else {
                throw new IllegalStateException(aId + " not found in any map");
            }
            logger.info(" - [{}], local checkpoint [{}], [{}]", aId, allocations.get(aId), type);
        });

        tracker.updateAllocationIdsFromMaster(active, initializing);
        initializing.forEach(aId -> markAllocationIdAsInSyncQuietly(tracker, aId, tracker.getCheckpoint()));
        allocations.keySet().forEach(aId -> tracker.updateLocalCheckpoint(aId, allocations.get(aId)));

        assertThat(tracker.getCheckpoint(), equalTo(UNASSIGNED_SEQ_NO));

        assertThat(tracker.updateCheckpointOnPrimary(), equalTo(maxLocalCheckpoint != UNASSIGNED_SEQ_NO));
        assertThat(tracker.getCheckpoint(), equalTo(maxLocalCheckpoint));

        // increment checkpoints
        active.forEach(aId -> allocations.put(aId, allocations.get(aId) + 1 + randomInt(4)));
        initializing.forEach(aId -> allocations.put(aId, allocations.get(aId) + 1 + randomInt(4)));
        allocations.keySet().forEach(aId -> tracker.updateLocalCheckpoint(aId, allocations.get(aId)));

        // now insert an unknown active/insync id, the checkpoint shouldn't change but a refresh should be requested.
        final String extraId = "extra_" + randomAlphaOfLength(5);

        // first check that adding it without the master blessing doesn't change anything.
        tracker.updateLocalCheckpoint(extraId, maxLocalCheckpoint + 1 + randomInt(4));
        assertThat(tracker.getLocalCheckpointForAllocationId(extraId), equalTo(UNASSIGNED_SEQ_NO));

        Set<String> newActive = new HashSet<>(active);
        newActive.add(extraId);
        tracker.updateAllocationIdsFromMaster(newActive, initializing);

        // we should ask for a refresh, but not update the checkpoint
        assertTrue(tracker.updateCheckpointOnPrimary());
        assertThat(tracker.getCheckpoint(), equalTo(maxLocalCheckpoint));

        // now notify for the new id
        tracker.updateLocalCheckpoint(extraId, maxLocalCheckpoint + 1 + randomInt(4));

        // now it should be incremented
        assertTrue(tracker.updateCheckpointOnPrimary());
        assertThat(tracker.getCheckpoint(), greaterThan(maxLocalCheckpoint));
    }

    public void testMissingActiveIdsPreventAdvance() {
        final Map<String, Long> active = randomAllocationsWithLocalCheckpoints(1, 5);
        final Map<String, Long> initializing = randomAllocationsWithLocalCheckpoints(0, 5);
        final Map<String, Long> assigned = new HashMap<>();
        assigned.putAll(active);
        assigned.putAll(initializing);
        tracker.updateAllocationIdsFromMaster(
            new HashSet<>(randomSubsetOf(randomInt(active.size() - 1), active.keySet())),
            initializing.keySet());
        randomSubsetOf(initializing.keySet()).forEach(k -> markAllocationIdAsInSyncQuietly(tracker, k, tracker.getCheckpoint()));
        assigned.forEach(tracker::updateLocalCheckpoint);

        // now mark all active shards
        tracker.updateAllocationIdsFromMaster(active.keySet(), initializing.keySet());

        // global checkpoint can't be advanced, but we need a sync
        assertTrue(tracker.updateCheckpointOnPrimary());
        assertThat(tracker.getCheckpoint(), equalTo(UNASSIGNED_SEQ_NO));

        // update again
        assigned.forEach(tracker::updateLocalCheckpoint);
        assertTrue(tracker.updateCheckpointOnPrimary());
        assertThat(tracker.getCheckpoint(), not(equalTo(UNASSIGNED_SEQ_NO)));
    }

    public void testMissingInSyncIdsPreventAdvance() {
        final Map<String, Long> active = randomAllocationsWithLocalCheckpoints(0, 5);
        final Map<String, Long> initializing = randomAllocationsWithLocalCheckpoints(1, 5);
        tracker.updateAllocationIdsFromMaster(active.keySet(), initializing.keySet());
        initializing.keySet().forEach(k -> markAllocationIdAsInSyncQuietly(tracker, k, tracker.getCheckpoint()));
        randomSubsetOf(randomInt(initializing.size() - 1),
            initializing.keySet()).forEach(aId -> tracker.updateLocalCheckpoint(aId, initializing.get(aId)));

        active.forEach(tracker::updateLocalCheckpoint);

        // global checkpoint can't be advanced, but we need a sync
        assertTrue(tracker.updateCheckpointOnPrimary());
        assertThat(tracker.getCheckpoint(), equalTo(UNASSIGNED_SEQ_NO));

        // update again
        initializing.forEach(tracker::updateLocalCheckpoint);
        assertTrue(tracker.updateCheckpointOnPrimary());
        assertThat(tracker.getCheckpoint(), not(equalTo(UNASSIGNED_SEQ_NO)));
    }

    public void testInSyncIdsAreIgnoredIfNotValidatedByMaster() {
        final Map<String, Long> active = randomAllocationsWithLocalCheckpoints(1, 5);
        final Map<String, Long> initializing = randomAllocationsWithLocalCheckpoints(1, 5);
        final Map<String, Long> nonApproved = randomAllocationsWithLocalCheckpoints(1, 5);
        tracker.updateAllocationIdsFromMaster(active.keySet(), initializing.keySet());
        initializing.keySet().forEach(k -> markAllocationIdAsInSyncQuietly(tracker, k, tracker.getCheckpoint()));
        nonApproved.keySet().forEach(k -> markAllocationIdAsInSyncQuietly(tracker, k, tracker.getCheckpoint()));

        List<Map<String, Long>> allocations = Arrays.asList(active, initializing, nonApproved);
        Collections.shuffle(allocations, random());
        allocations.forEach(a -> a.forEach(tracker::updateLocalCheckpoint));

        // global checkpoint can be advanced, but we need a sync
        assertTrue(tracker.updateCheckpointOnPrimary());
        assertThat(tracker.getCheckpoint(), not(equalTo(UNASSIGNED_SEQ_NO)));
    }

    public void testInSyncIdsAreRemovedIfNotValidatedByMaster() {
        final Map<String, Long> activeToStay = randomAllocationsWithLocalCheckpoints(1, 5);
        final Map<String, Long> initializingToStay = randomAllocationsWithLocalCheckpoints(1, 5);
        final Map<String, Long> activeToBeRemoved = randomAllocationsWithLocalCheckpoints(1, 5);
        final Map<String, Long> initializingToBeRemoved = randomAllocationsWithLocalCheckpoints(1, 5);
        final Set<String> active = Sets.union(activeToStay.keySet(), activeToBeRemoved.keySet());
        final Set<String> initializing = Sets.union(initializingToStay.keySet(), initializingToBeRemoved.keySet());
        final Map<String, Long> allocations = new HashMap<>();
        allocations.putAll(activeToStay);
        if (randomBoolean()) {
            allocations.putAll(activeToBeRemoved);
        }
        allocations.putAll(initializingToStay);
        if (randomBoolean()) {
            allocations.putAll(initializingToBeRemoved);
        }
        tracker.updateAllocationIdsFromMaster(active, initializing);
        if (randomBoolean()) {
            initializingToStay.keySet().forEach(k -> markAllocationIdAsInSyncQuietly(tracker, k, tracker.getCheckpoint()));
        } else {
            initializing.forEach(k -> markAllocationIdAsInSyncQuietly(tracker, k, tracker.getCheckpoint()));
        }
        if (randomBoolean()) {
            allocations.forEach(tracker::updateLocalCheckpoint);
        }

        // global checkpoint may be advanced, but we need a sync in any case
        assertTrue(tracker.updateCheckpointOnPrimary());

        // now remove shards
        if (randomBoolean()) {
            tracker.updateAllocationIdsFromMaster(activeToStay.keySet(), initializingToStay.keySet());
            allocations.forEach((aid, ckp) -> tracker.updateLocalCheckpoint(aid, ckp + 10L));
        } else {
            allocations.forEach((aid, ckp) -> tracker.updateLocalCheckpoint(aid, ckp + 10L));
            tracker.updateAllocationIdsFromMaster(activeToStay.keySet(), initializingToStay.keySet());
        }

        final long checkpoint = Stream.concat(activeToStay.values().stream(), initializingToStay.values().stream())
            .min(Long::compare).get() + 10; // we added 10 to make sure it's advanced the second time

        // global checkpoint is advanced and we need a sync
        assertTrue(tracker.updateCheckpointOnPrimary());
        assertThat(tracker.getCheckpoint(), equalTo(checkpoint));
    }

    public void testWaitForAllocationIdToBeInSync() throws BrokenBarrierException, InterruptedException {
        final int localCheckpoint = randomIntBetween(1, 32);
        final int globalCheckpoint = randomIntBetween(localCheckpoint + 1, 64);
        final CyclicBarrier barrier = new CyclicBarrier(2);
        final AtomicBoolean complete = new AtomicBoolean();
        final String inSyncAllocationId = randomAlphaOfLength(16);
        final String trackingAllocationId = randomAlphaOfLength(16);
        tracker.updateAllocationIdsFromMaster(Collections.singleton(inSyncAllocationId), Collections.singleton(trackingAllocationId));
        tracker.updateLocalCheckpoint(inSyncAllocationId, globalCheckpoint);
        tracker.updateCheckpointOnPrimary();
        final Thread thread = new Thread(() -> {
            try {
                // synchronize starting with the test thread
                barrier.await();
                tracker.markAllocationIdAsInSync(trackingAllocationId, localCheckpoint);
                complete.set(true);
                // synchronize with the test thread checking if we are no longer waiting
                barrier.await();
            } catch (final BrokenBarrierException | InterruptedException e) {
                throw new RuntimeException(e);
            }
        });

        thread.start();

        // synchronize starting with the waiting thread
        barrier.await();

        final List<Integer> elements = IntStream.rangeClosed(0, globalCheckpoint - 1).boxed().collect(Collectors.toList());
        Randomness.shuffle(elements);
        for (int i = 0; i < elements.size(); i++) {
            tracker.updateLocalCheckpoint(trackingAllocationId, elements.get(i));
            assertFalse(complete.get());
            assertTrue(awaitBusy(() -> tracker.trackingLocalCheckpoints.containsKey(trackingAllocationId)));
            assertTrue(awaitBusy(() -> tracker.pendingInSync.contains(trackingAllocationId)));
            assertFalse(tracker.inSyncLocalCheckpoints.containsKey(trackingAllocationId));
        }

        tracker.updateLocalCheckpoint(trackingAllocationId, randomIntBetween(globalCheckpoint, 64));
        // synchronize with the waiting thread to mark that it is complete
        barrier.await();
        assertTrue(complete.get());
        assertTrue(tracker.trackingLocalCheckpoints.isEmpty());
        assertTrue(tracker.pendingInSync.isEmpty());
        assertTrue(tracker.inSyncLocalCheckpoints.containsKey(trackingAllocationId));

        thread.join();
    }
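
The barrier choreography above (rendezvous to start, rendezvous again once the waiter finishes) is a reusable two-thread pattern for driving a blocking call from a test. Its skeleton, with placeholder work standing in for the tracker API:

import java.util.concurrent.CyclicBarrier;

class TwoThreadRendezvous {
    public static void main(String[] args) throws Exception {
        final CyclicBarrier barrier = new CyclicBarrier(2);
        final Thread waiter = new Thread(() -> {
            try {
                barrier.await();          // 1: start together with the driver
                // ... the blocking call under test would go here ...
                barrier.await();          // 2: signal that the blocking call returned
            } catch (final Exception e) {
                throw new RuntimeException(e);
            }
        });
        waiter.start();
        barrier.await();                  // 1: release the waiter
        // ... drive the condition the waiter is blocked on here ...
        barrier.await();                  // 2: observe that the waiter finished
        waiter.join();
    }
}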

    public void testWaitForAllocationIdToBeInSyncCanBeInterrupted() throws BrokenBarrierException, InterruptedException {
        final int localCheckpoint = randomIntBetween(1, 32);
        final int globalCheckpoint = randomIntBetween(localCheckpoint + 1, 64);
        final CyclicBarrier barrier = new CyclicBarrier(2);
        final AtomicBoolean interrupted = new AtomicBoolean();
        final String inSyncAllocationId = randomAlphaOfLength(16);
        final String trackingAllocationId = randomAlphaOfLength(32);
        tracker.updateAllocationIdsFromMaster(Collections.singleton(inSyncAllocationId), Collections.singleton(trackingAllocationId));
        tracker.updateLocalCheckpoint(inSyncAllocationId, globalCheckpoint);
        tracker.updateCheckpointOnPrimary();
        final Thread thread = new Thread(() -> {
            try {
                // synchronize starting with the test thread
                barrier.await();
            } catch (final BrokenBarrierException | InterruptedException e) {
                throw new RuntimeException(e);
            }
            try {
                tracker.markAllocationIdAsInSync(trackingAllocationId, localCheckpoint);
            } catch (final InterruptedException e) {
                interrupted.set(true);
                // synchronize with the test thread checking if we are interrupted
            }
            try {
                barrier.await();
            } catch (final BrokenBarrierException | InterruptedException e) {
                throw new RuntimeException(e);
            }
        });

        thread.start();

        // synchronize starting with the waiting thread
        barrier.await();

        thread.interrupt();

        // synchronize with the waiting thread to mark that it is complete
        barrier.await();

        assertTrue(interrupted.get());

        thread.join();
    }
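
markAllocationIdAsInSync declares InterruptedException precisely so a thread parked in it can be cancelled, which is what this test drives. The bare mechanism, in isolation:

class InterruptibleWait {
    public static void main(String[] args) throws InterruptedException {
        final Thread blocked = new Thread(() -> {
            try {
                Thread.sleep(Long.MAX_VALUE); // stand-in for an interruptible monitor wait
            } catch (final InterruptedException e) {
                // expected: the waiter unblocks and can record that it was cancelled
            }
        });
        blocked.start();
        blocked.interrupt(); // delivers InterruptedException to the parked thread
        blocked.join();
    }
}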

    public void testUpdateAllocationIdsFromMaster() throws Exception {
        final int numberOfActiveAllocationsIds = randomIntBetween(2, 16);
        final Set<String> activeAllocationIds =
            IntStream.range(0, numberOfActiveAllocationsIds).mapToObj(i -> randomAlphaOfLength(16)).collect(Collectors.toSet());
        final int numberOfInitializingIds = randomIntBetween(2, 16);
        final Set<String> initializingIds =
            IntStream.range(0, numberOfInitializingIds).mapToObj(i -> {
                do {
                    final String initializingId = randomAlphaOfLength(16);
                    // ensure we do not duplicate an allocation ID in active and initializing sets
                    if (!activeAllocationIds.contains(initializingId)) {
                        return initializingId;
                    }
                } while (true);
            }).collect(Collectors.toSet());
        tracker.updateAllocationIdsFromMaster(activeAllocationIds, initializingIds);

        // first we assert that the in-sync and tracking sets are set up correctly
        assertTrue(activeAllocationIds.stream().allMatch(a -> tracker.inSyncLocalCheckpoints.containsKey(a)));
        assertTrue(
            activeAllocationIds
                .stream()
                .allMatch(a -> tracker.inSyncLocalCheckpoints.get(a) == SequenceNumbersService.UNASSIGNED_SEQ_NO));
        assertTrue(initializingIds.stream().allMatch(a -> tracker.trackingLocalCheckpoints.containsKey(a)));
        assertTrue(
            initializingIds
                .stream()
                .allMatch(a -> tracker.trackingLocalCheckpoints.get(a) == SequenceNumbersService.UNASSIGNED_SEQ_NO));

        // now we will remove some allocation IDs from these and ensure that they propagate through
        final List<String> removingActiveAllocationIds = randomSubsetOf(activeAllocationIds);
        final Set<String> newActiveAllocationIds =
            activeAllocationIds.stream().filter(a -> !removingActiveAllocationIds.contains(a)).collect(Collectors.toSet());
        final List<String> removingInitializingAllocationIds = randomSubsetOf(initializingIds);
        final Set<String> newInitializingAllocationIds =
            initializingIds.stream().filter(a -> !removingInitializingAllocationIds.contains(a)).collect(Collectors.toSet());
        tracker.updateAllocationIdsFromMaster(newActiveAllocationIds, newInitializingAllocationIds);
        assertTrue(newActiveAllocationIds.stream().allMatch(a -> tracker.inSyncLocalCheckpoints.containsKey(a)));
        assertTrue(removingActiveAllocationIds.stream().noneMatch(a -> tracker.inSyncLocalCheckpoints.containsKey(a)));
        assertTrue(newInitializingAllocationIds.stream().allMatch(a -> tracker.trackingLocalCheckpoints.containsKey(a)));
        assertTrue(removingInitializingAllocationIds.stream().noneMatch(a -> tracker.trackingLocalCheckpoints.containsKey(a)));

        /*
         * Now we will add an allocation ID to each of active and initializing and ensure they propagate through. Using different lengths
         * than we have been using above ensures that we can not collide with a previous allocation ID
         */
        newActiveAllocationIds.add(randomAlphaOfLength(32));
        newInitializingAllocationIds.add(randomAlphaOfLength(64));
        tracker.updateAllocationIdsFromMaster(newActiveAllocationIds, newInitializingAllocationIds);
        assertTrue(newActiveAllocationIds.stream().allMatch(a -> tracker.inSyncLocalCheckpoints.containsKey(a)));
        assertTrue(
            newActiveAllocationIds
                .stream()
                .allMatch(a -> tracker.inSyncLocalCheckpoints.get(a) == SequenceNumbersService.UNASSIGNED_SEQ_NO));
        assertTrue(newInitializingAllocationIds.stream().allMatch(a -> tracker.trackingLocalCheckpoints.containsKey(a)));
        assertTrue(
            newInitializingAllocationIds
                .stream()
                .allMatch(a -> tracker.trackingLocalCheckpoints.get(a) == SequenceNumbersService.UNASSIGNED_SEQ_NO));

        // the tracking allocation IDs should play no role in determining the global checkpoint
        final Map<String, Integer> activeLocalCheckpoints =
            newActiveAllocationIds.stream().collect(Collectors.toMap(Function.identity(), a -> randomIntBetween(1, 1024)));
        activeLocalCheckpoints.forEach((a, l) -> tracker.updateLocalCheckpoint(a, l));
        final Map<String, Integer> initializingLocalCheckpoints =
            newInitializingAllocationIds.stream().collect(Collectors.toMap(Function.identity(), a -> randomIntBetween(1, 1024)));
        initializingLocalCheckpoints.forEach((a, l) -> tracker.updateLocalCheckpoint(a, l));
        assertTrue(
            activeLocalCheckpoints
                .entrySet()
                .stream()
                .allMatch(e -> tracker.getLocalCheckpointForAllocationId(e.getKey()) == e.getValue()));
        assertTrue(
            initializingLocalCheckpoints
                .entrySet()
                .stream()
                .allMatch(e -> tracker.trackingLocalCheckpoints.get(e.getKey()) == e.getValue()));
        assertTrue(tracker.updateCheckpointOnPrimary());
        final long minimumActiveLocalCheckpoint = (long) activeLocalCheckpoints.values().stream().min(Integer::compareTo).get();
        assertThat(tracker.getCheckpoint(), equalTo(minimumActiveLocalCheckpoint));
        final long minimumInitializingLocalCheckpoint = (long) initializingLocalCheckpoints.values().stream().min(Integer::compareTo).get();

        // now we are going to add a new allocation ID and bring it in sync which should move it to the in-sync allocation IDs
        final long localCheckpoint =
            randomIntBetween(0, Math.toIntExact(Math.min(minimumActiveLocalCheckpoint, minimumInitializingLocalCheckpoint) - 1));

        // using a different length than we have been using above ensures that we can not collide with a previous allocation ID
        final String newSyncingAllocationId = randomAlphaOfLength(128);
        newInitializingAllocationIds.add(newSyncingAllocationId);
        tracker.updateAllocationIdsFromMaster(newActiveAllocationIds, newInitializingAllocationIds);
        final CyclicBarrier barrier = new CyclicBarrier(2);
        final Thread thread = new Thread(() -> {
            try {
                barrier.await();
                tracker.markAllocationIdAsInSync(newSyncingAllocationId, localCheckpoint);
                barrier.await();
            } catch (final BrokenBarrierException | InterruptedException e) {
                throw new RuntimeException(e);
            }
        });

        thread.start();

        barrier.await();

        assertBusy(() -> {
            assertTrue(tracker.pendingInSync.contains(newSyncingAllocationId));
            assertTrue(tracker.trackingLocalCheckpoints.containsKey(newSyncingAllocationId));
        });

        tracker.updateLocalCheckpoint(newSyncingAllocationId, randomIntBetween(Math.toIntExact(minimumActiveLocalCheckpoint), 1024));

        barrier.await();

        assertFalse(tracker.pendingInSync.contains(newSyncingAllocationId));
        assertFalse(tracker.trackingLocalCheckpoints.containsKey(newSyncingAllocationId));
        assertTrue(tracker.inSyncLocalCheckpoints.containsKey(newSyncingAllocationId));

        /*
         * The new in-sync allocation ID is in the in-sync set now yet the master does not know this; the allocation ID should still be in
         * the in-sync set even if we receive a cluster state update that does not reflect this.
         */
        tracker.updateAllocationIdsFromMaster(newActiveAllocationIds, newInitializingAllocationIds);
        assertFalse(tracker.trackingLocalCheckpoints.containsKey(newSyncingAllocationId));
        assertTrue(tracker.inSyncLocalCheckpoints.containsKey(newSyncingAllocationId));
    }

    private void markAllocationIdAsInSyncQuietly(
        final GlobalCheckpointTracker tracker, final String allocationId, final long localCheckpoint) {
        try {
            tracker.markAllocationIdAsInSync(allocationId, localCheckpoint);
        } catch (final InterruptedException e) {
            throw new RuntimeException(e);
        }
    }

}

@@ -1275,9 +1275,10 @@ public class IndexShardTests extends IndexShardTestCase {
new RecoveryTarget(shard, discoveryNode, recoveryListener, aLong -> {
}) {
    @Override
    public void indexTranslogOperations(List<Translog.Operation> operations, int totalTranslogOps) {
        super.indexTranslogOperations(operations, totalTranslogOps);
    public long indexTranslogOperations(List<Translog.Operation> operations, int totalTranslogOps) {
        final long localCheckpoint = super.indexTranslogOperations(operations, totalTranslogOps);
        assertFalse(replica.getTranslog().syncNeeded());
        return localCheckpoint;
    }
}, true);

@@ -1331,10 +1332,11 @@ public class IndexShardTests extends IndexShardTestCase {
}

@Override
public void indexTranslogOperations(List<Translog.Operation> operations, int totalTranslogOps) {
    super.indexTranslogOperations(operations, totalTranslogOps);
public long indexTranslogOperations(List<Translog.Operation> operations, int totalTranslogOps) {
    final long localCheckpoint = super.indexTranslogOperations(operations, totalTranslogOps);
    // Shard should now be active since we did recover:
    assertTrue(replica.isActive());
    return localCheckpoint;
}
}, false);

@@ -474,20 +474,36 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
    verify(search("t*"), false);
}

public void testCloseApiWildcards() throws Exception {
public void testOpenCloseApiWildcards() throws Exception {
    createIndex("foo", "foobar", "bar", "barbaz");
    ensureGreen();

    // if there are no indices to open/close and allow_no_indices=true (default), the open/close is a no-op
    verify(client().admin().indices().prepareClose("bar*"), false);
    verify(client().admin().indices().prepareClose("bar*"), false);
    verify(client().admin().indices().prepareClose("bar*"), true);

    verify(client().admin().indices().prepareClose("foo*"), false);
    verify(client().admin().indices().prepareClose("foo*"), true);
    verify(client().admin().indices().prepareClose("_all"), true);
    verify(client().admin().indices().prepareClose("foo*"), false);
    verify(client().admin().indices().prepareClose("_all"), false);

    verify(client().admin().indices().prepareOpen("bar*"), false);
    verify(client().admin().indices().prepareOpen("_all"), false);
    verify(client().admin().indices().prepareOpen("_all"), true);
    verify(client().admin().indices().prepareOpen("_all"), false);

    // if there are no indices to open/close throw an exception
    IndicesOptions openIndicesOptions = IndicesOptions.fromOptions(false, false, false, true);
    IndicesOptions closeIndicesOptions = IndicesOptions.fromOptions(false, false, true, false);

    verify(client().admin().indices().prepareClose("bar*").setIndicesOptions(closeIndicesOptions), false);
    verify(client().admin().indices().prepareClose("bar*").setIndicesOptions(closeIndicesOptions), true);

    verify(client().admin().indices().prepareClose("foo*").setIndicesOptions(closeIndicesOptions), false);
    verify(client().admin().indices().prepareClose("foo*").setIndicesOptions(closeIndicesOptions), true);
    verify(client().admin().indices().prepareClose("_all").setIndicesOptions(closeIndicesOptions), true);

    verify(client().admin().indices().prepareOpen("bar*").setIndicesOptions(openIndicesOptions), false);
    verify(client().admin().indices().prepareOpen("_all").setIndicesOptions(openIndicesOptions), false);
    verify(client().admin().indices().prepareOpen("_all").setIndicesOptions(openIndicesOptions), true);
}
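
For readers decoding the fromOptions calls above: the four booleans are, in order, ignoreUnavailable, allowNoIndices, expandWildcardsOpen, and expandWildcardsClosed, so the two option sets used here read roughly as (annotated restatement of the test's own lines):

// close: fail if a wildcard matches nothing, expand wildcards only to open
// indices (the ones that can still be closed)
IndicesOptions closeIndicesOptions = IndicesOptions.fromOptions(false, false, true, false);
// open: fail if a wildcard matches nothing, expand wildcards only to closed
// indices (the ones that can still be opened)
IndicesOptions openIndicesOptions = IndicesOptions.fromOptions(false, false, false, true);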

public void testDeleteIndex() throws Exception {

@@ -162,7 +162,8 @@ public class ClusterStateChanges extends AbstractComponent {
TransportService transportService = new TransportService(settings, transport, threadPool,
    TransportService.NOOP_TRANSPORT_INTERCEPTOR,
    boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), clusterSettings);
MetaDataIndexUpgradeService metaDataIndexUpgradeService = new MetaDataIndexUpgradeService(settings, xContentRegistry, null, null) {
MetaDataIndexUpgradeService metaDataIndexUpgradeService = new MetaDataIndexUpgradeService(settings, xContentRegistry, null, null,
    null) {
    // metaData upgrader should do nothing
    @Override
    public IndexMetaData upgradeIndexMetaData(IndexMetaData indexMetaData, Version minimumIndexCompatibilityVersion) {

@@ -29,6 +29,16 @@ import org.elasticsearch.index.mapper.SourceToParse;
import org.elasticsearch.index.seqno.SequenceNumbersService;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardTestCase;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.index.translog.TranslogConfig;
import org.elasticsearch.index.translog.TranslogWriter;

import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.concurrent.atomic.AtomicReference;

import static org.hamcrest.Matchers.equalTo;

@@ -36,7 +46,13 @@ public class PeerRecoveryTargetServiceTests extends IndexShardTestCase {

public void testGetStartingSeqNo() throws Exception {
    IndexShard replica = newShard(false);
    RecoveryTarget recoveryTarget = new RecoveryTarget(replica, null, null, null);
    final AtomicReference<Path> translogLocation = new AtomicReference<>();
    RecoveryTarget recoveryTarget = new RecoveryTarget(replica, null, null, null) {
        @Override
        Path translogLocation() {
            return translogLocation.get();
        }
    };
    try {
        recoveryEmptyReplica(replica);
        int docs = randomIntBetween(1, 10);

@@ -56,22 +72,28 @@ public class PeerRecoveryTargetServiceTests extends IndexShardTestCase {
        final long maxSeqNo = replica.seqNoStats().getMaxSeqNo();
        final long localCheckpoint = replica.getLocalCheckpoint();

        translogLocation.set(replica.getTranslog().location());

        assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(SequenceNumbersService.UNASSIGNED_SEQ_NO));

        replica.updateGlobalCheckpointOnReplica(maxSeqNo - 1);
        replica.getTranslog().sync();
        final Translog translog = replica.getTranslog();
        translogLocation.set(
            writeTranslog(replica.shardId(), translog.getTranslogUUID(), translog.currentFileGeneration(), maxSeqNo - 1));

        // commit is enough, global checkpoint is below max *committed* which is NO_OPS_PERFORMED
        // commit is good, global checkpoint is at least max *committed* which is NO_OPS_PERFORMED
        assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(0L));

        replica.flush(new FlushRequest());

        // commit is still not good enough, global checkpoint is below max
        translogLocation.set(replica.getTranslog().location());

        // commit is not good, global checkpoint is below max
        assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(SequenceNumbersService.UNASSIGNED_SEQ_NO));

        replica.updateGlobalCheckpointOnReplica(maxSeqNo);
        replica.getTranslog().sync();
        // commit is enough, global checkpoint is below max
        translogLocation.set(
            writeTranslog(replica.shardId(), translog.getTranslogUUID(), translog.currentFileGeneration(), maxSeqNo));

        // commit is good, global checkpoint is above max
        assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(localCheckpoint + 1));
    } finally {
        closeShards(replica);

@@ -79,4 +101,23 @@ public class PeerRecoveryTargetServiceTests extends IndexShardTestCase {
    }
}

private Path writeTranslog(
    final ShardId shardId,
    final String translogUUID,
    final long generation,
    final long globalCheckpoint
) throws IOException {
    final Path tempDir = createTempDir();
    final Path resolve = tempDir.resolve(Translog.getFilename(generation));
    Files.createFile(tempDir.resolve(Translog.CHECKPOINT_FILE_NAME));
    try (TranslogWriter ignored = TranslogWriter.create(
        shardId,
        translogUUID,
        generation,
        resolve,
        FileChannel::open,
        TranslogConfig.DEFAULT_BUFFER_SIZE, () -> globalCheckpoint)) {}
    return tempDir;
}

}
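
The three translogLocation flips above all probe one decision: operations-based recovery may start right after the local checkpoint only when the persisted global checkpoint already covers the maximum sequence number of the last commit. A hedged sketch of that decision (names illustrative; the real check lives in PeerRecoveryTargetService#getStartingSeqNo):

class StartingSeqNoMath {
    // sentinel meaning "fall back to file-based recovery"
    static final long UNASSIGNED_SEQ_NO = -2L;

    static long startingSeqNo(final long globalCheckpoint, final long maxSeqNoOfLastCommit, final long localCheckpoint) {
        // ops-based recovery is safe only if the global checkpoint covers
        // everything contained in the last commit
        return globalCheckpoint >= maxSeqNoOfLastCommit ? localCheckpoint + 1 : UNASSIGNED_SEQ_NO;
    }
}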
|
||||
|
|
|
@ -180,7 +180,7 @@ public class RecoverySourceHandlerTests extends ESTestCase {
|
|||
operations.add(new Translog.Index(index, new Engine.IndexResult(1, i - initialNumberOfDocs, true)));
|
||||
}
|
||||
operations.add(null);
|
||||
int totalOperations = handler.sendSnapshot(startingSeqNo, new Translog.Snapshot() {
|
||||
RecoverySourceHandler.SendSnapshotResult result = handler.sendSnapshot(startingSeqNo, new Translog.Snapshot() {
|
||||
private int counter = 0;
|
||||
|
||||
@Override
|
||||
|
@ -194,9 +194,9 @@ public class RecoverySourceHandlerTests extends ESTestCase {
|
|||
}
|
||||
});
|
||||
if (startingSeqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO) {
|
||||
assertThat(totalOperations, equalTo(initialNumberOfDocs + numberOfDocsWithValidSequenceNumbers));
|
||||
assertThat(result.totalOperations, equalTo(initialNumberOfDocs + numberOfDocsWithValidSequenceNumbers));
|
||||
} else {
|
||||
assertThat(totalOperations, equalTo(Math.toIntExact(numberOfDocsWithValidSequenceNumbers - startingSeqNo)));
|
||||
assertThat(result.totalOperations, equalTo(Math.toIntExact(numberOfDocsWithValidSequenceNumbers - startingSeqNo)));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -403,8 +403,9 @@ public class RecoverySourceHandlerTests extends ESTestCase {
|
|||
}
|
||||
|
||||
@Override
|
||||
void phase2(long startingSeqNo, Translog.Snapshot snapshot) throws IOException {
|
||||
long phase2(long startingSeqNo, Translog.Snapshot snapshot) throws IOException {
|
||||
phase2Called.set(true);
|
||||
return SequenceNumbersService.UNASSIGNED_SEQ_NO;
|
||||
}
|
||||
|
||||
};
|
||||
|
@ -494,8 +495,9 @@ public class RecoverySourceHandlerTests extends ESTestCase {
|
|||
}
|
||||
|
||||
@Override
|
||||
void phase2(long startingSeqNo, Translog.Snapshot snapshot) throws IOException {
|
||||
long phase2(long startingSeqNo, Translog.Snapshot snapshot) throws IOException {
|
||||
phase2Called.set(true);
|
||||
return SequenceNumbersService.UNASSIGNED_SEQ_NO;
|
||||
}
|
||||
|
||||
};
|
||||
|
|
|
@@ -191,41 +191,6 @@ public class OpenCloseIndexIT extends ESIntegTestCase {
assertIndexIsOpened("test1", "test2", "test3");
}

// if there are no indices to open/close throw an exception
public void testOpenCloseWildCardsNoIndicesDefault() {
expectThrows(IndexNotFoundException.class, () -> client().admin().indices().prepareOpen("test").execute().actionGet());
expectThrows(IndexNotFoundException.class, () -> client().admin().indices().prepareClose("test").execute().actionGet());

expectThrows(IndexNotFoundException.class, () -> client().admin().indices().prepareOpen("test*").execute().actionGet());
expectThrows(IndexNotFoundException.class, () -> client().admin().indices().prepareClose("test*").execute().actionGet());

expectThrows(IndexNotFoundException.class, () -> client().admin().indices().prepareOpen("*").execute().actionGet());
expectThrows(IndexNotFoundException.class, () -> client().admin().indices().prepareClose("*").execute().actionGet());

expectThrows(IndexNotFoundException.class, () -> client().admin().indices().prepareOpen("_all").execute().actionGet());
expectThrows(IndexNotFoundException.class, () -> client().admin().indices().prepareClose("_all").execute().actionGet());
}

// if there are no indices to open/close and allow_no_indices=true, the open/close is a no-op
public void testOpenCloseWildCardsNoIndicesAllowNoIndices() throws InterruptedException, ExecutionException {
IndicesOptions openIndicesOptions = IndicesOptions.fromOptions(false, true, false, true);
IndicesOptions closeIndicesOptions = IndicesOptions.fromOptions(false, true, true, false);

expectThrows(IndexNotFoundException.class,
() -> client().admin().indices().prepareOpen("test").setIndicesOptions(openIndicesOptions).execute().actionGet());
expectThrows(IndexNotFoundException.class,
() -> client().admin().indices().prepareClose("test").setIndicesOptions(closeIndicesOptions).execute().actionGet());

assertAcked(client().admin().indices().prepareOpen("test*").setIndicesOptions(openIndicesOptions).execute().get());
assertAcked(client().admin().indices().prepareClose("test*").setIndicesOptions(closeIndicesOptions).execute().get());

assertAcked(client().admin().indices().prepareOpen("*").setIndicesOptions(openIndicesOptions).execute().get());
assertAcked(client().admin().indices().prepareClose("*").setIndicesOptions(closeIndicesOptions).execute().get());

assertAcked(client().admin().indices().prepareOpen("_all").setIndicesOptions(openIndicesOptions).execute().get());
assertAcked(client().admin().indices().prepareClose("_all").setIndicesOptions(closeIndicesOptions).execute().get());
}

public void testCloseNoIndex() {
Client client = client();
Exception e = expectThrows(ActionRequestValidationException.class, () ->

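The two tests deleted above are worth decoding before they disappear, since they document the IndicesOptions.fromOptions flag order: fromOptions(ignoreUnavailable, allowNoIndices, expandToOpenIndices, expandToClosedIndices). A short sketch of how the removed test wired them up (index names are the placeholders from the test itself):

    // fromOptions(ignoreUnavailable, allowNoIndices, expandToOpen, expandToClosed).
    // An open request must expand wildcards to closed indices and vice versa,
    // and allowNoIndices=true turns an empty wildcard match into a no-op.
    IndicesOptions openIndicesOptions = IndicesOptions.fromOptions(false, true, false, true);
    IndicesOptions closeIndicesOptions = IndicesOptions.fromOptions(false, true, true, false);

    // Wildcards that match nothing are acknowledged as a no-op...
    assertAcked(client().admin().indices().prepareOpen("test*").setIndicesOptions(openIndicesOptions).execute().get());
    // ...but a concrete missing index name still fails.
    expectThrows(IndexNotFoundException.class,
        () -> client().admin().indices().prepareOpen("test").setIndicesOptions(openIndicesOptions).execute().actionGet());
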
@@ -19,7 +19,7 @@

package org.elasticsearch.search;

import org.elasticsearch.action.ListenableActionFuture;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;

@@ -128,7 +128,7 @@ public class SearchCancellationIT extends ESIntegTestCase {
assertThat(cancelTasksResponse.getTasks().get(0).getTaskId(), equalTo(searchTask.getTaskId()));
}

private SearchResponse ensureSearchWasCancelled(ListenableActionFuture<SearchResponse> searchResponse) {
private SearchResponse ensureSearchWasCancelled(ActionFuture<SearchResponse> searchResponse) {
try {
SearchResponse response = searchResponse.actionGet();
logger.info("Search response {}", response);

@@ -146,7 +146,7 @@ public class SearchCancellationIT extends ESIntegTestCase {
indexTestData();

logger.info("Executing search");
ListenableActionFuture<SearchResponse> searchResponse = client().prepareSearch("test").setQuery(
ActionFuture<SearchResponse> searchResponse = client().prepareSearch("test").setQuery(
scriptQuery(new Script(
ScriptType.INLINE, "native", NativeTestScriptedBlockFactory.TEST_NATIVE_BLOCK_SCRIPT, Collections.emptyMap())))
.execute();

@@ -164,7 +164,7 @@ public class SearchCancellationIT extends ESIntegTestCase {
indexTestData();

logger.info("Executing search");
ListenableActionFuture<SearchResponse> searchResponse = client().prepareSearch("test")
ActionFuture<SearchResponse> searchResponse = client().prepareSearch("test")
.addScriptField("test_field",
new Script(ScriptType.INLINE, "native", NativeTestScriptedBlockFactory.TEST_NATIVE_BLOCK_SCRIPT, Collections.emptyMap())
).execute();

@@ -182,7 +182,7 @@ public class SearchCancellationIT extends ESIntegTestCase {
indexTestData();

logger.info("Executing search");
ListenableActionFuture<SearchResponse> searchResponse = client().prepareSearch("test")
ActionFuture<SearchResponse> searchResponse = client().prepareSearch("test")
.setScroll(TimeValue.timeValueSeconds(10))
.setSize(5)
.setQuery(

@@ -230,7 +230,7 @@ public class SearchCancellationIT extends ESIntegTestCase {

String scrollId = searchResponse.getScrollId();
logger.info("Executing scroll with id {}", scrollId);
ListenableActionFuture<SearchResponse> scrollResponse = client().prepareSearchScroll(searchResponse.getScrollId())
ActionFuture<SearchResponse> scrollResponse = client().prepareSearchScroll(searchResponse.getScrollId())
.setScroll(keepAlive).execute();

awaitForBlock(plugins);

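The only change in this file (and the recurring one in the snapshot tests further down) is narrowing the declared type from ListenableActionFuture<T> to its parent interface ActionFuture<T>: these callers never used the listener hooks, only blocking gets. The pattern, in brief:

    // execute() returns immediately with a future; the declared type no longer
    // promises listener support the tests never used.
    ActionFuture<SearchResponse> future = client().prepareSearch("test").execute();
    // ... trigger the cancellation / unblock the script while the search runs ...
    SearchResponse response = future.actionGet(); // block until the response arrives
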
@@ -129,7 +129,7 @@ public class SearchHitTests extends ESTestCase {
}
if (randomBoolean()) {
hit.shard(new SearchShardTarget(randomAlphaOfLengthBetween(5, 10),
new ShardId(new Index(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLengthBetween(5, 10)), randomInt()),
new ShardId(new Index(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLengthBetween(5, 10)), randomInt()), null,
OriginalIndices.NONE));
}
return hit;

@@ -33,6 +33,7 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.script.MockScriptEngine;
import org.elasticsearch.script.ScoreAccessor;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptContextRegistry;
import org.elasticsearch.script.ScriptEngineRegistry;

@@ -59,6 +60,13 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase {
private static final Script MAP_SCRIPT = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "mapScript", Collections.emptyMap());
private static final Script COMBINE_SCRIPT = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "combineScript",
Collections.emptyMap());

private static final Script INIT_SCRIPT_SCORE = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "initScriptScore",
Collections.emptyMap());
private static final Script MAP_SCRIPT_SCORE = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "mapScriptScore",
Collections.emptyMap());
private static final Script COMBINE_SCRIPT_SCORE = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "combineScriptScore",
Collections.emptyMap());
private static final Map<String, Function<Map<String, Object>, Object>> SCRIPTS = new HashMap<>();

@@ -79,6 +87,21 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase {
Map<String, Object> agg = (Map<String, Object>) params.get("_agg");
return ((List<Integer>) agg.get("collector")).stream().mapToInt(Integer::intValue).sum();
});

SCRIPTS.put("initScriptScore", params -> {
Map<String, Object> agg = (Map<String, Object>) params.get("_agg");
agg.put("collector", new ArrayList<Double>());
return agg;
});
SCRIPTS.put("mapScriptScore", params -> {
Map<String, Object> agg = (Map<String, Object>) params.get("_agg");
((List<Double>) agg.get("collector")).add(((ScoreAccessor) params.get("_score")).doubleValue());
return agg;
});
SCRIPTS.put("combineScriptScore", params -> {
Map<String, Object> agg = (Map<String, Object>) params.get("_agg");
return ((List<Double>) agg.get("collector")).stream().mapToDouble(Double::doubleValue).sum();
});
}

@SuppressWarnings("unchecked")

@@ -144,6 +167,29 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase {
}
}

/**
 * test that uses the score of the documents
 */
public void testScriptedMetricWithCombineAccessesScores() throws IOException {
try (Directory directory = newDirectory()) {
Integer numDocs = randomInt(100);
try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
for (int i = 0; i < numDocs; i++) {
indexWriter.addDocument(singleton(new SortedNumericDocValuesField("number", i)));
}
}
try (IndexReader indexReader = DirectoryReader.open(directory)) {
ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME);
aggregationBuilder.initScript(INIT_SCRIPT_SCORE).mapScript(MAP_SCRIPT_SCORE).combineScript(COMBINE_SCRIPT_SCORE);
ScriptedMetric scriptedMetric = search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder);
assertEquals(AGG_NAME, scriptedMetric.getName());
assertNotNull(scriptedMetric.aggregation());
// all documents have score of 1.0
assertEquals((double) numDocs, scriptedMetric.aggregation());
}
}
}

/**
 * We cannot use Mockito for mocking QueryShardContext in this case because
 * script-related methods (e.g. QueryShardContext#getLazyExecutableScript)

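The new *_SCORE scripts above thread the document score into a scripted metric through ScoreAccessor. Reduced to plain Java, the three phases the test registers behave roughly like this (a sketch under the test's own assumption that a match_all query scores every document 1.0; class and variable names are illustrative):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class ScriptedMetricPhasesSketch {
        public static void main(String[] args) {
            // init: create per-shard state; map: runs once per document with the
            // score in scope; combine: reduces the collected scores to one value.
            Map<String, Object> agg = new HashMap<>();
            agg.put("collector", new ArrayList<Double>());            // initScriptScore
            int numDocs = 3;                                          // stand-in for randomInt(100)
            for (int i = 0; i < numDocs; i++) {
                ((List<Double>) agg.get("collector")).add(1.0);       // mapScriptScore: match_all scores 1.0
            }
            double combined = ((List<Double>) agg.get("collector"))
                    .stream().mapToDouble(Double::doubleValue).sum(); // combineScriptScore
            System.out.println(combined == (double) numDocs);         // the test's assertion
        }
    }
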
@@ -21,7 +21,7 @@ package org.elasticsearch.snapshots;

import com.carrotsearch.hppc.IntHashSet;
import com.carrotsearch.hppc.IntSet;
import org.elasticsearch.action.ListenableActionFuture;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse;

@@ -412,7 +412,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest

logger.info("--> execution was blocked on node [{}], aborting snapshot", blockedNode);

ListenableActionFuture<DeleteSnapshotResponse> deleteSnapshotResponseFuture = internalCluster().client(nodes.get(0)).admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap").execute();
ActionFuture<DeleteSnapshotResponse> deleteSnapshotResponseFuture = internalCluster().client(nodes.get(0)).admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap").execute();
// Make sure that abort makes some progress
Thread.sleep(100);
unblockNode("test-repo", blockedNode);

@@ -19,7 +19,7 @@

package org.elasticsearch.snapshots;

import org.elasticsearch.action.ListenableActionFuture;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;

@@ -83,7 +83,7 @@ public class MinThreadsSnapshotRestoreIT extends AbstractSnapshotIntegTestCase {
String blockedNode = internalCluster().getMasterName();
((MockRepository)internalCluster().getInstance(RepositoriesService.class, blockedNode).repository(repo)).blockOnDataFiles(true);
logger.info("--> start deletion of first snapshot");
ListenableActionFuture<DeleteSnapshotResponse> future =
ActionFuture<DeleteSnapshotResponse> future =
client().admin().cluster().prepareDeleteSnapshot(repo, snapshot2).execute();
logger.info("--> waiting for block to kick in on node [{}]", blockedNode);
waitForBlock(blockedNode, repo, TimeValue.timeValueSeconds(10));

@@ -129,8 +129,7 @@ public class MinThreadsSnapshotRestoreIT extends AbstractSnapshotIntegTestCase {
String blockedNode = internalCluster().getMasterName();
((MockRepository)internalCluster().getInstance(RepositoriesService.class, blockedNode).repository(repo)).blockOnDataFiles(true);
logger.info("--> start deletion of snapshot");
ListenableActionFuture<DeleteSnapshotResponse> future =
client().admin().cluster().prepareDeleteSnapshot(repo, snapshot1).execute();
ActionFuture<DeleteSnapshotResponse> future = client().admin().cluster().prepareDeleteSnapshot(repo, snapshot1).execute();
logger.info("--> waiting for block to kick in on node [{}]", blockedNode);
waitForBlock(blockedNode, repo, TimeValue.timeValueSeconds(10));

@@ -185,8 +184,7 @@ public class MinThreadsSnapshotRestoreIT extends AbstractSnapshotIntegTestCase {
String blockedNode = internalCluster().getMasterName();
((MockRepository)internalCluster().getInstance(RepositoriesService.class, blockedNode).repository(repo)).blockOnDataFiles(true);
logger.info("--> start deletion of snapshot");
ListenableActionFuture<DeleteSnapshotResponse> future =
client().admin().cluster().prepareDeleteSnapshot(repo, snapshot2).execute();
ActionFuture<DeleteSnapshotResponse> future = client().admin().cluster().prepareDeleteSnapshot(repo, snapshot2).execute();
logger.info("--> waiting for block to kick in on node [{}]", blockedNode);
waitForBlock(blockedNode, repo, TimeValue.timeValueSeconds(10));

@@ -22,7 +22,7 @@ package org.elasticsearch.snapshots;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.action.ListenableActionFuture;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;

@@ -56,7 +56,6 @@ import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;

@@ -155,7 +154,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
assertHitCount(client.prepareSearch("test-idx-2").setSize(0).get(), 100L);
assertHitCount(client.prepareSearch("test-idx-3").setSize(0).get(), 100L);

ListenableActionFuture<FlushResponse> flushResponseFuture = null;
ActionFuture<FlushResponse> flushResponseFuture = null;
if (randomBoolean()) {
ArrayList<String> indicesToFlush = new ArrayList<>();
for (int i = 1; i < 4; i++) {

@@ -888,7 +887,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
logger.info("--> delete index");
cluster().wipeIndices("test-idx");
logger.info("--> restore index after deletion");
ListenableActionFuture<RestoreSnapshotResponse> restoreSnapshotResponseFuture =
ActionFuture<RestoreSnapshotResponse> restoreSnapshotResponseFuture =
client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute();

logger.info("--> wait for the index to appear");

@@ -2014,7 +2013,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
assertThat(client.prepareSearch("test-idx-3").setSize(0).get().getHits().getTotalHits(), equalTo(100L));

logger.info("--> snapshot allow partial {}", allowPartial);
ListenableActionFuture<CreateSnapshotResponse> future = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap")
ActionFuture<CreateSnapshotResponse> future = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap")
.setIndices("test-idx-*").setWaitForCompletion(true).setPartial(allowPartial).execute();
logger.info("--> wait for block to kick in");
if (initBlocking) {

@@ -2109,7 +2108,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
blockAllDataNodes("test-repo");
logger.info("--> execution will be blocked on all data nodes");

final ListenableActionFuture<RestoreSnapshotResponse> restoreFut;
final ActionFuture<RestoreSnapshotResponse> restoreFut;
try {
logger.info("--> start restore");
restoreFut = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")

@@ -2174,7 +2173,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
logger.info("--> execution will be blocked on all data nodes");
blockAllDataNodes(repoName);

final ListenableActionFuture<RestoreSnapshotResponse> restoreFut;
final ActionFuture<RestoreSnapshotResponse> restoreFut;
try {
logger.info("--> start restore");
restoreFut = client.admin().cluster().prepareRestoreSnapshot(repoName, snapshotName)

@@ -2461,7 +2460,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
// take initial snapshot with a block, making sure we only get 1 in-progress snapshot returned
// block a node so the create snapshot operation can remain in progress
final String initialBlockedNode = blockNodeWithIndex(repositoryName, indexName);
ListenableActionFuture<CreateSnapshotResponse> responseListener =
ActionFuture<CreateSnapshotResponse> responseListener =
client.admin().cluster().prepareCreateSnapshot(repositoryName, "snap-on-empty-repo")
.setWaitForCompletion(false)
.setIndices(indexName)

@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.search;
package org.elasticsearch.transport;

import org.apache.lucene.store.AlreadyClosedException;
import org.elasticsearch.Build;

@@ -33,6 +33,7 @@ import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse
import org.elasticsearch.action.admin.cluster.state.ClusterStateAction;
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;

@@ -53,6 +54,8 @@ import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.RemoteClusterConnection;
import org.elasticsearch.transport.RemoteConnectionInfo;
import org.elasticsearch.transport.RemoteTransportException;
import org.elasticsearch.transport.TransportConnectionListener;
import org.elasticsearch.transport.TransportService;

@@ -16,26 +16,14 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.search;
package org.elasticsearch.transport;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup;
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.index.query.TermsQueryBuilder;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.threadpool.TestThreadPool;

@@ -44,10 +32,8 @@ import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CopyOnWriteArrayList;

@@ -78,7 +64,7 @@ public class RemoteClusterServiceTests extends ESTestCase {
}

public void testSettingsAreRegistered() {
assertTrue(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.contains(RemoteClusterService.REMOTE_CLUSTERS_SEEDS));
assertTrue(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.contains(RemoteClusterAware.REMOTE_CLUSTERS_SEEDS));
assertTrue(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.contains(RemoteClusterService.REMOTE_CONNECTIONS_PER_CLUSTER));
assertTrue(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.contains(RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING));
assertTrue(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.contains(RemoteClusterService.REMOTE_NODE_ATTRIBUTE));

@@ -89,12 +75,12 @@ public class RemoteClusterServiceTests extends ESTestCase {
Settings settings = Settings.builder()
.put("search.remote.foo.seeds", "192.168.0.1:8080")
.put("search.remote.bar.seed", "[::1]:9090").build();
RemoteClusterService.REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(settings).forEach(setting -> setting.get(settings));
RemoteClusterAware.REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(settings).forEach(setting -> setting.get(settings));

Settings brokenSettings = Settings.builder()
.put("search.remote.foo.seeds", "192.168.0.1").build();
expectThrows(IllegalArgumentException.class, () ->
RemoteClusterService.REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(brokenSettings).forEach(setting -> setting.get(brokenSettings)));
RemoteClusterAware.REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(brokenSettings).forEach(setting -> setting.get(brokenSettings)));
}

public void testBuiltRemoteClustersSeeds() throws Exception {

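The validation hunk above pins down the format of the remote-cluster seeds setting: each entry under search.remote.<alias>.seeds must be a host:port transport address, and judging from the test, a bare host is rejected when the setting value is read rather than at node startup. For instance (the alias name here is a placeholder):

    // Parsing happens in Setting#get, so the failure surfaces on first read.
    Settings ok = Settings.builder()
        .put("search.remote.cluster_one.seeds", "192.168.0.1:8080") // host:port -> valid
        .build();
    Settings broken = Settings.builder()
        .put("search.remote.cluster_one.seeds", "192.168.0.1")      // no port -> IllegalArgumentException
        .build();
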
@@ -144,10 +130,11 @@ public class RemoteClusterServiceTests extends ESTestCase {
assertTrue(service.isRemoteClusterRegistered("cluster_2"));
assertFalse(service.isRemoteClusterRegistered("foo"));
Map<String, List<String>> perClusterIndices = service.groupClusterIndices(new String[]{"foo:bar", "cluster_1:bar",
"cluster_2:foo:bar", "cluster_1:test", "cluster_2:foo*", "foo", "cluster*:baz", "*:boo", "no*match:boo"}, i -> false);
String[] localIndices = perClusterIndices.computeIfAbsent(RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY,
"cluster_2:foo:bar", "cluster_1:test", "cluster_2:foo*", "foo", "cluster*:baz", "*:boo", "no*match:boo"},
i -> false);
String[] localIndices = perClusterIndices.computeIfAbsent(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY,
k -> Collections.emptyList()).toArray(new String[0]);
assertNotNull(perClusterIndices.remove(RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY));
assertNotNull(perClusterIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY));
assertArrayEquals(new String[]{"foo:bar", "foo", "no*match:boo"}, localIndices);
assertEquals(2, perClusterIndices.size());
assertEquals(Arrays.asList("bar", "test", "baz", "boo"), perClusterIndices.get("cluster_1"));

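groupClusterIndices is the piece that gives cluster:index expressions their meaning: the text before the first ':' is treated as a remote-cluster alias only if such a cluster is registered, otherwise the whole expression stays local. A sketch of the contract the assertions above encode (the second argument's exact semantics, a predicate over index names, is an assumption):

    // With cluster_1 and cluster_2 registered, the test above expects:
    //   "cluster_1:bar"     -> remote cluster_1, index "bar"
    //   "cluster_2:foo:bar" -> remote cluster_2, index "foo:bar" (only the first ':' splits)
    //   "cluster*:baz"      -> every registered cluster matching the wildcard
    //   "*:boo"             -> all registered remote clusters
    //   "foo:bar", "foo"    -> local ("foo" is not a registered alias)
    //   "no*match:boo"      -> local (the cluster wildcard matches nothing)
    Map<String, List<String>> grouped = service.groupClusterIndices(
        new String[]{"cluster_1:bar", "cluster_2:foo:bar", "foo:bar", "no*match:boo"},
        indexName -> false); // assumed: does this name exist as a concrete local index?
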
@@ -195,97 +182,13 @@ public class RemoteClusterServiceTests extends ESTestCase {
service.updateRemoteCluster("cluster_2", Collections.emptyList());
assertFalse(service.isRemoteClusterRegistered("cluster_2"));
IllegalArgumentException iae = expectThrows(IllegalArgumentException.class,
() -> service.updateRemoteCluster(RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY, Collections.emptyList()));
() -> service.updateRemoteCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, Collections.emptyList()));
assertEquals("remote clusters must not have the empty string as its key", iae.getMessage());
}
}
}
}

public void testProcessRemoteShards() throws IOException {
try (RemoteClusterService service = new RemoteClusterService(Settings.EMPTY, null)) {
assertFalse(service.isCrossClusterSearchEnabled());
List<SearchShardIterator> iteratorList = new ArrayList<>();
Map<String, ClusterSearchShardsResponse> searchShardsResponseMap = new HashMap<>();
DiscoveryNode[] nodes = new DiscoveryNode[] {
new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT),
new DiscoveryNode("node2", buildNewFakeTransportAddress(), Version.CURRENT)
};
Map<String, AliasFilter> indicesAndAliases = new HashMap<>();
indicesAndAliases.put("foo", new AliasFilter(new TermsQueryBuilder("foo", "bar"), Strings.EMPTY_ARRAY));
indicesAndAliases.put("bar", new AliasFilter(new MatchAllQueryBuilder(), Strings.EMPTY_ARRAY));
ClusterSearchShardsGroup[] groups = new ClusterSearchShardsGroup[] {
new ClusterSearchShardsGroup(new ShardId("foo", "foo_id", 0),
new ShardRouting[] {TestShardRouting.newShardRouting("foo", 0, "node1", true, ShardRoutingState.STARTED),
TestShardRouting.newShardRouting("foo", 0, "node2", false, ShardRoutingState.STARTED)}),
new ClusterSearchShardsGroup(new ShardId("foo", "foo_id", 1),
new ShardRouting[] {TestShardRouting.newShardRouting("foo", 0, "node1", true, ShardRoutingState.STARTED),
TestShardRouting.newShardRouting("foo", 1, "node2", false, ShardRoutingState.STARTED)}),
new ClusterSearchShardsGroup(new ShardId("bar", "bar_id", 0),
new ShardRouting[] {TestShardRouting.newShardRouting("bar", 0, "node2", true, ShardRoutingState.STARTED),
TestShardRouting.newShardRouting("bar", 0, "node1", false, ShardRoutingState.STARTED)})
};
searchShardsResponseMap.put("test_cluster_1", new ClusterSearchShardsResponse(groups, nodes, indicesAndAliases));
DiscoveryNode[] nodes2 = new DiscoveryNode[] {
new DiscoveryNode("node3", buildNewFakeTransportAddress(), Version.CURRENT)
};
ClusterSearchShardsGroup[] groups2 = new ClusterSearchShardsGroup[] {
new ClusterSearchShardsGroup(new ShardId("xyz", "xyz_id", 0),
new ShardRouting[] {TestShardRouting.newShardRouting("xyz", 0, "node3", true, ShardRoutingState.STARTED)})
};
searchShardsResponseMap.put("test_cluster_2", new ClusterSearchShardsResponse(groups2, nodes2, null));

Map<String, OriginalIndices> remoteIndicesByCluster = new HashMap<>();
remoteIndicesByCluster.put("test_cluster_1",
new OriginalIndices(new String[]{"fo*", "ba*"}, IndicesOptions.strictExpandOpenAndForbidClosed()));
remoteIndicesByCluster.put("test_cluster_2",
new OriginalIndices(new String[]{"x*"}, IndicesOptions.strictExpandOpenAndForbidClosed()));
Map<String, AliasFilter> remoteAliases = new HashMap<>();
service.processRemoteShards(searchShardsResponseMap, remoteIndicesByCluster, iteratorList, remoteAliases);
assertEquals(4, iteratorList.size());
for (SearchShardIterator iterator : iteratorList) {
if (iterator.shardId().getIndexName().endsWith("foo")) {
assertArrayEquals(new String[]{"fo*", "ba*"}, iterator.getOriginalIndices().indices());
assertTrue(iterator.shardId().getId() == 0 || iterator.shardId().getId() == 1);
assertEquals("test_cluster_1:foo", iterator.shardId().getIndexName());
ShardRouting shardRouting = iterator.nextOrNull();
assertNotNull(shardRouting);
assertEquals(shardRouting.getIndexName(), "foo");
shardRouting = iterator.nextOrNull();
assertNotNull(shardRouting);
assertEquals(shardRouting.getIndexName(), "foo");
assertNull(iterator.nextOrNull());
} else if (iterator.shardId().getIndexName().endsWith("bar")) {
assertArrayEquals(new String[]{"fo*", "ba*"}, iterator.getOriginalIndices().indices());
assertEquals(0, iterator.shardId().getId());
assertEquals("test_cluster_1:bar", iterator.shardId().getIndexName());
ShardRouting shardRouting = iterator.nextOrNull();
assertNotNull(shardRouting);
assertEquals(shardRouting.getIndexName(), "bar");
shardRouting = iterator.nextOrNull();
assertNotNull(shardRouting);
assertEquals(shardRouting.getIndexName(), "bar");
assertNull(iterator.nextOrNull());
} else if (iterator.shardId().getIndexName().endsWith("xyz")) {
assertArrayEquals(new String[]{"x*"}, iterator.getOriginalIndices().indices());
assertEquals(0, iterator.shardId().getId());
assertEquals("test_cluster_2:xyz", iterator.shardId().getIndexName());
ShardRouting shardRouting = iterator.nextOrNull();
assertNotNull(shardRouting);
assertEquals(shardRouting.getIndexName(), "xyz");
assertNull(iterator.nextOrNull());
}
}
assertEquals(3, remoteAliases.size());
assertTrue(remoteAliases.toString(), remoteAliases.containsKey("foo_id"));
assertTrue(remoteAliases.toString(), remoteAliases.containsKey("bar_id"));
assertTrue(remoteAliases.toString(), remoteAliases.containsKey("xyz_id"));
assertEquals(new TermsQueryBuilder("foo", "bar"), remoteAliases.get("foo_id").getQueryBuilder());
assertEquals(new MatchAllQueryBuilder(), remoteAliases.get("bar_id").getQueryBuilder());
assertNull(remoteAliases.get("xyz_id").getQueryBuilder());
}
}

public void testRemoteNodeAttribute() throws IOException, InterruptedException {
final Settings settings =
Settings.builder().put("search.remote.node.attr", "gateway").build();

Some files were not shown because too many files have changed in this diff.